diff --git a/110/paper.pdf b/110/paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6c5a98307f9624d6b1becf5c3ef21ca8351ab071 --- /dev/null +++ b/110/paper.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07d13bb63519a2b71d947cad77a7fdce405a8fe8149948a912d60ec73311c0f7 +size 1047016 diff --git a/110/replication_package/README.pdf b/110/replication_package/README.pdf new file mode 100644 index 0000000000000000000000000000000000000000..59a8da2fb4d6487e061b9e94e1e0e70a25c56b71 --- /dev/null +++ b/110/replication_package/README.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78a718f187ebdc73405e2c0fd25b19f5f355ddebff74865072cbb606f9ad750e +size 83852 diff --git a/110/replication_package/replication/Data/Raw/allcity_info.dta b/110/replication_package/replication/Data/Raw/allcity_info.dta new file mode 100644 index 0000000000000000000000000000000000000000..68eb31fad02918686048f37f1843c0f4a52e3403 --- /dev/null +++ b/110/replication_package/replication/Data/Raw/allcity_info.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12a4cb5b0d282c620703079f8249182f5be48ba77acc8dccdcbd91201b396f30 +size 26619 diff --git a/110/replication_package/replication/Data/Raw/baidu.dta b/110/replication_package/replication/Data/Raw/baidu.dta new file mode 100644 index 0000000000000000000000000000000000000000..7554d251a92f8da7d54c512ccaa3bafa064ad0a7 --- /dev/null +++ b/110/replication_package/replication/Data/Raw/baidu.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1592c3d50dfd55c71dc6e941b060ad427988755640cd8dd629e5ceb10a616e9d +size 1610593 diff --git a/110/replication_package/replication/Data/Raw/city_info.dta b/110/replication_package/replication/Data/Raw/city_info.dta new file mode 100644 index 0000000000000000000000000000000000000000..cf3d6ab8071166fc913446d54457aaaab56231dd --- /dev/null +++ b/110/replication_package/replication/Data/Raw/city_info.dta 
@@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a82a01813262f00682801f8d1b6cc82495ab1b91d5b0cad81c45617fa5ecf32 +size 157347 diff --git a/110/replication_package/replication/Data/Raw/daily_monitor_api.dta b/110/replication_package/replication/Data/Raw/daily_monitor_api.dta new file mode 100644 index 0000000000000000000000000000000000000000..0b88e50ccc7e329aa2334e876e1b6d5f942838c2 --- /dev/null +++ b/110/replication_package/replication/Data/Raw/daily_monitor_api.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:601b057fbd7bb156dc6c09ac76944951b5f5fda616e716f150389a656325790f +size 73326101 diff --git a/110/replication_package/replication/Data/Raw/enf_info.dta b/110/replication_package/replication/Data/Raw/enf_info.dta new file mode 100644 index 0000000000000000000000000000000000000000..b07cdc7bb04109314c7a423bcc4ae95f7e8ee019 --- /dev/null +++ b/110/replication_package/replication/Data/Raw/enf_info.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:725cfabc1df243f8581550753f1dc4ca837a9b3cfadc91cdbee5334da91884a7 +size 125241117 diff --git a/110/replication_package/replication/Data/Raw/firm_info.dta b/110/replication_package/replication/Data/Raw/firm_info.dta new file mode 100644 index 0000000000000000000000000000000000000000..782a4e684f36ab8297b19f22fbabec24ae55082d --- /dev/null +++ b/110/replication_package/replication/Data/Raw/firm_info.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:219e4405db12a981ae0968460f46759c4ca55ac25bae511a7452ecaab33ecf55 +size 48410992 diff --git a/110/replication_package/replication/Data/Raw/lights.dta b/110/replication_package/replication/Data/Raw/lights.dta new file mode 100644 index 0000000000000000000000000000000000000000..558c419bbcd28eb939fbc2df05004b3061ffbeae --- /dev/null +++ b/110/replication_package/replication/Data/Raw/lights.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:660809efe234fe8cce30755a42884a612c898be7f84e58d442fd630fd2a3f7cb +size 590633 diff --git a/110/replication_package/replication/Data/Raw/mayor.dta b/110/replication_package/replication/Data/Raw/mayor.dta new file mode 100644 index 0000000000000000000000000000000000000000..e97fd499bea3d1a0d2c35d688e27e299dd50c9f2 --- /dev/null +++ b/110/replication_package/replication/Data/Raw/mayor.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:049e787aab43af46ed8d7c119813ddbd24aa6f3d520b83a47572ef1e1cbf22b3 +size 158452 diff --git a/110/replication_package/replication/Data/Raw/monitor_city_long.dta b/110/replication_package/replication/Data/Raw/monitor_city_long.dta new file mode 100644 index 0000000000000000000000000000000000000000..565fbd8d5b7dd27edadfc53ddfa977b51a2f0e8e --- /dev/null +++ b/110/replication_package/replication/Data/Raw/monitor_city_long.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a62866ac354457cc8d873bc4ee89eb50ccdc662d93700bba83c8ad61265cd4c +size 398439 diff --git a/110/replication_package/replication/Data/Raw/monitor_info.dta b/110/replication_package/replication/Data/Raw/monitor_info.dta new file mode 100644 index 0000000000000000000000000000000000000000..25503f3806ec6c4515ccb740156cc15687fa25c8 --- /dev/null +++ b/110/replication_package/replication/Data/Raw/monitor_info.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72f23e3fbfa4045e19762601b67364054560599b0229375e6bace842a2d9cd9a +size 17207 diff --git a/110/replication_package/replication/Data/Raw/non-asif.dta b/110/replication_package/replication/Data/Raw/non-asif.dta new file mode 100644 index 0000000000000000000000000000000000000000..b5a07b87716572138784aa6e6027b590f0ce38bc --- /dev/null +++ b/110/replication_package/replication/Data/Raw/non-asif.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2dae2c0e50bb65703d9f7b7adb60708170aa9b95451792a5a756b87b26401b5 +size 145814 diff 
--git a/110/replication_package/replication/Data/Raw/pm.dta b/110/replication_package/replication/Data/Raw/pm.dta new file mode 100644 index 0000000000000000000000000000000000000000..fcadd5fd1c3d76392344e3f661507cf159deb96a --- /dev/null +++ b/110/replication_package/replication/Data/Raw/pm.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b48d012c21a4b6d2640315da680593a4984956da648010306a5aff782c8a06c6 +size 402981 diff --git a/110/replication_package/replication/Data/Raw/pm_pix.dta b/110/replication_package/replication/Data/Raw/pm_pix.dta new file mode 100644 index 0000000000000000000000000000000000000000..bd964bfb0ca6245cae4fbafc68c4d2ceffb0def8 --- /dev/null +++ b/110/replication_package/replication/Data/Raw/pm_pix.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:161a1cd39bba8a13dd987fe56cb957571ceb1570f536957524550a3723c6951d +size 296456487 diff --git a/110/replication_package/replication/Data/Raw/weather_daily.dta b/110/replication_package/replication/Data/Raw/weather_daily.dta new file mode 100644 index 0000000000000000000000000000000000000000..3b6f2668339237a281e36c8dda4bcb583af17321 --- /dev/null +++ b/110/replication_package/replication/Data/Raw/weather_daily.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ec43c966d187b8e8bf642914cdd514a0b67907ceceaf830a86567d77f1f16e5 +size 356358846 diff --git a/110/replication_package/replication/Data/age_2017.dta b/110/replication_package/replication/Data/age_2017.dta new file mode 100644 index 0000000000000000000000000000000000000000..a49ea86641e9dc621c07266fb357a895319e5b79 --- /dev/null +++ b/110/replication_package/replication/Data/age_2017.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:075f53223d92b3ec57941b91f325a70b5a61f8097336d9a98ef72b83d2e83526 +size 9971 diff --git a/110/replication_package/replication/Data/age_year.dta b/110/replication_package/replication/Data/age_year.dta new file mode 100644 
index 0000000000000000000000000000000000000000..c25afd56fa573bfef0a61c8407889604dc3e4735 --- /dev/null +++ b/110/replication_package/replication/Data/age_year.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbacc1ada89b49d026131d50dc255fefa6a1302d9f4577dc1ea9aaa4fa7dfbe2 +size 17228 diff --git a/110/replication_package/replication/Data/city_enf.dta b/110/replication_package/replication/Data/city_enf.dta new file mode 100644 index 0000000000000000000000000000000000000000..bcfef6deb8f986c5edd068edf5e825a530d69036 --- /dev/null +++ b/110/replication_package/replication/Data/city_enf.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7736abea6375079aa347b33c17db4eb6616fa420b53834820fdcff4c7805f225 +size 5212280 diff --git a/110/replication_package/replication/Data/city_enf_rd.dta b/110/replication_package/replication/Data/city_enf_rd.dta new file mode 100644 index 0000000000000000000000000000000000000000..563889b58c4cadefed5282b871d50a5c2c260d74 --- /dev/null +++ b/110/replication_package/replication/Data/city_enf_rd.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26c6b8b5d881fa88d1312b4206033bd85a1181741cc116a23ef11f9b739cd0bd +size 10401144 diff --git a/110/replication_package/replication/Data/city_pm.dta b/110/replication_package/replication/Data/city_pm.dta new file mode 100644 index 0000000000000000000000000000000000000000..ac92944b092d62e1f1725d4ec69219640aee675c --- /dev/null +++ b/110/replication_package/replication/Data/city_pm.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b40fbf0c7ac82654fd5d45a40fcd4490a79a566cdf5d84e7eb301d7d57e35e84 +size 14746346 diff --git a/110/replication_package/replication/Data/city_pm_rd.dta b/110/replication_package/replication/Data/city_pm_rd.dta new file mode 100644 index 0000000000000000000000000000000000000000..495194cd3f02cfa58227794e29e45de2a7e072fe --- /dev/null +++ 
b/110/replication_package/replication/Data/city_pm_rd.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e449b5afef866304b825c89830b1c1e2afc13a8f59a921c5d29a5a73f6bd2d0 +size 29496042 diff --git a/110/replication_package/replication/Data/enf.dta b/110/replication_package/replication/Data/enf.dta new file mode 100644 index 0000000000000000000000000000000000000000..c32db531fb658752f8cfc5c8390178d923dc50b7 --- /dev/null +++ b/110/replication_package/replication/Data/enf.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2607fee216a4488bf52da3a9833a0b9e86a8da4066af5ea57f0ebb3efd53540 +size 447505 diff --git a/110/replication_package/replication/Data/firm_enf.dta b/110/replication_package/replication/Data/firm_enf.dta new file mode 100644 index 0000000000000000000000000000000000000000..3858d4702d0cc1ff850ec2823aa7ce3da44e5c34 --- /dev/null +++ b/110/replication_package/replication/Data/firm_enf.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c057d3ef97271990288ae4773bdc26d1be58b65e269c61b958b1319c41a7c257 +size 1790795729 diff --git a/110/replication_package/replication/Data/mayor_panel.dta b/110/replication_package/replication/Data/mayor_panel.dta new file mode 100644 index 0000000000000000000000000000000000000000..850d18b11cfb31190d6368c6a31302a085280e7b --- /dev/null +++ b/110/replication_package/replication/Data/mayor_panel.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:943e087105600af41c4686d6b327678a454fa3ccf6b0a955203eaf7690781fac +size 5591041 diff --git a/110/replication_package/replication/Data/monitor_api.dta b/110/replication_package/replication/Data/monitor_api.dta new file mode 100644 index 0000000000000000000000000000000000000000..41099576487400824d0c32e8cc0e34aabbc1be97 --- /dev/null +++ b/110/replication_package/replication/Data/monitor_api.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ee5a6f082c4897a3f25a001b9a89559c5e54fdf64016ac6b445732163f01142b +size 1165784 diff --git a/110/replication_package/replication/Data/monitor_pix.dta b/110/replication_package/replication/Data/monitor_pix.dta new file mode 100644 index 0000000000000000000000000000000000000000..c8242527a429fd6fed05cc94cfa67aa1b29d68c1 --- /dev/null +++ b/110/replication_package/replication/Data/monitor_pix.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b480cdfd1bb7de424b67048ebc5e42a5418a2d44eb1dd336e3342f6df9dfb51c +size 967587 diff --git a/110/replication_package/replication/Data/monthly_api.dta b/110/replication_package/replication/Data/monthly_api.dta new file mode 100644 index 0000000000000000000000000000000000000000..ce8c689ad376577d0a0be5f4d039faf8bd3b50d6 --- /dev/null +++ b/110/replication_package/replication/Data/monthly_api.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e581d51abea4737c85604861f62fc370e6908c8ca28442b2891ae063bf5d6fc +size 1930462 diff --git a/110/replication_package/replication/Data/pix.dta b/110/replication_package/replication/Data/pix.dta new file mode 100644 index 0000000000000000000000000000000000000000..c540f540a446b4da75938e29f85a102cab2e956c --- /dev/null +++ b/110/replication_package/replication/Data/pix.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a307757e58c7bc113f829f0494542ee98f1d00a5795469b2d4e78e782117b234 +size 221127 diff --git a/110/replication_package/replication/Data/share.dta b/110/replication_package/replication/Data/share.dta new file mode 100644 index 0000000000000000000000000000000000000000..a5cb1b7c72d226e6962564ad35852bf175efe624 --- /dev/null +++ b/110/replication_package/replication/Data/share.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e01d6591a4efbed529daf66bd71fec07bbda9093a022bb874f9613f493f2eca4 +size 11366 diff --git a/110/replication_package/replication/Data/weather_monthly.dta 
b/110/replication_package/replication/Data/weather_monthly.dta new file mode 100644 index 0000000000000000000000000000000000000000..7801e82b472157153364365ced422ac6f54dcfa2 --- /dev/null +++ b/110/replication_package/replication/Data/weather_monthly.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25ccb314e369ee87ed719872313ea3ed2b91d493f8bdd2ca2e44b619a579656f +size 297844 diff --git a/110/replication_package/replication/Data/weather_quarterly.dta b/110/replication_package/replication/Data/weather_quarterly.dta new file mode 100644 index 0000000000000000000000000000000000000000..11f770e9b723ca53d703ade867214ecb58095ef1 --- /dev/null +++ b/110/replication_package/replication/Data/weather_quarterly.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f93b8adcffe878426f1c004bbda5d36daa347fbc78c8c7651a55bd12232d32b4 +size 122260 diff --git a/110/replication_package/replication/Data/wind_quarterly.dta b/110/replication_package/replication/Data/wind_quarterly.dta new file mode 100644 index 0000000000000000000000000000000000000000..14d0af0753ce3649cf38828d801f66cdae03cdad --- /dev/null +++ b/110/replication_package/replication/Data/wind_quarterly.dta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c1a0b439edc00aa35d7b8a42cdada8c415b15774c026e396a4bdd6f699697e9 +size 59316 diff --git a/110/replication_package/replication/Do-file/Appendix.do b/110/replication_package/replication/Do-file/Appendix.do new file mode 100644 index 0000000000000000000000000000000000000000..73651cb7d3437290029b8f75c718392392f50f2f --- /dev/null +++ b/110/replication_package/replication/Do-file/Appendix.do @@ -0,0 +1,1564 @@ +* Set Directory +clear +set more off +set scheme s1mono + +cd "$path" +global data_files "$path/Data" +global out_files "$path/output" + +**============================================================================== +* Table A1 +use "$data_files/Raw/allcity_info", clear + +label variable pm25 "AOD" 
+label variable number "\# Monitors" +label variable area "Size of Built-up Area (km2)" +label variable pop "Urban Population (10,000)" + +eststo clear +estpost tabstat pm25 number area pop, by(mainsample) statistics(mean sd) columns(statistics) listwise nototal +esttab using "$out_files/TableA1.tex", replace tex main(mean) aux(sd) nogaps nodepvar compress fragment nostar noobs unstack nonote nomtitle label + +**============================================================================== +* Table C1 +use "$data_files/firm_enf.dta", clear +keep if min_dist<50 & starty<=2010 +drop if revenue == . +drop if key == . + +label variable any_air "Any Air Pollution Enforcement" +label variable any_air_shutdown "\quad Suspension" +label variable any_air_fine "\quad Fine" +label variable any_air_renovate "\quad Upgrading" +label variable any_air_warning "\quad Warning" +label variable air "\# Air Pollution Enforcement" +label variable any_water "Any Water Pollu. Enforc." +label variable any_waste "Any Solid Waste Pollu. Enforc." +label variable any_proc "Any Procedure Pollu. Enforc." 
+ +label variable min_dist_10 "Monitor within 10 km" +label variable min_dist "Distance to Monitor (km)" +label variable starty "Year Started" +label variable employment "Employment" +label variable revenue "Revenue" +label variable up "Upwind Firms" + +eststo clear +estpost summarize any_air* air any_water any_waste any_proc up +esttab using "$out_files/TableC1a1.tex", replace cells("mean(fmt(a2)) sd(fmt(a2)) count") noobs nolines nogaps nodepvar compress fragment nonumbers label mlabels(none) tex +display "Periods: " +display "Frequency: " + +keep if year==2010 & quarter==1 + +gen state = (ownership==1 | ownership==2) +gen private = (ownership==3) +gen foreign = (ownership==9) +gen rest = (ownership==4|ownership==5) + +label variable state "Owner: SOEs" +label variable private "Owner: Private" +label variable foreign "Owner: Foreign" +label variable rest "Owner: Other" + +eststo clear +estpost summarize min_dist_10 min_dist starty state private foreign rest employment revenue +esttab using "$out_files/TableC1a2.tex", replace cells("mean(fmt(a2)) sd(fmt(a2)) count") noobs nolines nogaps nodepvar compress fragment nonumbers label mlabels(none) tex +display "Periods: " +display "Frequency: " + +use "$data_files/city_pm.dta", clear +drop if pm25 == . 
+ +label variable number "\# Monitors" +label variable area "Size of Built-up Area (km2)" +label variable pop "Urban Population (10,000)" +label variable age_year "Age of the Mayor" +label variable pre "Precipitation (mm)" +label variable tem_mean "Mean Temperature" +label variable pm25 "Aerosol Optical Depth" + +eststo clear +estpost summarize number area pop age_year pre tem_mean pm25 +esttab using "$out_files/TableC1b1.tex", replace cells("mean(fmt(a2)) sd(fmt(a2)) count") noobs nolines nogaps nodepvar compress fragment nonumbers label mlabels(none) tex +display "Periods: " +display "Frequency: " + +use "$data_files/city_pm.dta", clear +label variable post1 "Post" +label variable number "\# Mon" +label variable number_iv "Min \# Mon" + +merge 1:1 city_cn year month using "$data_files/Raw/baidu.dta" +keep if _merge == 3 +drop _merge + +label variable sear_freq_w1 "Search Index: air pollution" +label variable sear_freq_w2 "Search Index: haze/smoke" +label variable sear_freq_w3 "Search Index: PM25" +label variable sear_freq_w4 "Search Index: air mask" +label variable sear_freq_w5 "Search Index: air purifier" + +eststo clear +estpost summarize sear* +esttab using "$out_files/TableC1b3.tex", replace cells("mean(fmt(a2)) sd(fmt(a2)) count") noobs nolines nogaps nodepvar compress fragment nonumbers label mlabels(none) tex +display "Periods: " +display "Frequency: " + +use "$data_files/city_enf.dta", clear + +gen any_air_total = any_air+any_air_rest +label variable any_air_total "\# Firms Any Air Pollu. Enfor. (incl non-ASIF)" +label variable any_air "\# Firms Any Air Pollu. Enfor." 
+ +eststo clear +estpost summarize any_air any_air_total +esttab using "$out_files/TableC1b2.tex", replace cells("mean(fmt(a2)) sd(fmt(a2)) count") noobs nolines nogaps nodepvar compress fragment nonumbers label mlabels(none) tex +display "Periods: " +display "Frequency: " + +use "$data_files/monitor_api.dta", clear + +label variable pm25api "Particulate Matter 2.5 (PM$\_2.5$)" +label variable pm10api "Particulate Matter 10 (PM$\_10$))" +label variable AQI "Air Quality Index (AQI) " + +eststo clear +estpost summarize pm25api pm10api AQI +esttab using "$out_files/TableC1c.tex", replace cells("mean(fmt(a2)) sd(fmt(a2)) count") noobs nolines nogaps nodepvar compress fragment nonumbers label mlabels(none) substitute(\_ _) tex + +**============================================================================== +* Table C4 +use "$data_files/monitor_api.dta", clear + +gen log_pm25api = log(pm25api) +gen log_pm10api = log(pm10api) +gen log_aqi = log(AQI) +replace log_aqi = . if log_pm25api == . + +rename pm25 AOD +label variable AOD "AOD" + +eststo clear +reghdfe log_pm25api AOD pre tem_mean, a(monitor_id year#month) cl(city_id) +eststo A +estadd ysumm, mean +reghdfe log_pm10api AOD pre tem_mean, a(monitor_id year#month) cl(city_id) +eststo B +estadd ysumm, mean +reghdfe log_aqi AOD pre tem_mean, a(monitor_id year#month) cl(city_id) +eststo C +estadd ysumm, mean +esttab A B C using "$out_files/TableC4.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep(AOD) drop() stats(ymean N, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +**============================================================================== +* Table C5 +use "$data_files/firm_enf.dta", clear +drop if revenue == . +drop if key == . 
+keep if min_dist<50 & starty<=2010 & time==1 + +gen twodigit = int(industry/100) + +lab def twodigit_lb 6 "Mining and Washing of Coal & 6" /// + 7 "Extraction of Petroleum and Natural Gas & 7" /// + 8 "Mining and Processing of Ferrous Metal Ores & 8" /// + 9 "Mining and Processing of Non-Ferrous Metal Ores & 9" /// + 10 "Mining and Processing of Nonmetallic Mineral & 10" /// + 11 "Mining Support & 11" /// + 12 "Other Mining & 12" /// + 13 "Agricultural and Sideline Food Processing & 13" /// + 14 "Fermentation & 14" /// + 15 "Beverage Manufacturing & 15" /// + 16 "Tobacco Manufacturing & 16" /// + 17 "Textile Mills & 17" /// + 18 "Wearing Apparel and Clothing Accessories Manufacturing & 18" /// + 19 "Leather, Fur and Related Products Manufacturing & 19" /// + 20 "Wood and Bamboo Products Manufacturing & 20" /// + 21 "Furniture Manufacturing & 21" /// + 22 "Products Manufacturing & 22" /// + 23 "Printing and Reproduction of Recorded Media & 23" /// + 24 "Education and Entertainment Articles Manufacturing & 24" /// + 25 "Petrochemicals Manufacturing & 25" /// + 26 "Chemical Products Manufacturing& 26" /// + 27 "Medicine Manufacturing & 27" /// + 28 "Chemical Fibers Manufacturing & 28" /// + 29 "Rubber Products Manufacturing & 29" /// + 30 "Plastic Products Manufacturing & 30" /// + 31 "Non-Metallic Mineral Products Manufacturing & 31" /// + 32 "Iron and Steel Smelting & 32" /// + 33 "Non-Ferrous Metal Smelting & 33" /// + 34 "Fabricated Metal Products Manufacturing & 34" /// + 35 "General Purpose Machinery Manufacturing & 35" /// + 36 "Special Purpose Machinery Manufacturing & 36" /// + 37 "Transport Equipment Manufacturing & 37" /// + 38 "Electrical machinery and equipment Manufacturing & 38" /// + 39 "Electrical Equipment Manufacturing & 39" /// + 40 "Computers and Electronic Products Manufacturing & 40" /// + 41 "General Instruments and Other Equipment Manufacturing & 41" /// + 42 "Craft-works Manufacturing & 42" /// + 43 "Renewable Materials Recovery & 43" /// + 
44 "Electricity and Heat Supply & 44" /// + 45 "Gas Production and Supply & 45" /// + 46 "Water Production and Supply & 46", add + +label values twodigit twodigit_lb + +eststo clear + +estpost tabulate twodigit +esttab using "$out_files/TableC5.tex", replace tex cells("b(label(freq)) pct(fmt(2))") varlabels(`e(labels)', blist(Total)) nolines nogaps compress fragment label + + +**============================================================================== +* Table C6 +use "$data_files/Raw/daily_monitor_api.dta", clear + +* Construct daily indicators +gen above_100=0 & AQI!=. +replace above_100=1 if AQI>=100 & AQI!=. +gen above_200=0 & AQImax!=. +replace above_200=1 if AQImax>=200 & AQImax!=. + +* Collapse data to monthly level +collapse (mean) above_200 pm25api pm10api AQI, by(year month city_id) + +* Merge with weather data +merge 1:1 city_id year month using "$data_files/weather_monthly.dta" +keep if _merge == 3 +drop _merge + +gen quarter = int((month-1)/3)+1 +collapse (mean) above_200 pm25api pm10api AQI pre tem_mean, by(year quarter city_id) + +* Construct monthly variables +egen time=group(year quarter) +replace pre=. if pre==-9999 + +bysort city_id: egen med_pre=median(pre) +gen high_pre=0 if pre!=. +replace high_pre=1 if pre>med_pre & pre!=. 
+label var high_pre "\$Rain_{>\tilde{x}}$" + +gen log_pre=log(pre) +gen log_api25=log(pm25api) +gen log_aqi=log(AQI) +gen log_api10=log(pm10api) + +gen tem_meand = int(tem_mean) + +* Monthly pollution regressions +reghdfe log_api25 high_pre, absorb(time city_id tem_meand) cluster(city_id) +eststo A +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe log_api10 high_pre, absorb(time city_id tem_meand) cluster(city_id) +eststo B +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe log_aqi high_pre, absorb(time city_id tem_meand) cluster(city_id) +eststo C +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe above_200 high_pre if AQI!=., absorb(time city_id tem_meand) cluster(city_id) +eststo D +estadd ysumm, mean +estadd scalar EN = e(N_full) + +esttab A B C D using "$out_files/TableC6.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() drop(_cons) stats(ymean EN, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +**============================================================================== +* Table C7 +use "$data_files/firm_enf.dta", clear +drop if revenue == . +drop if key == . 
+ +label var min_dist_10 "Mon\$\_{<10km}\$" +label var any_air "Any Enforcement (0/1)" +label var post "Post" +label var key "High Pollution" + +* Table +eststo clear +reghdfe any_air c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo A +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local FirmFE = "Yes" +estadd local ITFE = "Yes" +estadd local PTFE = "Yes" +reghdfe any_water c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo B +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local FirmFE = "Yes" +estadd local ITFE = "Yes" +estadd local PTFE = "Yes" +reghdfe any_waste c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo C +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local FirmFE = "Yes" +estadd local ITFE = "Yes" +estadd local PTFE = "Yes" +reghdfe any_proc c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo D +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local FirmFE = "Yes" +estadd local ITFE = "Yes" +estadd local PTFE = "Yes" +esttab A B C D using "$out_files/TableC7a.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep(c.min_dist_10*) stats(ymean EN FirmFE ITFE PTFE, labels("Mean Outcome" "Observations" "Firm FE" "Industry-Time FE" "Province-Time FE")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +merge m:1 city_id using "$data_files/Raw/city_info.dta", keepusing(disttocoast) +keep if _merge == 3 +drop _merge + +eststo clear +reghdfe any_air c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo A +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local FirmFE = "Yes" +estadd local ITFE = "Yes" 
+estadd local PTFE = "Yes" +estadd local DTFE = "No" +estadd local FCTFE = "No" +estadd local CTFE = "No" +reghdfe any_air c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time c.disttocoast#time) cluster(city_id) +eststo B +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local FirmFE = "Yes" +estadd local ITFE = "Yes" +estadd local PTFE = "Yes" +estadd local DTFE = "Yes" +estadd local FCTFE = "No" +estadd local CTFE = "No" +reghdfe any_air c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time c.disttocoast#time c.employment#time) cluster(city_id) +eststo C +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local FirmFE = "Yes" +estadd local ITFE = "Yes" +estadd local PTFE = "Yes" +estadd local DTFE = "Yes" +estadd local FCTFE = "Yes" +estadd local CTFE = "No" +reghdfe any_air c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time city_id#time) cluster(city_id) +eststo D +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local FirmFE = "Yes" +estadd local ITFE = "Yes" +estadd local PTFE = "No" +estadd local DTFE = "No" +estadd local FCTFE = "Yes" +estadd local CTFE = "Yes" +esttab A B C D using "$out_files/TableC7b.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep(c.min_dist_10*) stats(ymean EN DTFE FCTFE CTFE FirmFE ITFE PTFE, labels("Mean Outcome" "Observations" "Distance to coast-Time FE" "Firm characteristics-Time FE" "City-Time FE" "Firm FE" "Industry-Time FE" "Province-Time FE" )) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +**============================================================================== +* Table C8 +use "$data_files/Raw/city_info.dta", clear +merge 1:1 city_id using "$data_files/share.dta" +drop if _merge == 2 +drop _merge + +label variable number "\# Monitors" +label variable number_iv "Min \# Monitors" + +regress 
share_rev_10 number, r +eststo A +estadd ysumm, mean +regress share_emp_10 number, r +eststo B +estadd ysumm, mean +regress share_rev_5 number, r +eststo C +estadd ysumm, mean +regress share_emp_5 number, r +eststo D +estadd ysumm, mean +esttab A B C D using "$out_files/TableC8a.tex", replace b(a2) noconstant se(a2) label nolines nogaps compress fragment nonumbers mlabels(none) collabels() drop(_cons) stats(ymean N, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +ivregress 2sls share_rev_10 (number = number_iv), r +eststo A +estadd ysumm, mean +ivregress 2sls share_emp_10 (number = number_iv), r +eststo B +estadd ysumm, mean +ivregress 2sls share_rev_5 (number = number_iv), r +eststo C +estadd ysumm, mean +ivregress 2sls share_emp_5 (number = number_iv), r +eststo D +estadd ysumm, mean +esttab A B C D using "$out_files/TableC8b.tex", replace b(a2) noconstant se(a2) label nolines nogaps compress fragment nonumbers mlabels(none) collabels() drop(_cons) stats(ymean N, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +**============================================================================== +* Table C9 +* Robustness: additional controls +use "$data_files/city_pm.dta", clear + +label variable post1 "Post" +label variable number "\# Mon" +label variable number_iv "Min \# Mon" + +gen RD_Estimate = c.post1#c.number + +eststo clear +reghdfe pm25 RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +estadd scalar EN = e(N_full) +eststo A +ivreghdfe pm25 c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +estadd scalar EN = e(N_full) +eststo B +reghdfe pm25 RD_Estimate i.time#c.area i.time#c.pop, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +estadd scalar EN = e(N_full) +eststo C 
* ---- Table C9a (cont.): AOD, columns D-F ------------------------------------
* NOTE(review): OLS column C omits i.time#c.background while IV column D
* includes it -- confirm the asymmetry across the OLS/IV pair is intentional.
ivreghdfe pm25 i.time#c.area i.time#c.pop i.time#c.background (RD_Estimate=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo D
reghdfe pm25 RD_Estimate i.time#c.area i.time#c.pop i.time#c.background i.time#c.GDP, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo E
ivreghdfe pm25 i.time#c.area i.time#c.pop i.time#c.background i.time#c.GDP (RD_Estimate=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo F
* NOTE(review): transform() pattern lists 4 entries for 6 stored models --
* verify esttab applies the intended (identity) transform to every column.
esttab A B C D E F using "$out_files/TableC9a.tex", tex keep(RD_Estimate) transform(@/1, pattern(0 0 0 0)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN, labels( "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01)

* ---- Table C9b: enforcement outcome, same ladder of controls ----------------
use "$data_files/city_enf.dta", clear

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen RD_Estimate = c.post1#c.number

eststo clear
reghdfe log_any_air RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
estadd local CFE = "Yes"
estadd local TTFE = "Yes"
estadd local CSPost = "Yes"
estadd local CSTFE = "No"
estadd local CCTFE = "No"
estadd local Weather = "Yes"
eststo A
ivreghdfe log_any_air c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
estadd local CFE = "Yes"
estadd local TTFE = "Yes"
estadd local CSPost = "Yes"
estadd local CSTFE = "No"
estadd local CCTFE = "No"
estadd local Weather = "Yes"
eststo B
reghdfe log_any_air RD_Estimate i.time#c.area i.time#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
estadd local CFE = "Yes"
estadd local TTFE = "Yes"
estadd local CSPost = "No"
estadd local CSTFE = "Yes"
estadd local CCTFE = "No"
estadd local Weather = "Yes"
eststo C
ivreghdfe log_any_air i.time#c.area i.time#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
estadd local CFE = "Yes"
estadd local TTFE = "Yes"
estadd local CSPost = "No"
estadd local CSTFE = "Yes"
estadd local CCTFE = "No"
estadd local Weather = "Yes"
eststo D
reghdfe log_any_air RD_Estimate i.time#c.area i.time#c.pop i.time#c.background i.time#c.GDP, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
estadd local CFE = "Yes"
estadd local TTFE = "Yes"
estadd local CSPost = "No"
estadd local CSTFE = "Yes"
estadd local CCTFE = "Yes"
estadd local Weather = "Yes"
eststo E
ivreghdfe log_any_air i.time#c.area i.time#c.pop i.time#c.background i.time#c.GDP (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
estadd local CFE = "Yes"
estadd local TTFE = "Yes"
estadd local CSPost = "No"
estadd local CSTFE = "Yes"
estadd local CCTFE = "Yes"
estadd local Weather = "Yes"
eststo F
esttab A B C D E F using "$out_files/TableC9b.tex", tex keep(RD_Estimate) transform(@/1, pattern(0 0 0 0)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN CFE TTFE CSPost CSTFE CCTFE Weather, labels("Observations" "City FE" "Target-Time FE" "City size $\times$ Post" "City size-Time FE" "City char.-Time FE" "Weather")) starlevels(* 0.10 ** 0.05 *** 0.01)

**==============================================================================
* Table C10
* Robustness: Sample Restriction
* Drops province codes 54 and 65 (presumably Tibet and Xinjiang -- confirm).
use "$data_files/city_pm.dta", clear

gen prov_id = int(city_id/100)
drop if prov_id == 54 | prov_id == 65

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen RD_Estimate = c.post1#c.number

reghdfe pm25 RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo A
ivreghdfe pm25 c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo B

use "$data_files/city_pm_rd.dta", clear

gen prov_id = int(city_id/100)
drop if prov_id == 54 | prov_id == 65

* Running variable: built-up area relative to the relevant monitor cutoff.
gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench = pm25 if year < 2012
bys city_id cutoff: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust pm25 dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench year month) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
* FIX: triple interaction previously used c.post#c.above#c.dist1 while the main
* effects and every sibling specification (e.g. Tables C10b/C11) use post1 --
* corrected to c.post1#c.above#c.dist1.
reghdfe pm25 RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N_full)
estadd scalar band = 11.3
eststo D
esttab A B C D using "$out_files/TableC10a.tex", tex keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN, labels( "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01)

use "$data_files/city_enf.dta", clear

gen prov_id = int(city_id/100)
drop if prov_id == 54 | prov_id == 65

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen RD_Estimate = c.post1#c.number

reghdfe log_any_air RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo A
ivreghdfe log_any_air c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo B

use "$data_files/city_enf_rd.dta", clear

gen prov_id = int(city_id/100)
drop if prov_id == 54 | prov_id == 65

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench = log_any_air if year < 2012
bys city_id cutoff: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust log_any_air dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
estadd local kern = "Uniform"
eststo C
reghdfe log_any_air RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N_full)
estadd scalar band = 11.3
estadd local kern = "Uniform"
eststo D
esttab A B C D using "$out_files/TableC10b.tex", tex keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN kern band, labels("Observations" "Kernel" "Bandwidth")) starlevels(* 0.10 ** 0.05 *** 0.01)

**==============================================================================
* Table C11
* asif vs all: total firms (ASIF + rest) and non-ASIF firms as outcomes.
use "$data_files/city_enf.dta", clear

gen log_any_air_total = log(any_air+any_air_rest+1)

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen RD_Estimate = c.post1#c.number

reghdfe log_any_air_total RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo A
ivreghdfe log_any_air_total c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo B

use "$data_files/city_enf_rd.dta", clear

gen log_any_air_total = log(any_air+any_air_rest+1)

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench = log_any_air_total if year < 2012
bys city_id cutoff: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust log_any_air_total dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench year quarter) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
reghdfe log_any_air_total RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N_full)
estadd scalar band = 11.3
eststo D
esttab A B C D using "$out_files/TableC11a.tex", tex keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN, labels("Observations")) starlevels(* 0.10 ** 0.05 *** 0.01)


use "$data_files/city_enf.dta", clear

gen log_any_air_rest = log(any_air_rest+1)

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen RD_Estimate = c.post1#c.number

reghdfe log_any_air_rest RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo A
ivreghdfe log_any_air_rest c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
estadd scalar EN = e(N_full)
eststo B

use "$data_files/city_enf_rd.dta", clear

gen log_any_air_rest = log(any_air_rest+1)

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench = log_any_air_rest if year < 2012
bys city_id cutoff: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust log_any_air_rest dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench year quarter) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
reghdfe log_any_air_rest RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N_full)
estadd scalar band = 11.3
eststo D
esttab A B C D using "$out_files/TableC11b.tex", tex keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN, labels("Observations")) starlevels(* 0.10 ** 0.05 *** 0.01)


**==============================================================================
* Table C12
* kernel and covs: RD robustness to kernel choice and dropping covariates.
use "$data_files/city_pm_rd.dta", clear

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench = pm25 if year < 2012
* NOTE(review): benchmark here is pooled over cutoffs (bys city_id only),
* unlike Tables C10/C11 which use bys city_id cutoff -- confirm intended.
bys city_id: egen mean_bench = mean(bench)

eststo clear
rdrobust pm25 dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench year month) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo A
* Columns B-D: data-driven bandwidth (no h() option).
rdrobust pm25 dist1 if year>=2015, fuzzy(number) p(1) covs(cutoff mean_bench year month) kernel(tri) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo B
rdrobust pm25 dist1 if year>=2015, fuzzy(number) p(1) covs(cutoff mean_bench year month) kernel(epa) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
rdrobust pm25 dist1 if year>=2015, fuzzy(number) p(1) covs() kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo D
esttab A B C D using "$out_files/TableC12a1.tex", tex keep(RD_Estimate) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN band, labels("Observations" "Bandwidth")) starlevels(* 0.10 ** 0.05 *** 0.01)

* First-stage panel: # monitors on the running variable, matching bandwidths.
eststo clear
rdrobust number dist1 if year>=2015, p(1) h(11.3) covs(cutoff) kernel(uni) vce(cluster city_id)
eststo A
rdrobust number dist1 if year>=2015, p(1) h(12.3) covs(cutoff) kernel(tri) vce(cluster city_id)
eststo B
rdrobust number dist1 if year>=2015, p(1) h(12.5) covs(cutoff) kernel(epa) vce(cluster city_id)
eststo C
rdrobust number dist1 if year>=2015, p(1) h(13.8) covs() kernel(uni) vce(cluster city_id)
eststo D
esttab A B C D using "$out_files/TableC12a2.tex", tex replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers noobs mlabels(none) keep(RD_Estimate) coeflabels(RD_Estimate "First stage") starlevels(* 0.10 ** 0.05 *** 0.01)

use "$data_files/city_enf_rd.dta", clear

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

* NOTE(review): dist2 (population-based running variable) is generated but not
* used in the visible specifications below.
gen dist2 = pop - 25 if cutoff == 1
replace dist2 = pop - 50 if cutoff == 2

gen bench = log_any_air if year < 2012
bys city_id: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

eststo clear
rdrobust log_any_air dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench year quarter) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo A
rdrobust log_any_air dist1 if year>=2015, fuzzy(number) p(1) covs(cutoff mean_bench year quarter) kernel(tri) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo B
rdrobust log_any_air dist1 if year>=2015, fuzzy(number) p(1) covs(cutoff mean_bench year quarter) kernel(epa) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
rdrobust log_any_air dist1 if year>=2015, fuzzy(number) p(1) covs() kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo D
esttab A B C D using "$out_files/TableC12b1.tex", tex keep(RD_Estimate) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN band, labels("Observations" "Bandwidth")) starlevels(* 0.10 ** 0.05 *** 0.01)

eststo clear
rdrobust number dist1 if year>=2015, p(1) h(11.3) covs(cutoff) kernel(uni) vce(cluster city_id)
eststo A
estadd local kern = "Uniform"
estadd local cov = "Yes"
rdrobust number dist1 if year>=2015, p(1) h(13.1) covs(cutoff) kernel(tri) vce(cluster city_id)
eststo B
* FIX: the kernel labels for columns B and C were swapped relative to the
* kernels actually estimated (kernel(tri) was labelled "Epanechnikov" and
* kernel(epa) was labelled "Triangle") -- corrected to match the kernel() used.
estadd local kern = "Triangle"
estadd local cov = "Yes"
rdrobust number dist1 if year>=2015, p(1) h(12.5) covs(cutoff) kernel(epa) vce(cluster city_id)
eststo C
estadd local kern = "Epanechnikov"
estadd local cov = "Yes"
rdrobust number dist1 if year>=2015, p(1) h(11.4) covs() kernel(uni) vce(cluster city_id)
eststo D
estadd local kern = "Uniform"
estadd local cov = "No"
esttab A B C D using "$out_files/TableC12b2.tex", tex replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers noobs mlabels(none) keep(RD_Estimate) coeflabels(RD_Estimate "First stage") stats(kern cov, labels("Kernel" "Covariates")) starlevels(* 0.10 ** 0.05 *** 0.01)


**==============================================================================
* Table C13
* Cutoff 1: use only the 20 km2 threshold as the RD cutoff.
use "$data_files/city_pm.dta", clear

gen dist1 = area - 20

gen bench = pm25 if year < 2012
bys city_id: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

eststo clear
* NOTE(review): covs() uses `quarter` for the monthly AOD panel here, whereas
* the cutoff-2 panel below uses `month` -- confirm which is intended.
rdrobust pm25 dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(mean_bench year quarter) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo A
* FIX: c.post#c.above#c.dist1 -> c.post1#c.above#c.dist1 (post1 everywhere else).
reghdfe pm25 RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 if abs(dist1) < 11.3, a(year#month) cl(city_id)
estadd scalar EN = e(N_full)
estadd scalar band = 11.3
eststo B

use "$data_files/city_enf.dta", clear

gen dist1 = area - 20

gen bench = log_any_air if year < 2012
bys city_id: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust log_any_air dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(mean_bench year quarter) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
reghdfe log_any_air RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N_full)
estadd scalar band = 11.3
eststo D
esttab A B C D using "$out_files/TableC13a1.tex", tex keep(RD_Estimate) transform(@/0.79 1/0.79, pattern(0 1 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN band, labels("Observations" "Bandwidth")) starlevels(* 0.10 ** 0.05 *** 0.01)

eststo clear
rdrobust number dist1 if year>=2015, p(1) h(11.3) kernel(uni) vce(cluster city_id)
eststo A
rdrobust number dist1 if year>=2015, p(1) h(11.3) kernel(uni) vce(cluster city_id)
eststo B
rdrobust number dist1 if year>=2015, p(1) h(11.3) kernel(uni) vce(cluster city_id)
eststo C
rdrobust number dist1 if year>=2015, p(1) h(11.3) covs() kernel(uni) vce(cluster city_id)
eststo D
esttab A B C D using "$out_files/TableC13a2.tex", tex replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers noobs mlabels(none) keep(RD_Estimate) coeflabels(RD_Estimate "First stage") starlevels(* 0.10 ** 0.05 *** 0.01)

* Cutoff 2: use only the 50 km2 threshold as the RD cutoff.
use "$data_files/city_pm.dta", clear

gen dist1 = area - 50

gen bench = pm25 if year < 2012
bys city_id: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

eststo clear
rdrobust pm25 dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(mean_bench year month) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo A
* FIX: c.post#c.above#c.dist1 -> c.post1#c.above#c.dist1 (post1 everywhere else).
reghdfe pm25 RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N_full)
estadd scalar band = 11.3
eststo B

use "$data_files/city_enf.dta", clear

gen dist1 = area - 50

gen bench = log_any_air if year < 2012
bys city_id: egen mean_bench = mean(bench)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust log_any_air dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(mean_bench year quarter) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
reghdfe log_any_air RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N_full)
estadd scalar band = 11.3
eststo D
esttab A B C D using "$out_files/TableC13b1.tex", tex keep(RD_Estimate) transform(@/1.76 1/1.76, pattern(0 1 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN band, labels("Observations" "Bandwidth")) starlevels(* 0.10 ** 0.05 *** 0.01)

* First stage at cutoff 2 (continues in the next section: columns B-D).
eststo clear
rdrobust number dist1 if year>=2015, p(1) h(11.3) kernel(uni) vce(cluster city_id)
eststo A
estadd local kern = "Uniform"
estadd scalar band = 11.3
rdrobust number dist1 if year>=2015, p(1) h(11.3) kernel(uni) vce(cluster city_id)
* ---- Table C13b2 (cont.): first stage at cutoff 2, columns B-D --------------
eststo B
estadd local kern = "Uniform"
estadd scalar band = 11.3
rdrobust number dist1 if year>=2015, p(1) h(11.3) kernel(uni) vce(cluster city_id)
eststo C
estadd local kern = "Uniform"
estadd scalar band = 11.3
rdrobust number dist1 if year>=2015, p(1) h(11.3) covs() kernel(uni) vce(cluster city_id)
eststo D
estadd local kern = "Uniform"
* FIX: was `estadd local band = 11.3`; columns A-C store band as a scalar and
* the stats(kern band, ...) row expects a numeric -- use scalar here as well.
estadd scalar band = 11.3
esttab A B C D using "$out_files/TableC13b2.tex", tex replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers noobs mlabels(none) keep(RD_Estimate) coeflabels(RD_Estimate "First stage") stats(kern band, labels("Kernel" "Bandwidth")) starlevels(* 0.10 ** 0.05 *** 0.01)


**==============================================================================
* Table C14
* Spillover in aod: monitor-location vs direct/indirect AOD measures.
use "$data_files/city_pm_rd.dta", clear

rename pm pm_monitor
* Keep a balanced set of observations across all three AOD measures.
drop if pm_monitor == .
drop if pm_direct == .
drop if pm_indirect == .

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

* Pre-2012 city-level benchmark of monitor-location AOD (RD covariate).
gen bench_monitor = pm_monitor if year < 2012
bys city_id cutoff: egen mean_bench_monitor = mean(bench_monitor)

* Placeholder; redefined below as the DiD or RD treatment for each panel.
gen RD_Estimate = .
gen above = dist1 > 0

* ---- Table C14a: AOD measured at monitor locations --------------------------
replace RD_Estimate = c.post1#c.number
eststo clear
reghdfe pm_monitor RD_Estimate c.post1#c.area c.post1#c.pop if cutoff == 1, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo A
summ pm_monitor
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
ivreghdfe pm_monitor c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv) if cutoff == 1, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo B
summ pm_monitor
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
rdrobust pm_monitor dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench_monitor year month) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
summ pm_monitor if abs(dist1)<11.3
estadd scalar ysumm = r(mean)
eststo C
replace RD_Estimate = c.post1#c.above
* FIX: triple interaction used c.post instead of post1 (the variable used in
* the main effects and all sibling RD specs) -- corrected to c.post1#...
reghdfe pm_monitor RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N)
summ pm_monitor if abs(dist1)<11.3
estadd scalar ysumm = r(mean)
eststo D
esttab A B C D using "$out_files/TableC14a.tex", keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN ysumm, labels("Observations" "Mean Outcome")) starlevels(* 0.10 ** 0.05 *** 0.01) tex

* ---- Reload for Tables C14b/C14c: direct vs indirect AOD --------------------
use "$data_files/city_pm_rd.dta", clear

rename pm pm_monitor
drop if pm_monitor == .
drop if pm_direct == .
drop if pm_indirect == .

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench_dir = pm_direct if year < 2012
bys city_id cutoff: egen mean_bench_dir = mean(bench_dir)

gen bench_in = pm_indirect if year < 2012
bys city_id cutoff: egen mean_bench_in = mean(bench_in)

* Placeholder; redefined below per panel.
gen RD_Estimate = .
gen above = dist1 > 0

* ---- Table C14b: AOD near monitors ("direct") -------------------------------
replace RD_Estimate = c.post1#c.number
eststo clear
reghdfe pm_direct RD_Estimate c.post1#c.area c.post1#c.pop if cutoff == 1, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo A
summ pm_direct
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
ivreghdfe pm_direct c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv) if cutoff == 1, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo B
summ pm_direct
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
rdrobust pm_direct dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench_dir year month) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
summ pm_direct if abs(dist1) < 11.3
estadd scalar ysumm = r(mean)
eststo C
replace RD_Estimate = c.post1#c.above
* FIX: c.post#c.above#c.dist1 -> c.post1#c.above#c.dist1 (post1 everywhere else).
reghdfe pm_direct RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
estadd scalar EN = e(N)
summ pm_direct if abs(dist1) < 11.3
estadd scalar ysumm = r(mean)
eststo D
esttab A B C D using "$out_files/TableC14b.tex", keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN ysumm, labels("Observations" "Mean Outcome")) starlevels(* 0.10 ** 0.05 *** 0.01) tex

* ---- Table C14c: AOD away from monitors ("indirect") ------------------------
replace RD_Estimate = c.post1#c.number
eststo clear
reghdfe pm_indirect RD_Estimate c.post1#c.area c.post1#c.pop if cutoff == 1, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo A
summ pm_indirect
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
* NOTE(review): the extra controls `pre tem_mean` appear only in this column and
* look like truncations of `pred tem_meand` (already absorbed) -- verify these
* variable names against the dataset; left unchanged pending confirmation.
ivreghdfe pm_indirect c.post1#c.area c.post1#c.pop pre tem_mean (RD_Estimate=c.post1#c.number_iv) if cutoff == 1, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo B
summ pm_indirect
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
rdrobust pm_indirect dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench_in year month) kernel(uni) vce(cluster city_id)
estadd scalar EN = e(N_h_l) + e(N_h_r)
summ pm_indirect if abs(dist1) < 11.3
estadd scalar ysumm = r(mean)
eststo C
replace RD_Estimate = c.post1#c.above
* FIX: c.post#c.above#c.dist1 -> c.post1#c.above#c.dist1 (post1 everywhere else).
reghdfe pm_indirect RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
summ pm_indirect if abs(dist1) < 11.3
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N)
eststo D
esttab A B C D using "$out_files/TableC14c.tex", keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN ysumm, labels("Observations" "Mean Outcome")) starlevels(* 0.10 ** 0.05 *** 0.01) tex

* Spillover in enf: enforcement within 10/20/50 (distance bands) of monitors.
use "$data_files/city_enf.dta", clear

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen RD_Estimate = c.post1#c.number

* ---- Table C14d: log_any_air_10 ---------------------------------------------
eststo clear
reghdfe log_any_air_10 RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
summ log_any_air_10
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
eststo A
ivreghdfe log_any_air_10 c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
summ log_any_air_10
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
eststo B

use "$data_files/city_enf_rd.dta", clear

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench_10 = log_any_air_10 if year < 2012
bys city_id cutoff: egen mean_bench_10 = mean(bench_10)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust log_any_air_10 dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench_10 year quarter) kernel(uni) vce(cluster city_id)
summ log_any_air_10 if abs(dist1)<11.3
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
reghdfe log_any_air_10 RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
summ log_any_air_10 if abs(dist1)<11.3
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N)
estadd scalar band = 11.3
eststo D
esttab A B C D using "$out_files/TableC14d.tex", keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN ysumm, labels("Observations" "Mean Outcome")) starlevels(* 0.10 ** 0.05 *** 0.01) tex


* ---- Table C14e: log_any_air_20 ---------------------------------------------
use "$data_files/city_enf.dta", clear

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen RD_Estimate = c.post1#c.number

eststo clear
reghdfe log_any_air_20 RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
summ log_any_air_20
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
eststo A
ivreghdfe log_any_air_20 c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
summ log_any_air_20
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
eststo B

use "$data_files/city_enf_rd.dta", clear

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench_20 = log_any_air_20 if year < 2012
bys city_id cutoff: egen mean_bench_20 = mean(bench_20)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust log_any_air_20 dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench_20 year quarter) kernel(uni) vce(cluster city_id)
summ log_any_air_20 if abs(dist1)<11.3
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd scalar band = e(h_l)
eststo C
reghdfe log_any_air_20 RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
summ log_any_air_20 if abs(dist1)<11.3
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N)
estadd scalar band = 11.3
eststo D
esttab A B C D using "$out_files/TableC14e.tex", keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN ysumm, labels("Observations" "Mean Outcome")) starlevels(* 0.10 ** 0.05 *** 0.01) tex

* ---- Table C14f: log_any_air_50 ---------------------------------------------
use "$data_files/city_enf.dta", clear

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen RD_Estimate = c.post1#c.number

eststo clear
* NOTE(review): unlike C14d/C14e, the absorb() list here omits year#quarter --
* confirm whether the time FE were dropped intentionally for this outcome.
reghdfe log_any_air_50 RD_Estimate c.post1#c.area c.post1#c.pop, a(city_id pred tem_meand age_year incentive2#time) cluster(city_id)
summ log_any_air_50
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
eststo A
ivreghdfe log_any_air_50 c.post1#c.area c.post1#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id pred tem_meand age_year incentive2#time) cluster(city_id)
summ log_any_air_50
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_full)
eststo B

use "$data_files/city_enf_rd.dta", clear

gen dist1 = area - 20 if cutoff == 1
replace dist1 = area - 50 if cutoff == 2

gen bench_50 = log_any_air_50 if year < 2012
bys city_id cutoff: egen mean_bench_50 = mean(bench_50)

gen above = dist1 > 0
gen RD_Estimate = c.post1#c.above

rdrobust log_any_air_50 dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench_50 year quarter) kernel(uni) vce(cluster city_id)
summ log_any_air_50 if abs(dist1)<11.3
* ---- Table C14f (cont.): RD columns C-D -------------------------------------
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N_h_l) + e(N_h_r)
estadd local kern = "Uniform"
estadd scalar band = 11.3
eststo C
reghdfe log_any_air_50 RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id)
summ log_any_air_50 if abs(dist1)<11.3
estadd scalar ysumm = r(mean)
estadd scalar EN = e(N)
estadd local kern = "Uniform"
estadd scalar band = 11.3
eststo D
esttab A B C D using "$out_files/TableC14f.tex", keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN ysumm kern band, labels("Observations" "Mean Outcome" "Kernel" "Bandwidth")) starlevels(* 0.10 ** 0.05 *** 0.01) tex


**==============================================================================
* Table C15
* Promotion: heterogeneity by mayor age (promotion incentives).
use "$data_files/city_pm.dta", clear

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

* above57 = 1 when age <= 57, i.e. "Below 58" as labelled; the variable NAME is
* misleading but is kept because the esttab coeflabels() below reference it.
gen above57 = age<=57
gen RD_Estimate = c.post1#c.number
label variable above57 "Below 58"

eststo clear
* Columns A-D progressively narrow the mayor-age window around the cutoff.
reghdfe pm25 RD_Estimate c.post1#c.area c.post1#c.pop c.RD_Estimate#c.above57, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo A
estadd ysumm, mean
estadd scalar EN = e(N_full)
reghdfe pm25 RD_Estimate c.post1#c.area c.post1#c.pop c.RD_Estimate#c.above57 if age >= 51 & age <= 62, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo B
estadd ysumm, mean
estadd scalar EN = e(N_full)
reghdfe pm25 RD_Estimate c.post1#c.area c.post1#c.pop c.RD_Estimate#c.above57 if age >= 53 & age <= 62, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo C
estadd ysumm, mean
estadd scalar EN = e(N_full)
reghdfe pm25 RD_Estimate c.post1#c.area c.post1#c.pop c.RD_Estimate#c.above57 if age >= 55 & age <= 60, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id)
eststo D
estadd ysumm, mean
estadd scalar EN = e(N_full)
esttab A B C D using "$out_files/TableC15a.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep(RD_Estimate c.RD_Estimate#c.above57) coeflabels(RD_Estimate "\# Monitors" c.RD_Estimate#c.above57 "\# Monitors $\times$ Below 58") stats(ymean EN, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex

* Panel b: same heterogeneity for the enforcement outcome.
use "$data_files/city_enf.dta", clear

label variable post1 "Post"
label variable number "\# Mon"
label variable number_iv "Min \# Mon"

gen above57 = age <= 57
gen RD_Estimate = c.post1#c.number
label variable above57 "Below 58"

eststo clear
reghdfe log_any_air RD_Estimate c.post1#c.area c.post1#c.pop c.RD_Estimate#c.above57, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
eststo A
estadd ysumm, mean
estadd scalar EN = e(N_full)
reghdfe log_any_air RD_Estimate c.post1#c.area c.post1#c.pop c.RD_Estimate#c.above57 if age >= 51 & age <= 62, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
eststo B
estadd ysumm, mean
estadd scalar EN = e(N_full)
reghdfe log_any_air RD_Estimate c.post1#c.area c.post1#c.pop c.RD_Estimate#c.above57 if age >= 53 & age <= 62, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
eststo C
estadd ysumm, mean
estadd scalar EN = e(N_full)
reghdfe log_any_air RD_Estimate c.post1#c.area c.post1#c.pop c.RD_Estimate#c.above57 if age >= 55 & age <= 60, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id)
eststo D
estadd ysumm, mean
estadd scalar EN = e(N_full)
esttab A B C D using "$out_files/TableC15b.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep(RD_Estimate c.RD_Estimate#c.above57) coeflabels(RD_Estimate "\# Monitors" c.RD_Estimate#c.above57 "\# Monitors $\times$ Below 58") stats(ymean EN, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex


**==============================================================================
* Table C16
* Balance: pre-2015 city characteristics by mayor-age group.
use "$data_files/city_pm.dta", clear
keep if year < 2015

replace light = log(light)
gen log_any_air = log(any_air+1)

* City-level pre-period means (one row per city).
collapse (mean) pm25 light log_any_air number area pop age city_id, by(city_cn)
* NOTE(review): here above57 = age > 57, the OPPOSITE of Table C15's
* above57 = age <= 57 -- confirm the reversed definition is intentional
* (e.g. balance across the complementary group).
gen above57 = age > 57

label variable number "\# Monitors"
label variable area "Size of buildup area"
label variable pop "Urban population"
label variable pm25 "AOD before 2015"
label variable light "Night light before 2015"
label variable log_any_air "log(\# Firms) before 2015"

* Joint balance tests at each age window (F-test of all characteristics).
regress above57 number area pop pm25 light log_any_air
test number area pop pm25 light log_any_air
regress above57 number area pop pm25 light log_any_air if age >= 51 & age <= 62
test number area pop pm25 light log_any_air
regress above57 number area pop pm25 light log_any_air if age >= 53 & age <= 62
test number area pop pm25 light log_any_air
regress above57 number area pop pm25 light log_any_air if age >= 55 & age <= 60
test number area pop pm25 light log_any_air

* Summary stats and group-difference t-tests; pvalue rows are hard-coded --
* presumably the joint-test p-values from above (TODO confirm they match).
eststo clear
eststo tot: estpost summarize number area pop pm25 light log_any_air
eststo treat1: estpost summarize number area pop pm25 light log_any_air if above57==1
eststo control1: estpost summarize number area pop pm25 light log_any_air if above57==0
eststo diff1: estpost ttest number area pop pm25 light log_any_air, by(above57)
eststo diff1: estadd scalar pvalue = 0.19
eststo diff2: estpost ttest number area pop pm25 light log_any_air if age >= 51 & age <= 62, by(above57)
eststo diff2: estadd scalar pvalue = 0.15
eststo diff3: estpost ttest number area pop pm25 light log_any_air if age >= 53 & age <= 62, by(above57)
eststo diff3: estadd scalar pvalue = 0.29
eststo diff4: estpost
ttest number area pop pm25 light log_any_air if age >= 55 & age <= 60, by(above57) +eststo diff4: estadd scalar pvalue = 0.37 +esttab tot treat1 control1 diff1 diff2 diff3 diff4 using "$out_files/TableC16.tex", tex label noconstant nolines nogaps compress fragment nonumbers mlabels(,none) collabels(,none) cells(mean(pattern(1 1 1 0 0 0 0) fmt(a2)) & b(star pattern(0 0 0 1 1 1 1) fmt(a2)) sd(pattern(1 1 1 0 0 0 0) fmt(a2) par) & se(pattern(0 0 0 1 1 1 1) fmt(a2) par)) stats(N pvalue, labels("Observations" "Joint Test (p-value)")) starlevels(* 0.10 ** 0.05 *** 0.01) replace + + +**============================================================================== +* Table C17 + +use "$data_files/city_pm.dta", clear +label variable post1 "Post" +label variable number "\# Mon" +label variable number_iv "Min \# Mon" + +merge 1:1 city_cn year month using "$data_files/Raw/baidu.dta" +keep if _merge == 3 +drop _merge + +replace sear_freq_w1 = sear_freq_w1/pop +replace sear_freq_w2 = sear_freq_w2/pop +replace sear_freq_w3 = sear_freq_w3/pop +replace sear_freq_w4 = sear_freq_w4/pop +replace sear_freq_w5 = sear_freq_w5/pop + +foreach x of varlist sear_freq_w* { + egen m_`x' = mean(`x') + egen sd_`x' = sd(`x') + gen std_`x' = (`x'- m_`x')/sd_`x' +} + +forvalues i=1/5 { +replace sear_freq_w`i'=0 if sear_freq_w`i'==. +gen log_w`i'=log(sear_freq_w`i'+1) +gen any_w`i'=0 +replace any_w`i'=1 if sear_freq_w`i'>0 & sear_freq_w`i'!=. 
+} + +eststo clear +ivreghdfe log_w1 i.time#c.area i.time#c.pop (c.post1#c.number=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +eststo A +summ log_w1 +estadd scalar ymean = r(mean) +ivreghdfe log_w2 i.time#c.area i.time#c.pop (c.post1#c.number=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +eststo B +summ log_w2 +estadd scalar ymean = r(mean) +ivreghdfe log_w3 i.time#c.area i.time#c.pop (c.post1#c.number=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +eststo C +summ log_w3 +estadd scalar ymean = r(mean) +ivreghdfe log_w4 i.time#c.area i.time#c.pop (c.post1#c.number=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +eststo D +summ log_w4 +estadd scalar ymean = r(mean) +ivreghdfe log_w5 i.time#c.area i.time#c.pop (c.post1#c.number=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +eststo E +summ log_w5 +estadd scalar ymean = r(mean) +estfe A B C D E, labels(city_id "City FE" time "Time FE") +return list +esttab A B C D E using "$out_files/TableC17.tex", replace b(a2) se(a2) keep(c.post1#c.number) coeflabels(c.post1#c.number "\# Monitors") label noconstant nolines nogaps compress fragment nonumbers mlabels(none) collabels() stats(ymean N, labels( "Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) + + +**============================================================================== +** Figures + +**============================================================================== +* Figure D3 +use "$data_files/firm_enf.dta", clear + +fvset base 4 min_d_d4 + +reghdfe any_air i.post##i.min_d_d4 if min_dist<50 & starty<=2010, absorb(id time industry#time prov_id#time) cluster(city_id) poolsize(1) compact +coefplot, baselevels omitted vert yline(0, lc(cranberry)) keep(1.post1#*) 
coeflabels(1.post1#0.min_d_d4 = "0-5km" 1.post1#1.min_d_d4 = "5-10km" 1.post1#2.min_d_d4 = "10-15km" 1.post1#3.min_d_d4 = "15-20km" 1.post1#4.min_d_d4 = "20-50km") drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle() le(95) +graph export "$out_files/any_air_gradient_50_ci.pdf", replace + + +**============================================================================== +* Figure D5 +use "$data_files/firm_enf.dta", clear + +merge m:1 city_id using "$data_files/Raw/city_info.dta", keepusing(env_lat env_lon centroid_lat centroid_lon) +keep if _merge == 3 +drop _merge + +geodist env_lat env_lon lat lon, gen(env_dist) +gen min_env_d4 = int(env_dist/5) +replace min_env_d4 = 4 if min_env_d4 > 4 + +* set base level +fvset base 20 time +fvset base 4 min_env_d4 + +geodist centroid_lat centroid_lon lat lon, gen(cen_dist) +gen min_cen_d4 = int(cen_dist/5) +replace min_cen_d4 = 4 if min_cen_d4 > 4 + +* set base level +fvset base 20 time +fvset base 4 min_cen_d4 + +reghdfe any_air i.time##i.min_env_d4 i.post##i.min_d_d4 i.post##i.min_cen_d4 if env_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) poolsize(1) compact + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#0.min_env_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_5_) replace + +twoway (rarea enf_5_ul1 enf_5_ll1 enf_5_at, color(gs6%20)) (scatter enf_5_b enf_5_at, msymbol(p) mc(black%60)) (line enf_5_b enf_5_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 
"2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.01 0.01)) ylab(-0.01(0.005)0.01) +graph export "$out_files/any_air_env_event_5.pdf", replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#1.min_env_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_10_) replace + +twoway (rarea enf_10_ul1 enf_10_ll1 enf_10_at, color(gs6%20)) (scatter enf_10_b enf_10_at, msymbol(p) mc(black%60)) (line enf_10_b enf_10_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.01 0.01)) ylab(-0.01(0.005)0.01) +graph export "$out_files/any_air_env_event_10.pdf", replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#2.min_env_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_15_) replace + +twoway (rarea enf_15_ul1 enf_15_ll1 enf_15_at, color(gs6%20)) (scatter enf_15_b enf_15_at, msymbol(p) mc(black%60)) (line enf_15_b enf_15_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.01 0.01)) ylab(-0.01(0.005)0.01) +graph export
"$out_files/any_air_env_event_15.pdf", replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#3.min_env_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_20_) replace + +twoway (rarea enf_20_ul1 enf_20_ll1 enf_20_at, color(gs6%20)) (scatter enf_20_b enf_20_at, msymbol(p) mc(black%60)) (line enf_20_b enf_20_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.01 0.01)) ylab(-0.01(0.005)0.01) +graph export "$out_files/any_air_env_event_20.pdf", replace + + + +reghdfe any_air i.time##i.min_cen_d4 i.post##i.min_d_d4 i.post##i.min_env_d4 if cen_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) poolsize(1) compact + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#0.min_cen_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_5_) replace + +twoway (rarea enf_5_ul1 enf_5_ll1 enf_5_at, color(gs6%20)) (scatter enf_5_b enf_5_at, msymbol(p) mc(black%60)) (line enf_5_b enf_5_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 
"2016" 29 "2017") ysc(r(-0.01 0.01)) ylab(-0.01(0.005)0.01) +graph export "$out_files/any_air_cen_event_5.pdf", replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#1.min_cen_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_10_) replace + +twoway (rarea enf_10_ul1 enf_10_ll1 enf_10_at, color(gs6%20)) (scatter enf_10_b enf_10_at, msymbol(p) mc(black%60)) (line enf_10_b enf_10_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.01 0.01)) ylab(-0.01(0.005)0.01) +graph export "$out_files/any_air_cen_event_10.pdf", replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#2.min_cen_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_15_) replace + +twoway (rarea enf_15_ul1 enf_15_ll1 enf_15_at, color(gs6%20)) (scatter enf_15_b enf_15_at, msymbol(p) mc(black%60)) (line enf_15_b enf_15_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.01 0.01)) ylab(-0.01(0.005)0.01) +graph export "$out_files/any_air_cen_event_15.pdf",
replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#3.min_cen_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_20_) replace + +twoway (rarea enf_20_ul1 enf_20_ll1 enf_20_at, color(gs6%20)) (scatter enf_20_b enf_20_at, msymbol(p) mc(black%60)) (line enf_20_b enf_20_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.01 0.01)) ylab(-0.01(0.005)0.01) +graph export "$out_files/any_air_cen_event_20.pdf", replace + + +**============================================================================== +* Figure D5 +use "$data_files/city_enf.dta", clear +set scheme s1mono + +fvset base 2014 year +fvset base 20 time + +reghdfe log_any_air i.time##c.number i.post1##c.area i.post1##c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id) poolsize(1) compact +coefplot, baselevels omitted vert yline(-0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) keep(*me#c.number) coeflabels() drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) le(95) mc(black) gen(pm1_) replace + +regress number number_iv +predict num_hat + +reghdfe log_any_air i.time##c.num_hat i.post1##c.area i.post1##c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id) poolsize(1) compact +coefplot, baselevels omitted vert yline(-0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) keep(*me#c.num_hat) coeflabels() drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) 
ytitle(Parameter Estimate) le(95) mc(black) gen(pm2_) replace + +twoway (rarea pm1_ul1 pm1_ll1 pm1_at, color(gs6%20)) (line pm1_b pm1_at, lwidth(medthick) lp(dash) lc(blackblack%70)) (rarea pm2_ul1 pm2_ll1 pm2_at, color(gs6%20)) (line pm2_b pm2_at, lp(dash) lc(blackblack%40)), yline(-0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.2 0.6)) ylabel(-0.2(0.2)0.6) legend(order(2 "DiD" 4 "DiD+IV") pos(2) ring(0) rows(2)) + +graph export "$out_files/eventenf.pdf", replace + + +**============================================================================== +* Figure D11 +* balance +use "$data_files/city_pm.dta", clear +keep if year < 2015 + +replace light = log(light) +gen log_any_air = log(any_air+1) + +collapse (mean) pm25 light log_any_air number area pop age city_id GDP, by(city_cn) +gen above57 = age > 57 + +label variable number "\# Monitors" +label variable area "Size of buildup area" +label variable pop "Urban population" +label variable pm25 "AOD before 2015" +label variable light "Night light before 2015" +label variable log_any_air "log(\# Firms) before 2015" + +fvset base 58 age +regress number i.age if age>=50 & age<=60 +coefplot, baselevels omitted vert drop(_cons) keep(*.age) yline(0, lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("Age of Mayor")coeflabels(50.age = "50" 51.age = "51" 52.age = "52" 53.age = "53" 54.age = "54" 55.age = "55" 56.age = "56" 57.age = "57" 58.age = "58" 59.age = "59" 60.age = "60") le(95) +graph export "$out_files/number_age.pdf", replace + +regress area i.age if age>=50 & age<=60 +coefplot, 
baselevels omitted vert drop(_cons) keep(*.age) yline(0, lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("Age of Mayor")coeflabels(50.age = "50" 51.age = "51" 52.age = "52" 53.age = "53" 54.age = "54" 55.age = "55" 56.age = "56" 57.age = "57" 58.age = "58" 59.age = "59" 60.age = "60") le(95) +graph export "$out_files/area_age.pdf", replace + +regress pop i.age if age>=50 & age<=60 +coefplot, baselevels omitted vert drop(_cons) keep(*.age) yline(0, lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("Age of Mayor")coeflabels(50.age = "50" 51.age = "51" 52.age = "52" 53.age = "53" 54.age = "54" 55.age = "55" 56.age = "56" 57.age = "57" 58.age = "58" 59.age = "59" 60.age = "60") le(95) +graph export "$out_files/pop_age.pdf", replace + +regress pm25 i.age if age>=50 & age<=60 +coefplot, baselevels omitted vert drop(_cons) keep(*.age) yline(0, lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("Age of Mayor")coeflabels(50.age = "50" 51.age = "51" 52.age = "52" 53.age = "53" 54.age = "54" 55.age = "55" 56.age = "56" 57.age = "57" 58.age = "58" 59.age = "59" 60.age = "60") le(95) +graph export "$out_files/pm_age.pdf", replace + +regress light i.age if age>=50 & age<=60 +coefplot, baselevels omitted vert drop(_cons) keep(*.age) yline(0, lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("Age of Mayor")coeflabels(50.age = "50" 51.age = "51" 52.age = "52" 53.age = "53" 54.age = "54" 
55.age = "55" 56.age = "56" 57.age = "57" 58.age = "58" 59.age = "59" 60.age = "60") le(95) +graph export "$out_files/light_age.pdf", replace + +regress log_any_air i.age if age>=50 & age<=60 +coefplot, baselevels omitted vert drop(_cons) keep(*.age) yline(0, lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("Age of Mayor")coeflabels(50.age = "50" 51.age = "51" 52.age = "52" 53.age = "53" 54.age = "54" 55.age = "55" 56.age = "56" 57.age = "57" 58.age = "58" 59.age = "59" 60.age = "60") le(95) +graph export "$out_files/enf_age.pdf", replace + + +**============================================================================== +* Figure D7 +* Bandwidths +use "$data_files/city_pm_rd.dta", clear + +gen dist1 = area - 20 if cutoff == 1 +replace dist1 = area - 50 if cutoff == 2 + +frame create fs fs_b fs_var +quietly{ + forvalues x = 4(2)20 { + qui rdrobust number dist1, p(1) h(`x') kernel(uni) covs(cutoff) vce(cluster city_id) + frame post fs (e(tau_cl)) (e(se_tau_cl)) + } +} + +frame fs: gen fs_ul = fs_b + 1.96*fs_var +frame fs: gen fs_ll = fs_b - 1.96*fs_var +frame fs: gen fs_at = 2 + 2*_n + +frame fs: twoway (connected fs_b fs_at, sort msymbol(S) color(black)) (line fs_ul fs_at, sort lpattern(dash) lcolor(gs9)) /* +*/ (line fs_ll fs_at, sort lpattern(dash) lcolor(gs9)), ytitle(Number of monitors) yline(0, lc(cranberry)) /* +*/ xtitle(Bandwidth) xline(11.3, lcolor(blue) lpattern(dash) lwidth(thin)) /* +*/ graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ysc(r(0 2)) ylabel(0(0.5)2) xlabel(4(2)20) +graph export "$out_files/band_fs.pdf", replace + +gen bench = pm25 if year < 2012 +bys city_id cutoff: egen mean_bench = mean(bench) + +frame create rd rd_b rd_var +quietly{ + forvalues x = 4(2)20 { + qui 
rdrobust pm25 dist1 if year >= 2015, fuzzy(number) covs(cutoff mean_bench year) p(1) h(`x') vce(cluster city_id) kernel(uni) + frame post rd (e(tau_cl)) (e(se_tau_cl)) + } +} + +frame rd: gen rd_ul = rd_b + 1.96*rd_var +frame rd: gen rd_ll = rd_b - 1.96*rd_var +frame rd: gen rd_at = 2 + 2*_n + +frame rd: twoway (connected rd_b rd_at, sort msymbol(S) color(black)) (line rd_ul rd_at, sort lpattern(dash) lcolor(gs9)) /* +*/ (line rd_ll rd_at, sort lpattern(dash) lcolor(gs9)), ytitle(Number of monitors) yline(0, lc(cranberry)) /* +*/ xtitle(Bandwidth) xline(11.3, lcolor(blue) lpattern(dash) lwidth(thin)) /* +*/ graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ysc(r(-0.075 0.05)) ylabel(-0.075(0.025)0.05) xlabel(4(2)20) +graph export "$out_files/band_rd.pdf", replace + + +use "$data_files/city_enf_rd.dta", clear + +gen dist1 = area - 20 if cutoff == 1 +replace dist1 = area - 50 if cutoff == 2 + +gen bench = log_any_air if year < 2012 +bys city_id cutoff: egen mean_bench = mean(bench) + +frame drop rd +frame create rd rd_b rd_var +quietly{ + forvalues x = 4(2)20 { + qui rdrobust log_any_air dist1 if year >= 2015, fuzzy(number) covs(cutoff mean_bench year) p(1) h(`x') vce(cluster city_id) kernel(uni) + frame post rd (e(tau_cl)) (e(se_tau_cl)) + } +} + +frame rd: gen rd_ul = rd_b + 1.96*rd_var +frame rd: gen rd_ll = rd_b - 1.96*rd_var +frame rd: gen rd_at = 2 + 2*_n + +frame rd: twoway (connected rd_b rd_at, sort msymbol(S) color(black)) (line rd_ul rd_at, sort lpattern(dash) lcolor(gs9)) /* +*/ (line rd_ll rd_at, sort lpattern(dash) lcolor(gs9)), ytitle(Number of monitors) yline(0, lc(cranberry)) /* +*/ xtitle(Bandwidth) xline(11.3, lcolor(blue) lpattern(dash) lwidth(thin)) /* +*/ graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) 
ysc(r(-0.2 0.6)) ylabel(-0.2(0.2)0.6) xlabel(4(2)20) +graph export "$out_files/band_rd_enf.pdf", replace + + +**============================================================================== +* Figure D7 +use "$data_files/city_pm_rd.dta", clear + +gen dist1 = area - 20 if cutoff == 1 +replace dist1 = area - 50 if cutoff == 2 + +hist dist1 if cutoff == 1 & year == 2015 & month == 1, /* +*/ start(-24) width(12) xline(0, lc(cranberry)) ysc(r(0 0.025)) /* +*/ ylabel(0(0.005)0.025) xtitle("Size of the Build-up Area") /* +*/ graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) /* +*/ plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) /* +*/ legend(nobox region(fcolor(white) margin(zero) lcolor(white))) +graph export "$out_files/Cutoff1Score1.pdf", replace + +hist dist1 if cutoff == 2 & year == 2015 & month == 1, /* +*/ start(-60) width(12) xline(0, lc(cranberry)) ysc(r(0 0.025)) /* +*/ ylabel(0(0.005)0.025) xtitle("Size of the Build-up Area") /* +*/ graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) /* +*/ plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) /* +*/ legend(nobox region(fcolor(white) margin(zero) lcolor(white))) +graph export "$out_files/Cutoff2Score1.pdf", replace + +rddensity dist1 if year==2015 & month==1 & dist1<40 & dist1>-40, all plot plot_range(-20 20) nohist graph_opt(legend(off) xtitle("Size of the Buildup Area") ytitle("Density") graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white))) +graph export "$out_files/DensityTest1.pdf", as(pdf) replace + + +**============================================================================== +* Figure D9 +* hist +use "$data_files/Raw/firm_info.dta", clear + +hist min_dist if min_dist < 50, xtitle("Distance to the closest monitor (km)") /* +*/ graphregion(fcolor(white) lcolor(none) ifcolor(white) 
ilcolor(white) ilpattern(blank)) /* +*/ plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) /* +*/ legend(nobox region(fcolor(white) margin(zero) lcolor(white))) +graph export "$out_files/hist_min_dist.pdf", as(pdf) replace + + + + + diff --git a/110/replication_package/replication/Do-file/Figure.do b/110/replication_package/replication/Do-file/Figure.do new file mode 100644 index 0000000000000000000000000000000000000000..9de53e7c7d0efff3f6076084537ffa4d1b40484c --- /dev/null +++ b/110/replication_package/replication/Do-file/Figure.do @@ -0,0 +1,177 @@ +* Set Directory +clear +set more off +set scheme s1mono + +cd "$path" +global data_files "$path/Data" +global out_files "$path/output" + +**============================================================================== +* Figure 1 +use "$data_files/firm_enf.dta", clear + +fvset base 2014 year + +* Binscatter plot +binscatter any_air min_dist if min_dist<50 & starty<=2010, by(post) line(none) ytitle(Any Air Pollution Related Enforcement) xtitle(Distance to the Closest Monitor(km)) legend(region(lwidth(none)) pos(12) ring(0) lab(1 pre-policy (2010-2014)) lab(2 post-policy (2015-2017))) mc(black cranberry) lc(black cranberry) ysc(r(0.001 0.013)) ylab(0.001(0.002)0.013) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) +graph export "$out_files/any_air_gradient_50.pdf", replace + +binscatter any_air year if min_dist<50 & starty<=2010, by(min_dist_10) ytitle(Any Air Pollution Related Enforcement) xtitle(Year) legend(region(lwidth(none)) pos(12) ring(0) order(2 1) lab(1 10-50km) lab(2 0-10km)) mc(black cranberry) line(none) xtick(2010(1)2017) xlabel(2010 "2010" 2011 "2011" 2012 "2012" 2013 "2013" 2014 "2014" 2015 "2015" 2016 "2016" 2017 "2017") ysc(r(0.001 0.013)) ylab(0.001(0.002)0.013) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) 
lcolor(none) ifcolor(white) ilcolor(white)) +graph export "$out_files/any_air_trend.pdf", replace + +use "$data_files/Raw/pm.dta", clear +drop pm25 + +rename pm_indirect pm3 +rename pm_direct pm2 + +reshape long pm, i(city_id year month) j(group) + +append using "$data_files/pix.dta" +replace group = 1 if group == . + +egen time = group(year month) + +binscatter pm year, by(group) line(none) legend(region(lwidth(none)) pos(12) ring(0) cols(3) lab(1 Monitor (≤ 10km)) lab(2 City Center (10-50km)) lab(3 Surrounding Area (> 50km))) ysc(r(0.33 0.47)) ylab(0.30(0.02)0.48) mc(cranberry black gray) m(D O S) ytitle("Aerosol Optical Depth") xtitle("Year") xtick(2010(1)2017) xlabel(2010 "2010" 2011 "2011" 2012 "2012" 2013 "2013" 2014 "2014" 2015 "2015" 2016 "2016" 2017 "2017") ysc(r(0.24 0.48)) ylab(0.24(0.04)0.48) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) +graph export "$out_files/monitors_trend.pdf", replace + +**============================================================================== +* Figure 2 +use "$data_files/firm_enf.dta", clear + +* set base level +fvset base 20 time +fvset base 4 min_d_d4 + +* Event Study Specification +reghdfe any_air i.time##i.min_d_d4 if min_dist<50 & starty<=2010, absorb(id time industry#time prov_id#time) cluster(city_id) + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#0.min_d_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_5_) replace + +twoway (rarea enf_5_ul1 enf_5_ll1 enf_5_at, color(gs6%20)) (scatter enf_5_b enf_5_at, msymbol(p) mc(black%60)) (line enf_5_b enf_5_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) 
ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.006 0.012)) ylab(-0.006(0.006)0.012) +graph export "$out_files/event_enf_min_dist5.pdf", replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#1.min_d_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_10_) replace + +twoway (rarea enf_10_ul1 enf_10_ll1 enf_10_at, color(gs6%20)) (scatter enf_10_b enf_10_at, msymbol(p) mc(black%60)) (line enf_10_b enf_10_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.006 0.012)) ylab(-0.006(0.006)0.012) +graph export "$out_files/event_enf_min_dist10.pdf", replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#2.min_d_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_15_) replace + +twoway (rarea enf_15_ul1 enf_15_ll1 enf_15_at, color(gs6%20)) (scatter enf_15_b enf_15_at, msymbol(p) mc(black%60)) (line enf_15_b enf_15_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("")
xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.006 0.012)) ylab(-0.006(0.006)0.012) +graph export "$out_files/event_enf_min_dist15.pdf", replace + +qui coefplot, baselevels omitted vert yline(0, lc(black)) xline(20.5, lc(cranberry) lp(dash)) keep(*.time#3.min_d_d4) drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) xtitle(Year) le(95) mc(black) ciopts(recast(rcap) lwidth(0.3) lpattern(dash)) gen(enf_20_) replace + +twoway (rarea enf_20_ul1 enf_20_ll1 enf_20_at, color(gs6%20)) (scatter enf_20_b enf_20_at, msymbol(p) mc(black%60)) (line enf_20_b enf_20_at, lp(dash) lc(blackblack%60)), yline(0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.006 0.012)) ylab(-0.006(0.006)0.012) +graph export "$out_files/event_enf_min_dist20.pdf", replace + +**============================================================================== +* Figure 3 +use "$data_files/Raw/city_info.dta", clear +merge 1:1 city_id using "$data_files/share.dta" +drop if _merge == 2 +drop _merge + +binsreg share_rev_10 number, ci(95) graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(% of High-Pollution Activity <10km from Monitor) xtitle(# Monitors) +graph export "$out_files/revenue_share.pdf", replace + +**============================================================================== +* Figure 4 +use "$data_files/city_pm.dta", clear + +reghdfe pm25, a(city_id) residuals(pm_res) + +collapse (mean) pm_res, by(number_iv year) +twoway (line pm_res year if number_iv==1, lc(navy)) (line pm_res year if number_iv==2, lc(maroon)) /* +*/ (line pm_res year if number_iv==4, lc(forest_green))
(line pm_res year if number_iv==6, lc(dkorange)), /* +*/ yline(0, lc(black)) xline(2014.5, lp(dash) lc(cranberry)) graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle("Aerosol Optical Depth") xtitle("Year") xtick(2010(1)2017) xsc(r(2010 2017)) xlabel(2010 "2010" 2011 "2011" 2012 "2012" 2013 "2013" 2014 "2014" 2015 "2015" 2016 "2016" 2017 "2017") legend(off) text(-0.008 2016.8 "One" -0.05 2016.8 "Two" -0.085 2016.8 "Four" -0.097 2016.8 "Six") +graph export "$out_files/number_iv_trend.pdf", replace + +use "$data_files/city_pm.dta", clear + +fvset base 2014 year +fvset base 20 time +drop if year==2014 & month==12 + +reghdfe pm25 i.time##c.number i.post1##i.quarter##c.area i.post1##i.quarter##c.pop i.quarter##c.area i.quarter##c.pop, a(city_id year#month pred tem_meand age_year year#incentive2) cluster(city_id) +coefplot, baselevels omitted vert yline(-0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) keep(*me#c.number) coeflabels() drop() ysc(r(-0.08 0.04)) ylabel(-0.08(0.04)0.04) graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) le(95) mc(black) gen(pm1_) replace + +regress number number_iv area pop +predict num_hat + +reghdfe pm25 i.time##c.num_hat i.post1##i.quarter##c.area i.post1##i.quarter##c.pop i.quarter##c.area i.quarter##c.pop, a(city_id year#month pred tem_meand age_year year#incentive2) cluster(city_id) +coefplot, baselevels omitted vert yline(-0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) keep(*me#c.num_hat) coeflabels() drop() graphregion(color(white) fcolor(white)) plotregion(color(white)) ytitle(Parameter Estimate) le(95) mc(black) gen(pm2_) replace + +twoway (rarea pm1_ul1 pm1_ll1 pm1_at, color(gs6%20)) (line pm1_b pm1_at, lwidth(medthick) lp(dash) lc(blackblack%70)) (rarea pm2_ul1 pm2_ll1 pm2_at, color(gs6%20)) (line pm2_b pm2_at, lp(dash) lc(blackblack%40)), yline(-0, lc(black)) xline(20.5, lp(dash) lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)
ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) ytitle("") xtitle("") xtick(1(1)32) xlabel(1 "2010" 5 "2011" 9 "2012" 13 "2013" 17 "2014" 21 "2015" 25 "2016" 29 "2017") ysc(r(-0.08 0.08)) ylabel(-0.08(0.04)0.08) legend(order(2 "DiD" 4 "DiD+IV") pos(2) ring(0) rows(2)) +graph export "$out_files/eventpm.pdf", replace + +**============================================================================== +* Figure 5 +use "$data_files/city_pm_rd.dta", clear + +gen dist1 = area - 20 if cutoff == 1 +replace dist1 = area - 50 if cutoff == 2 + +gen bench = pm25 if year < 2012 +bys city_id cutoff: egen mean_bench = mean(bench) + +regress number cutoff if abs(dist1)<22 +predict res_num, residuals +rdplot res_num dist1 if abs(dist1)<15 & year==2015 & month==1, p(2) h(15) nbins(5) kernel(uni) ci(95) vce(cluster city_id) graph_options(ytitle(Residualized # of Monitors) xtitle(Distance to the Closest Geographical Size Cutoff) legend(off) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(nobox region(fcolor(white) margin(zero) lcolor(white))) ysc(r(-1.5 1.5)) ylabel(-1.5(0.5)1.5) xsc(r(-15 15)) xlabel(-15(5)15)) +graph export "$out_files/fs_ci.pdf", replace + +regress incentive2 cutoff if abs(dist1)<22 +predict res_inc2, residuals +rdplot res_inc2 dist1 if abs(dist1)<15 & year==2015 & month==1, p(2) h(15) nbins(5) kernel(uni) ci(95) vce(cluster city_id) graph_options(ytitle(Residualized Reduction Goal(%)) xtitle(Distance to the Closest Geographical Size Cutoff) legend(off) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(nobox region(fcolor(white) margin(zero) lcolor(white))) ysc(r(-10 10)) ylabel(-10(5)10) xsc(r(-15 15)) xlabel(-15(5)15)) +graph export "$out_files/inc2_ci.pdf", replace + +regress pm25 cutoff mean_bench if 
abs(dist1)<22 & year>=2015 +predict res_pm, residuals +rdplot res_pm dist1 if abs(dist1)<15 & year>=2015 & year>2011, p(2) h(15) nbins(5) covs() kernel(uni) ci(95) vce(cluster city_id) graph_options(ytitle(Residualized AOD) xtitle(Distance to the Closest Geographical Size Cutoff) legend(off) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(nobox region(fcolor(white) margin(zero) lcolor(white))) ysc(r(-0.06 0.06)) ylabel(-0.06(0.03)0.06) xsc(r(-15 15)) xlabel(-15(5)15)) +graph export "$out_files/rd_ci.pdf", replace + +regress pm25 cutoff mean_bench if abs(dist1)<22 & year<2015 & year>2010 +predict res_pm_pre, residuals +rdplot res_pm_pre dist1 if abs(dist1)<15 & year<2015 & year>2011, p(2) h(15) nbins(5) covs() kernel(uni) ci(95) vce(cluster city_id) graph_options(ytitle(Residualized AOD) xtitle(Distance to the Closest Geographical Size Cutoff) legend(off) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(nobox region(fcolor(white) margin(zero) lcolor(white))) ysc(r(-0.06 0.06)) ylabel(-0.06(0.03)0.06) xsc(r(-15 15)) xlabel(-15(5)15)) +graph export "$out_files/rd_ci_pre.pdf", replace + +**============================================================================== +use "$data_files/city_enf_rd.dta", clear + +gen dist1 = area - 20 if cutoff == 1 +replace dist1 = area - 50 if cutoff == 2 + +gen bench = log_any_air if year < 2012 +bys city_id cutoff: egen mean_bench = mean(bench) + +regress log_any_air cutoff mean_bench if abs(dist1)<22 & year>=2015 +predict res_air, residuals +rdplot res_air dist1 if abs(dist1)<16 & year>=2015 & year>2011, p(2) h(15) nbins(5) covs() kernel(uni) ci(95) vce(cluster city_id) graph_options(ytitle(Residualized log(# Any Enf)) xtitle(Distance to the Closest Geographical Size Cutoff) legend(off) 
graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(nobox region(fcolor(white) margin(zero) lcolor(white))) ysc(r(-0.5 0.5)) ylabel(-0.5(0.25)0.5) xsc(r(-15 15)) xlabel(-15(5)15)) +graph export "$out_files/rd_ci_enf.pdf", replace + +regress log_any_air cutoff if abs(dist1)<22 & year<2015 & year>2011 +predict res_air_pre, residuals +rdplot res_air_pre dist1 if abs(dist1)<16 & year<2015 & year>2011, p(2) h(15) nbins(5) covs() kernel(uni) ci(95) vce(cluster city_id) graph_options(ytitle(Residualized log(# Any Enf)) xtitle(Distance to the Closest Geographical Size Cutoff) legend(off) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(nobox region(fcolor(white) margin(zero) lcolor(white))) ysc(r(-0.5 0.5)) ylabel(-0.5(0.25)0.5) xsc(r(-15 15)) xlabel(-15(5)15)) +graph export "$out_files/rd_ci_enf_pre.pdf", replace + +**============================================================================== +* Figure 6 +use "$data_files/city_pm.dta", clear + +fvset base 58 age +reghdfe pm25 c.post1#c.number c.post1#c.area c.post1#c.pop i.age#c.post1#c.number if age >= 50 & age <= 60, a(city_id year#month pred tem_meand incentive2#i.year) cluster(city_id) +coefplot, baselevels omitted vert keep(*age#c.post1#c.number) drop(_cons c.post1#c.number c.post1#c.area c.post1#c.pop) yline(0, lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("Age of Mayor") coeflabels(50.age#c.post1#c.number = "50" 51.age#c.post1#c.number = "51" 52.age#c.post1#c.number = "52" 53.age#c.post1#c.number = "53" 54.age#c.post1#c.number = "54" 55.age#c.post1#c.number = "55" 56.age#c.post1#c.number = "56" 57.age#c.post1#c.number = "57" 
58.age#c.post1#c.number = "58" 59.age#c.post1#c.number = "59" 60.age#c.post1#c.number = "60") le(95) ysc(r(-0.04 0.02)) ylab(-0.04(0.02)0.02) +graph export "$out_files/promotion_age.pdf", replace + +use "$data_files/city_enf.dta", clear + +fvset base 58 age +reghdfe log_any_air c.post1#c.number i.age#c.post1#c.number c.post1#c.area c.post1#c.pop if age >= 50 & age <= 60, a(city_id year#quarter incentive2#i.year) cluster(city_id) +coefplot, baselevels omitted vert keep(*age#c.post1#c.number) drop(_cons c.post1#c.number c.post1#c.area c.post1#c.pop) yline(0, lc(cranberry)) graphregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white) ilpattern(blank)) plotregion(fcolor(white) lcolor(none) ifcolor(white) ilcolor(white)) legend(off) ytitle("") xtitle("Age of Mayor") coeflabels(50.age#c.post1#c.number = "50" 51.age#c.post1#c.number = "51" 52.age#c.post1#c.number = "52" 53.age#c.post1#c.number = "53" 54.age#c.post1#c.number = "54" 55.age#c.post1#c.number = "55" 56.age#c.post1#c.number = "56" 57.age#c.post1#c.number = "57" 58.age#c.post1#c.number = "58" 59.age#c.post1#c.number = "59" 60.age#c.post1#c.number = "60") le(95) ysc(r(-0.12 0.24)) ylab(-0.12(0.12)0.24) +graph export "$out_files/promotion_age_enf.pdf", replace + + + + diff --git a/110/replication_package/replication/Do-file/Install.do b/110/replication_package/replication/Do-file/Install.do new file mode 100644 index 0000000000000000000000000000000000000000..75cb9fb79a72694b6b5adc6ec4718f73e2e99fc5 --- /dev/null +++ b/110/replication_package/replication/Do-file/Install.do @@ -0,0 +1,39 @@ +*** Install all the programs in the ado/plus-folder from internet + +ssc install geodist, replace +ssc install binscatter, replace +ssc install ivreg2, replace +ssc install ranktest, replace +ssc install estout, replace +ssc install erepost, replace +ssc install coefplot, replace +ssc install tmpdir, replace +ssc install reg2hdfe, replace + +cap ado uninstall rdrobust +net install rdrobust, 
from("https://raw.githubusercontent.com/rdpackages/rdrobust/master/stata") replace + +cap ado uninstall binsreg +net install binsreg, from("https://raw.githubusercontent.com/nppackages/binsreg/master/stata") replace + +cap ado uninstall ftools +net install ftools, from("https://raw.githubusercontent.com/sergiocorreia/ftools/master/src/") replace + +cap ado uninstall reghdfe +net install reghdfe, from("https://raw.githubusercontent.com/sergiocorreia/reghdfe/master/src/") replace + +cap ado uninstall ivreghdfe +net install ivreghdfe, from("https://raw.githubusercontent.com/sergiocorreia/ivreghdfe/master/src/") replace + +cap ado uninstall lpdensity +net install lpdensity, from("https://raw.githubusercontent.com/nppackages/lpdensity/master/stata") replace + +cap ado uninstall rddensity +net install rddensity, from("https://raw.githubusercontent.com/rdpackages/rddensity/master/stata") replace */ + + +***The End + + + + diff --git a/110/replication_package/replication/Do-file/MakeData.do b/110/replication_package/replication/Do-file/MakeData.do new file mode 100644 index 0000000000000000000000000000000000000000..81f4fcd2c59f22f0dcbb20230d64837145e8f107 --- /dev/null +++ b/110/replication_package/replication/Do-file/MakeData.do @@ -0,0 +1,351 @@ +* Set Directory +clear +set more off +set scheme s1mono + +cd "$path" +global data_files "$path/Data" +global out_files "$path/output" + +**============================================================================== +* Weather data +use "$data_files/Raw/weather_daily.dta", clear + +collapse (sum) pre (mean) tem_mean, by(city_id year month) +save "$data_files/weather_monthly.dta", replace + +use "$data_files/Raw/weather_daily.dta", clear +gen quarter = int((month-1)/3)+1 + +collapse (sum) pre (mean) tem_mean, by(city_id year quarter) +save "$data_files/weather_quarterly.dta", replace + +use "$data_files/Raw/weather_daily.dta", clear +gen quarter = int((month-1)/3)+1 + +bys city_id year quarter: egen wd = mode(wdmax), max +keep 
city_id year quarter wd +duplicates drop +save "$data_files/wind_quarterly.dta", replace + +**============================================================================== +* Firm-level: enforcement data +use "$data_files/Raw/firm_info.dta", clear + +merge 1:m id using "$data_files/Raw/enf_info.dta" +keep if _merge == 3 +drop _merge + +merge m:1 city_id year quarter using "$data_files/weather_quarterly.dta" +keep if _merge == 3 +drop _merge + +merge m:1 city_id year quarter using "$data_files/wind_quarterly.dta" +keep if _merge == 3 +drop _merge + +capture program drop ibear +program ibear + + args lat1 lon1 lat2 lon2 newvar + + tempname d2r r2d + scalar `d2r' = _pi / 180 + scalar `r2d' = 180 / _pi + + gen `newvar' = atan2(sin((`lon2'-`lon1') * `d2r') * cos(`lat2' * `d2r') , /// + cos(`lat1' * `d2r') * sin(`lat2' * `d2r') - /// + sin(`lat1' * `d2r') * cos(`lat2' * `d2r') * /// + cos((`lon2'-`lon1') * `d2r')) + + // normalize atan2 results (-pi to pi) to range from 0 to 360 degrees + replace `newvar' = mod((`newvar' * `r2d') + 360, 360) + +end + +ibear monitor_lat monitor_lon lat lon angle +replace wd = 22.5*(wd-1) + +gen upwd = 1 if angle - wd < 45 & angle - wd > -45 +replace upwd = 1 if (angle - wd < 45 | angle - 360 - wd > -45) & wd <= 45 +replace upwd = 1 if (angle + 360 - wd < 45 | angle - wd > -45) & wd >= 315 +replace upwd = 0 if upwd == . + +egen time = group(year quarter) +gen min_dist_10 = min_dist < 10 +gen post1 = year >= 2015 + +gen air_1 = air==1 +gen air_2 = air>=2 + +gen leni = (any_air_shutdown + any_air_fine + any_air_renovate == 1) +gen stri = (any_air_shutdown + any_air_fine + any_air_renovate == 3) + +gen min_d_d4 = int(min_dist/5) +replace min_d_d4 = 4 if min_d_d4 > 4 + +bysort city_id: egen med_pre=median(pre) +gen high_pre=0 if pre!=. +replace high_pre=1 if pre!=.
& pre>med_pre + +save "$data_files/firm_enf.dta", replace + +**============================================================================== +use "$data_files/firm_enf.dta", clear + +gen any_air_10 = any_air & min_dist < 10 +gen any_air_20 = any_air & min_dist < 50 & min_dist > 10 +gen any_air_50 = any_air & min_dist > 50 + +collapse (sum) any_air any_air_*, by(city_id year quarter) +save "$data_files/enf.dta", replace + +**============================================================================== +* Mayor data +use "$data_files/Raw/mayor.dta", clear +format %td start_date +format %td end_date +format %td birthdate + +bysort city_cn city_id (start_date end_date): gen next_start=start_date[_n+1] +replace next_start=end_date if next_start==. +format %td next_start + +foreach v of varlist start_date next_start end_date { + gen `v'_m = mofd(`v') + format %tm `v'_m +} + +bysort city_cn city_id (start_date next_start_m): replace next_start_m=next_start_m-1 if _n!=_N +expand next_start_m-start_date_m + 1 + +by city_cn city_id name start_date (next_start_m), sort: gen month_date = start_date_m + _n - 1 +format month_date %tm +gen month=month(dofm(month_date)) +gen year=year(dofm(month_date)) + +sort city_cn city_id month_date name + +destring city_id, replace +drop if city_id==. 
+save "$data_files/mayor_panel.dta", replace + +keep if year==2015 +format %td birthdate +gen birth_year=year(birthdate) +gen birth_month=month(birthdate) +gen age_2017=2018-birth_year +bysort city_id: egen age = mode(age_2017), maxmode +bysort city_id: egen age_month = mode(birth_month), maxmode + +keep city_id city_cn age age_month +duplicates drop + +replace age = age+1 if age_month==1 & age==57 +drop age_month + +save "$data_files/age_2017.dta", replace + +use "$data_files/mayor_panel.dta", clear +format %td birthdate +gen birth_year=year(birthdate) +gen birth_month=month(birthdate) + +gen age = year-birth_year +bys city_id year: egen age_year = mode(age) + +keep if year >= 2010 & year <= 2017 +keep city_id year age_year +duplicates drop + +bys city_id (year): replace age_year = age_year[_n-1]+1 if age_year == . +bys city_id (year): replace age_year = age_year[_n+1]-1 if age_year == . +bys city_id (year): replace age_year = age_year[_n-1]+1 if age_year == . +bys city_id (year): replace age_year = age_year[_n+1]-1 if age_year == . + +save "$data_files/age_year.dta", replace + +**============================================================================== +* Monitor data +use "$data_files/raw/pm_pix.dta", clear + +keep city_id p_id p_lon p_lat +duplicates drop + +rename city_id city_id2 +joinby city_id2 using "$data_files/Raw/monitor_city_long.dta" + +geodist p_lat p_lon monitor_lat monitor_lon, gen(dist) +keep if dist < 20 + +expand 8 +bys city_id monitor_id p_id: gen year = 2009 + _n +expand 12 +bys city_id monitor_id p_id year: gen month = _n + +merge m:1 p_id year month using "$data_files/raw/pm_pix.dta" +keep if _merge == 3 +drop _merge + +drop if pm25 == . 
+bys monitor_id year month (dist): keep if _n == 1 + +keep monitor_id city_id year month pm25 compare +save "$data_files/monitor_pix.dta", replace + +keep if compare == 0 +collapse (mean) pm25, by(city_id year month) +rename pm25 pm + +save "$data_files/pix.dta", replace + +**============================================================================== +* City-level: AOD data +use "$data_files/Raw/city_info.dta", clear + +merge 1:m city_id using "$data_files/raw/pm.dta" +keep if _merge == 3 +drop _merge + +merge 1:1 city_id year month using "$data_files/weather_monthly.dta" +keep if _merge == 3 +drop _merge + +merge m:1 city_id using "$data_files/age_2017.dta" +keep if _merge == 3 +drop _merge + +merge m:1 city_id year using "$data_files/age_year.dta" +keep if _merge == 3 +drop _merge + +merge 1:1 city_id year month using "$data_files/Raw/lights.dta" +keep if _merge == 3 +drop _merge + +gen pred = int(pre/20) +gen tem_meand = int(tem_mean) + +sort city_id year month +gen quarter = int((month-1)/3)+1 +egen time = group(year quarter) + +merge m:1 city_id year quarter using "$data_files/enf.dta", keepusing(any_air) +replace any_air = 0 if _merge == 1 +drop _merge + +gen post1 = year >= 2015 + +merge 1:1 city_id year month using "$data_files/pix.dta" +drop _merge + +save "$data_files/city_pm.dta", replace + +expand 2, gen(cutoff) +replace cutoff = cutoff + 1 +save "$data_files/city_pm_rd.dta", replace + +**============================================================================== +* City-level: Enforcement data +use "$data_files/Raw/city_info.dta", clear +expand 8 +bys city_id: gen year = _n + 2009 +expand 4 +bys city_id year: gen quarter = _n + +merge 1:1 city_id year quarter using "$data_files/weather_quarterly.dta" +drop _merge + +merge 1:1 city_id year quarter using "$data_files/enf.dta" +drop _merge + +merge m:1 city_id using "$data_files/age_2017.dta" +keep if _merge == 3 +drop _merge + +merge m:1 city_id year using "$data_files/age_year.dta" +keep if _merge 
== 3 +drop _merge + +gen pred = int(pre/20) +gen tem_meand = int(tem_mean) + +sort city_id year quarter +egen time = group(year quarter) + +replace any_air = 0 if any_air == . +gen log_any_air = log(any_air+1) + +replace any_air_10 = 0 if any_air_10 == . +gen log_any_air_10 = log(any_air_10+1) +replace any_air_20 = 0 if any_air_20 == . +gen log_any_air_20 = log(any_air_20+1) + +replace any_air_50 = 0 if any_air_50 == . +gen log_any_air_50 = log(any_air_50+1) + +gen post1 = year >= 2015 + +merge 1:1 city_id year quarter using "$data_files/Raw/non-asif.dta" +drop _merge + +save "$data_files/city_enf.dta", replace + +expand 2, gen(cutoff) +replace cutoff = cutoff + 1 +save "$data_files/city_enf_rd.dta", replace + +**============================================================================== +use "$data_files/Raw/daily_monitor_api.dta", clear + +collapse (mean) pm25api pm10api AQI, by(city_id monitor_id year month) +save "$data_files/monthly_api.dta", replace + +**============================================================================== +use "$data_files/Raw/monitor_info.dta", clear + +merge 1:m monitor_id using "$data_files/monitor_pix" +keep if _merge == 3 +drop _merge + +keep if year>=2015 & year<=2017 + +merge 1:1 monitor_id year month using "$data_files/monthly_api.dta" +keep if _merge == 3 +drop _merge + +merge m:1 city_id year month using "$data_files/weather_monthly.dta" +keep if _merge == 3 +drop _merge + +merge m:1 city_id year using "$data_files/age_year.dta" +keep if _merge == 3 +drop _merge + +save "$data_files/monitor_api.dta", replace + +**============================================================================== +use "$data_files/Raw/firm_info.dta", clear + +keep if key == 1 +gen revenue_5 = revenue if min_dist < 5 +gen revenue_10 = revenue if min_dist < 10 + +gen employment_5 = employment if min_dist < 5 +gen employment_10 = employment if min_dist < 10 + +collapse (sum) revenue* employment*, by(city_id) + +gen share_rev_10 = 
revenue_10/revenue +gen share_rev_5 = revenue_5/revenue + +gen share_emp_10 = employment_10/employment +gen share_emp_5 = employment_5/employment + +keep share* city_id +save "$data_files/share.dta", replace + + + + + diff --git a/110/replication_package/replication/Do-file/Master.do b/110/replication_package/replication/Do-file/Master.do new file mode 100644 index 0000000000000000000000000000000000000000..163be2a0ac011b31b6f23cb970bf7db8805219f7 --- /dev/null +++ b/110/replication_package/replication/Do-file/Master.do @@ -0,0 +1,19 @@ +*** Master file for set path. Then run master file to produce all output. + +global path "C:/Users/s14756/Desktop/replication" +cd "$path/Do-file" + +sysdir set PLUS "$path/ado/plus" +sysdir set PERSONAL "$path/ado/personal" + +do "$path/Do-file/MakeData.do" +do "$path/Do-file/Figure.do" +do "$path/Do-file/Table.do" +do "$path/Do-file/Appendix.do" + + +***The End + + + + diff --git a/110/replication_package/replication/Do-file/Table.do b/110/replication_package/replication/Do-file/Table.do new file mode 100644 index 0000000000000000000000000000000000000000..333eb168e40073a1571ad11f2e792ddc1cf4cd4c --- /dev/null +++ b/110/replication_package/replication/Do-file/Table.do @@ -0,0 +1,284 @@ +* Set Directory +clear +set more off +set scheme s1mono + +cd "$path" +global data_files "$path/Data" +global out_files "$path/output" + +**============================================================================== +* Table 1-2 +// Conley +use "$data_files/firm_enf.dta", clear +keep if min_dist<50 & starty<=2010 +drop if revenue == . +drop if key == . 
+ +label var min_dist_10 "Mon\$\_{<10km}\$" +label var any_air "Any Enforcement (0/1)" +label var post "Post" +label var key "High Pollution" + +gen min_dist_10_post1 = c.min_dist_10#c.post1 +egen ind_time = group(industry prov_id time) + +reg2hdfespatial any_air min_dist_10_post1, lat(lat) lon(lon) timevar(time) panelvar(id) altfetime(ind_time) distcutoff(100) lagcutoff(20) +scalar Conley1 = _se[min_dist_10_post1] + +reg2hdfespatial any_air_shutdown min_dist_10_post1, lat(lat) lon(lon) timevar(time) panelvar(id) altfetime(ind_time) distcutoff(100) lagcutoff(20) +scalar Conley2 = _se[min_dist_10_post1] + +reg2hdfespatial any_air_renovate min_dist_10_post1, lat(lat) lon(lon) timevar(time) panelvar(id) altfetime(ind_time) distcutoff(100) lagcutoff(20) +scalar Conley3 = _se[min_dist_10_post1] + +reg2hdfespatial any_air_fine min_dist_10_post1, lat(lat) lon(lon) timevar(time) panelvar(id) altfetime(ind_time) distcutoff(100) lagcutoff(20) +scalar Conley4 = _se[min_dist_10_post1] + +reg2hdfespatial any_air_warning min_dist_10_post1, lat(lat) lon(lon) timevar(time) panelvar(id) altfetime(ind_time) distcutoff(100) lagcutoff(20) +scalar Conley5 = _se[min_dist_10_post1] + +use "$data_files/firm_enf.dta", clear +drop if revenue == . +drop if key == . 
+ +label var min_dist_10 "Mon\$\_{<10km}\$" +label var any_air "Any Enforcement (0/1)" +label var post "Post" +label var key "High Pollution" + +eststo clear +reghdfe any_air c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo A +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local Conley = "[`: di %9.5f Conley1']" +reghdfe any_air_shutdown c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo B +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local Conley = "[`: di %9.5f Conley2']" +reghdfe any_air_renovate c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo C +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local Conley = "[`: di %9.5f Conley3']" +reghdfe any_air_fine c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo D +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local Conley = "[`:di %9.5f Conley4']" +reghdfe any_air_warning c.min_dist_10#c.post1 if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo E +estadd ysumm, mean +estadd scalar EN = e(N_full) +estadd local Conley = "[`:di %9.5f Conley5']" +esttab A B C D E using "$out_files/Table1a.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep(c.min_dist_10*) stats(ymean EN Conley, labels("Mean Outcome" "Observations" "Conley SE")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +* Table 1b +use "$data_files/firm_enf.dta", clear +drop if revenue == . +drop if key == . 
+ +label var min_dist_10 "Mon\$\_{<10km}\$" +label var any_air "Any Enforcement (0/1)" +label var post "Post" +label var key "High Pollution" + +eststo clear +reghdfe air c.min_dist_10#c.post1##c.key if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo A +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe air_1 c.min_dist_10#c.post1##c.key if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo B +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe air_2 c.min_dist_10#c.post1##c.key if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo C +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe leni c.min_dist_10#c.post1##c.key if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo D +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe stri c.min_dist_10#c.post1##c.key if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo E +estadd ysumm, mean +estadd scalar EN = e(N_full) +esttab A B C D E using "$out_files/Table1b.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep(c.min_dist_10*) stats(ymean EN, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +* Table 2 +gen Shock = high_pre +eststo clear +reghdfe any_air c.min_dist_10#c.post1##c.Shock tem_mean if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo A +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe any_air c.min_dist_10##c.post1##c.Shock tem_mean if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo B +estadd ysumm, mean +estadd scalar EN = e(N_full) +replace Shock = upwd +reghdfe any_air c.min_dist_10#c.post1##c.Shock tem_mean if min_dist<50 & starty<=2010, 
absorb(time id industry#time prov_id#time) cluster(city_id) +eststo C +estadd ysumm, mean +estadd scalar EN = e(N_full) +reghdfe any_air c.min_dist_10##c.post1##c.Shock tem_mean if min_dist<50 & starty<=2010, absorb(time id industry#time prov_id#time) cluster(city_id) +eststo D +estadd ysumm, mean +estadd scalar EN = e(N_full) +esttab A B C D using "$out_files/Table2.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep() drop(_cons tem_mean post1 min_dist_10) order(Shock c.min_dist_10#c.post1 c.min_dist_10#c.Shock c.min_dist_10#c.post1#c.Shock) stats(ymean EN, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + +**============================================================================== +* Table 3 +use "$data_files/city_pm.dta", clear +label variable post1 "Post" +label variable number "\# Mon" +label variable number_iv "Min \# Mon" + +gen RD_Estimate = post1*number + +eststo clear +reghdfe pm25 RD_Estimate c.post#c.area c.post#c.pop, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +estadd scalar EN = e(N_full) +eststo A +ivreghdfe pm25 c.post#c.area c.post#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +estadd scalar EN = e(N_full) +eststo B + +use "$data_files/city_pm_rd.dta", clear + +gen dist1 = area - 20 if cutoff == 1 +replace dist1 = area - 50 if cutoff == 2 + +gen bench = pm25 if year < 2012 +bys city_id cutoff: egen mean_bench = mean(bench) + +gen above = dist1 > 0 +gen RD_Estimate = post1*above + +rdrobust pm25 dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench year month) kernel(uni) vce(cluster city_id) +estadd scalar EN = e(N_h_l) + e(N_h_r) +estadd scalar band = e(h_l) +eststo C +reghdfe pm25 RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3,
a(time) cl(city_id) +estadd scalar EN = e(N) +estadd scalar band = 11.3 +eststo D +esttab A B C D using "$out_files/Table3a.tex", keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN, labels( "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) tex + +use "$data_files/city_enf.dta", clear + +label variable post1 "Post" +label variable number "\# Mon" +label variable number_iv "Min \# Mon" + +gen RD_Estimate = c.post1#c.number + +eststo clear +reghdfe log_any_air RD_Estimate c.post#c.area c.post#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id) +estadd scalar EN = e(N_full) +eststo A +ivreghdfe log_any_air c.post#c.area c.post#c.pop (RD_Estimate=c.post1#c.number_iv), a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id) +estadd scalar EN = e(N_full) +eststo B + +use "$data_files/city_enf_rd.dta", clear + +gen dist1 = area - 20 if cutoff == 1 +replace dist1 = area - 50 if cutoff == 2 + +gen bench = log_any_air if year < 2012 +bys city_id cutoff: egen mean_bench = mean(bench) + +gen above = dist1 > 0 +gen RD_Estimate = c.post1#c.above + +rdrobust log_any_air dist1 if year>=2015, fuzzy(number) p(1) h(11.3) covs(cutoff mean_bench year quarter) kernel(uni) vce(cluster city_id) +estadd scalar EN = e(N_h_l) + e(N_h_r) +estadd scalar band = e(h_l) +eststo C +reghdfe log_any_air RD_Estimate post1 above dist1 c.post1#c.dist1 c.above#c.dist1 c.post1#c.above#c.dist1 cutoff if abs(dist1) < 11.3, a(time) cl(city_id) +estadd scalar EN = e(N) +estadd scalar band = 11.3 +eststo D +esttab A B C D using "$out_files/Table3b.tex", tex keep(RD_Estimate) transform(@/1.21 1/1.21, pattern(0 0 0 1)) replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers mlabels(none) coeflabels(RD_Estimate "\# Monitors") stats(EN, labels("Observations")) starlevels(* 0.10 
** 0.05 *** 0.01) + +replace RD_Estimate = number_iv + +eststo clear +regress number number_iv if year>=2015, vce(cluster city_id) +eststo A +regress number RD_Estimate pop area if year>=2015, vce(cluster city_id) +eststo B +rdrobust number dist1 if year>=2015, p(1) h(11.3) covs(cutoff) kernel(uni) vce(cluster city_id) +eststo C +estadd local kern = "Uniform" +estadd scalar band = 11.3 +rdrobust number dist1 if year>=2015, p(1) h(11.3) covs(cutoff) kernel(uni) vce(cluster city_id) +eststo D +estadd local kern = "Uniform" +estadd scalar band = 11.3 +esttab A B C D using "$out_files/Table3c.tex", tex replace b(a2) se(a2) label noconstant nolines nogaps compress fragment nonumbers noobs mlabels(none) keep(RD_Estimate) coeflabels(RD_Estimate "Estimate") stats(kern band, labels("Kernel" "Bandwidth")) starlevels(* 0.10 ** 0.05 *** 0.01) + + +**============================================================================== +* Table 4 +use "$data_files/monitor_api.dta", clear + +gen log_pm25api = log(pm25api) +gen log_pm10api = log(pm10api) + +gen reassign = (year == 2017) +replace reassign = 1 if year == 2016 & month >= 11 + +rename pm25 AOD +label variable AOD "AOD" +label variable reassign "Reassigned" + +eststo clear +reghdfe log_pm25api AOD pre tem_mean, a(monitor_id year#month) cl(city_id) +eststo A +estadd ysumm, mean +reghdfe log_pm25api AOD pre tem_mean if ~compare, a(monitor_id year#month) cl(city_id) +eststo B +estadd ysumm, mean +reghdfe log_pm25api AOD c.AOD#c.reassign pre tem_mean if ~compare, a(monitor_id year#month) cl(city_id) +eststo C +estadd ysumm, mean +reghdfe log_pm25api AOD pre tem_mean if compare, a(monitor_id year#month) cl(city_id) +eststo D +estadd ysumm, mean +reghdfe log_pm25api AOD c.AOD#c.reassign pre tem_mean if compare, a(monitor_id year#month) cl(city_id) +eststo E +estadd ysumm, mean + +use "$data_files/city_pm.dta", clear + +label variable post1 "Post" +label variable number "\# Mon" + +gen reassign = (year == 2017) +replace reassign = 1 
if year == 2016 & month >= 10 +label variable reassign "Reassigned" + +reghdfe pm25 c.post1#c.number c.post1#c.number#c.reassign c.post1#c.area c.post1#c.pop, a(city_id year#month pred tem_meand age_year incentive2#time) cluster(city_id) +eststo F +estadd ysumm, mean + +use "$data_files/city_enf.dta", clear + +label variable post1 "Post" +label variable number "\# Mon" + +gen reassign = (year == 2017) +replace reassign = 1 if year == 2016 & quarter == 4 +label variable reassign "Reassigned" + +reghdfe log_any_air c.post1#c.number c.post1#c.number#c.reassign c.post1#c.area c.post1#c.pop, a(city_id year#quarter pred tem_meand age_year incentive2#time) cluster(city_id) +eststo G +estadd ysumm, mean +esttab A B C D E F G using "$out_files/Table4.tex", replace b(a2) noconstant se(a2) nolines nogaps compress fragment nonumbers label mlabels(none) collabels() keep(AOD c.AOD#c.reassign c.post1#c.number c.post1#c.number#c.reassign) drop() coeflabels(c.AOD#c.reassign "AOD $\times$ Reassigned" c.post1#c.number "\# Monitors" c.post1#c.number#c.reassign "\# Monitors $\times$ Reassigned") stats(ymean N, labels("Mean Outcome" "Observations")) starlevels(* 0.10 ** 0.05 *** 0.01) substitute(\_ _) tex + + diff --git a/110/replication_package/replication/Do-file/classify.py b/110/replication_package/replication/Do-file/classify.py new file mode 100644 index 0000000000000000000000000000000000000000..d30fe2a6e3c9eeded147bdfe79b6d2dcaa79067b --- /dev/null +++ b/110/replication_package/replication/Do-file/classify.py @@ -0,0 +1,110 @@ +import time +import csv +import os +import re +from bs4 import BeautifulSoup + +def FindKey(RecordText, KeyWords): + + if any(word in RecordText for word in KeyWords): + output = 1 + else: + output = 0 + + return output + + +KeyWords1 = ['气','烟','尘','二氧化硫','颗粒','脱硝','脱硫','炉','焚烧','NO','PM'] +KeyWords2 = ['污水','水污染','沉淀','沟','渠','COD'] +KeyWords3 = ['固体'] +KeyWords4 = ['未批先建','批建不符','未验先投','清理明细表','开工','环评','手续','三同时','未经验收'] +KeyWords5 = ['罚款','经济处罚','万元'] 
+KeyWords6 = ['停'] +KeyWords7 = ['改','维修'] + +htfile = "output.txt" +fo = open(htfile, "w", encoding='utf_8_sig') + + +with open('records.csv', 'r') as f: + reader = csv.reader(f) + records_list = list(reader) + + +for record in records_list: + dir_path = '/Volumes/forMac/list/'+str(record[0]) + address = dir_path+'/'+str(record[1]) + print(address) + + fo.write(str(record[1])) + soup = BeautifulSoup(open(address).read(), "html.parser") + + output1=0 + output2=0 + output3=0 + output4=0 + output5=0 + output6=0 + output7=0 + + tables = soup.find_all('table') + cells = soup.find_all(style="color:#eeeeee;background-color:#3399FF") + + if len(cells)==0: + output1=0 + output2=0 + output3=0 + else: + if len(tables)==0: + text = soup.get_text() + else: + try: + block = cells[-1].find_parents('tr')[-1] + row = block.find_all('td') + if len(row)<4: + text = soup.get_text() + else: + text = block.get_text() + except: + text = soup.get_text() + + output1 = FindKey(text, KeyWords1) + output2 = FindKey(text, KeyWords2) + output3 = FindKey(text, KeyWords3) + output4 = FindKey(text, KeyWords4) + output5 = FindKey(text, KeyWords5) + output6 = FindKey(text, KeyWords6) + output7 = FindKey(text, KeyWords7) + + total=output1+output2+output3+output4 + + if total==0: + text = soup.get_text() + output4 = FindKey(text, KeyWords4) + + fo.write(",") + fo.write(str(output1)) + fo.write(",") + fo.write(str(output2)) + fo.write(",") + fo.write(str(output3)) + fo.write(",") + fo.write(str(output4)) + fo.write(",") + fo.write(str(output5)) + fo.write(",") + fo.write(str(output6)) + fo.write(",") + fo.write(str(output7)) + fo.write("\n") + +fo.close() + + + + + + + + + diff --git a/110/replication_package/replication/ado/personal/ols_spatial_HAC.ado b/110/replication_package/replication/ado/personal/ols_spatial_HAC.ado new file mode 100644 index 0000000000000000000000000000000000000000..b4633b13e69a6edde4a4009b072f58d0c283e54e --- /dev/null +++ 
b/110/replication_package/replication/ado/personal/ols_spatial_HAC.ado @@ -0,0 +1,408 @@ +program ols_spatial_HAC, eclass byable(recall) +version 11 +syntax varlist(ts fv min=2) [if] [in], /// + lat(varname numeric) lon(varname numeric) /// + Timevar(varname numeric) Panelvar(varname numeric) [LAGcutoff(integer 0) DISTcutoff(real 1) /// + DISPlay star bartlett dropvar] + +/*--------PARSING COMMANDS AND SETUP-------*/ + +capture drop touse +marksample touse // indicator for inclusion in the sample +gen touse = `touse' + +//parsing variables +loc Y = word("`varlist'",1) + +loc listing "`varlist'" + +loc X "" +scalar k = 0 + +//make sure that Y is not included in the other_var list +foreach i of loc listing { + if "`i'" ~= "`Y'"{ + loc X "`X' `i'" + scalar k = k + 1 // # indep variables + + } +} + + +//Kyle Meng's code to drop omitted variables that Stata would drop due to collinearity + +if "`dropvar'" == "dropvar"{ + + quietly reg `Y' `X' if `touse', nocons + + mat omittedMat=e(b) + local newVarList="" + local i=1 + scalar k = 0 //replace the old k if this option is selected + + foreach var of varlist `X'{ + if omittedMat[1,`i']!=0{ + loc newVarList "`newVarList' `var'" + scalar k = k + 1 + } + local i=`i'+1 + } + + loc X "`newVarList'" +} + +//generating a function of the included obs +quietly count if `touse' +scalar n = r(N) // # obs +scalar n_obs = r(N) + +/*--------FIRST DO OLS, STORE RESULTS-------*/ + + +quietly: reg `Y' `X' if `touse', nocons +estimates store OLS + +//est tab OLS, stats(N r2) + +/*--------SECOND, IMPORT ALL VALUES INTO MATA-------*/ + +mata{ + +Y_var = st_local("Y") //importing variable assignments to mata +X_var = st_local("X") +lat_var = st_local("lat") +lon_var = st_local("lon") +time_var = st_local("timevar") +panel_var = st_local("panelvar") + +//NOTE: values are all imported as "views" instead of being copied and pasted as Mata data because it is faster, however none of the matrices are changed in any way, so it should not permanently 
affect the data. + +st_view(Y=.,.,tokens(Y_var),"touse") //importing variables vectors to mata +st_view(X=.,.,tokens(X_var),"touse") +st_view(lat=.,.,tokens(lat_var),"touse") +st_view(lon=.,.,tokens(lon_var),"touse") +st_view(time=.,.,tokens(time_var),"touse") +st_view(panel=.,.,tokens(panel_var),"touse") + +k = st_numscalar("k") //importing other parameters +n = st_numscalar("n") +b = st_matrix("e(b)") // (estimated coefficients, row vector) +lag_var = st_local("lagcutoff") +lag_cutoff = strtoreal(lag_var) +dist_var = st_local("distcutoff") +dist_cutoff = strtoreal(dist_var) + +XeeX = J(k, k, 0) //set variance-covariance matrix equal to zeros + + +/*--------THIRD, CORRECT VCE FOR SPATIAL CORR-------*/ + +timeUnique = uniqrows(time) +Ntime = rows(timeUnique) // # of obs. periods + +for (ti = 1; ti <= Ntime; ti++){ + + + + // 1 if in year ti, 0 otherwise: + + rows_ti = time:==timeUnique[ti,1] + + //get subsets of variables for time ti (without changing original matrix) + + Y1 = select(Y, rows_ti) + X1 = select(X, rows_ti) + lat1 = select(lat, rows_ti) + lon1 = select(lon, rows_ti) + e1 = Y1 - X1*b' + + n1 = length(Y1) // # obs for period ti + + //loop over all observations in period ti + + for (i = 1; i <=n1; i++){ + + + //---------------------------------------------------------------- + // step a: get non-parametric weight + + //This is a Euclidean distance scale IN KILOMETERS specific to i + + lon_scale = cos(lat1[i,1]*pi()/180)*111 + lat_scale = 111 + + + // Distance scales lat and lon degrees differently depending on + // latitude. 
The distance here assumes a distortion of Euclidean + // space around the location of 'i' that is approximately correct for + // displacements around the location of 'i' + // + // Note: 1 deg lat = 111 km + // 1 deg lon = 111 km * cos(lat) + + distance_i = ((lat_scale*(lat1[i,1]:-lat1)):^2 + /// + (lon_scale*(lon1[i,1]:-lon1)):^2):^0.5 + + + + // this sets all observations beyon dist_cutoff to zero, and weights all nearby observations equally [this kernal is isotropic] + + window_i = distance_i :<= dist_cutoff + + //---------------------------------------------------------------- + // adjustment for the weights if a "bartlett" kernal is selected as an option + + if ("`bartlett'"=="bartlett"){ + + // this weights observations as a linear function of distance + // that is zero at the cutoff distance + + weight_i = 1:- distance_i:/dist_cutoff + + window_i = window_i:*weight_i + } + + + //---------------------------------------------------------------- + // step b: construct X'e'eX for the given observation + + XeeXh = ((X1[i,.]'*J(1,n1,1)*e1[i,1]):*(J(k,1,1)*e1':*window_i'))*X1 + + //add each new k x k matrix onto the existing matrix (will be symmetric) + + XeeX = XeeX + XeeXh + + } //i +} // ti + + + +// ----------------------------------------------------------------- +// generate the VCE for only cross-sectional spatial correlation, +// return it for comparison + +invXX = luinv(X'*X) * n + +XeeX_spatial = XeeX / n + +V = invXX * XeeX_spatial * invXX / n + +// Ensures that the matrix is symmetric +// in theory, it should be already, but it may not be due to rounding errors for large datasets +V = (V+V')/2 + +st_matrix("V_spatial", V) + +} // mata + + +//------------------------------------------------------------------ +// storing old statistics about the estimate so postestimation can be used + +matrix beta = e(b) +scalar r2_old = e(r2) +scalar df_m_old = e(df_m) +scalar df_r_old = e(df_r) +scalar rmse_old = e(rmse) +scalar mss_old = e(mss) +scalar rss_old = e(rss) 
+scalar r2_a_old = e(r2_a) + +// the row and column names of the new VCE must match the vector b + +matrix colnames V_spatial = `X' +matrix rownames V_spatial = `X' + +// this sets the new estimates as the most recent model + +ereturn post beta V_spatial, esample(`touse') + +// then filling back in all the parameters for postestimation + +ereturn local cmd = "ols_spatial" + +ereturn scalar N = n_obs + +ereturn scalar r2 = r2_old +ereturn scalar df_m = df_m_old +ereturn scalar df_r = df_r_old +ereturn scalar rmse = rmse_old +ereturn scalar mss = mss_old +ereturn scalar rss = rss_old +ereturn scalar r2_a = r2_a_old + +ereturn local title = "Linear regression" +ereturn local depvar = "`Y'" +ereturn local predict = "regres_p" +ereturn local model = "ols" +ereturn local estat_cmd = "regress_estat" + +//storing these estimates for comparison to OLS and the HAC estimates + +estimates store spatial + + + +/*--------FOURTH, CORRECT VCE FOR SERIAL CORR-------*/ + +mata{ + +panelUnique = uniqrows(panel) +Npanel = rows(panelUnique) // # of panels + +for (pi = 1; pi <= Npanel; pi++){ + + // 1 if in panel pi, 0 otherwise: + + rows_pi = panel:==panelUnique[pi,1] + + //get subsets of variables for panel pi (without changing original matrix) + + Y1 = select(Y, rows_pi) + X1 = select(X, rows_pi) + time1 = select(time, rows_pi) + e1 = Y1 - X1*b' + + n1 = length(Y1) // # obs for panel pi + + //loop over all observations in panel pi + + for (t = 1; t <=n1; t++){ + + // ---------------------------------------------------------------- + // step a: get non-parametric weight + + // this is the weight for Newey-West with a Bartlett kernal + + //weight = (1:-abs(time1[t,1] :- time1))/(lag_cutoff+1) // correction: need to removing parentheses to compute inter-temporal (6/10/18) + weight = 1:-abs(time1[t,1] :- time1)/(lag_cutoff+1) + + + // obs var far enough apart in time are prescribed to have no estimated + // correlation (Greene recomments lag_cutoff >= T^0.25 {pg 546}) + + window_t = 
(abs(time1[t,1]:- time1) :<= lag_cutoff) :* weight + + //this is required so diagonal terms in var-covar matrix are not + //double counted (since they were counted once above for the spatial + //correlation estimates: + + window_t = window_t :* (time1[t,1] :!= time1) + + // ---------------------------------------------------------------- + // step b: construct X'e'eX for given observation + + XeeXh = ((X1[t,.]'*J(1,n1,1)*e1[t,1]):*(J(k,1,1)*e1':*window_t'))*X1 + + //add each new k x k matrix onto the existing matrix (will be symmetric) + + XeeX = XeeX + XeeXh + + } // t +} // pi + + + + +// ----------------------------------------------------------------- +// generate the VCE for x-sectional spatial correlation and serial correlation + +XeeX_spatial_HAC = XeeX / n + +V = invXX * XeeX_spatial_HAC * invXX / n + +// Ensures that the matrix is symmetric +// in theory, it should be already, but it may not be due to rounding errors for large datasets +V = (V+V')/2 + +st_matrix("V_spatial_HAC", V) + +} // mata + +//------------------------------------------------------------------ +//storing results + +matrix beta = e(b) + +// the row and column names of the new VCE must match the vector b + +matrix colnames V_spatial_HAC = `X' +matrix rownames V_spatial_HAC = `X' + +// this sets the new estimates as the most recent model + +marksample touse // indicator for inclusion in the sample + +ereturn post beta V_spatial_HAC, esample(`touse') + +// then filling back in all the parameters for postestimation + +ereturn local cmd = "ols_spatial_HAC" + +ereturn scalar N = n_obs +ereturn scalar r2 = r2_old +ereturn scalar df_m = df_m_old +ereturn scalar df_r = df_r_old +ereturn scalar rmse = rmse_old +ereturn scalar mss = mss_old +ereturn scalar rss = rss_old +ereturn scalar r2_a = r2_a_old + +ereturn local title = "Linear regression" +ereturn local depvar = "`Y'" +ereturn local predict = "regres_p" +ereturn local model = "ols" +ereturn local estat_cmd = "regress_estat" + +//storing 
these estimates for comparison to OLS and the HAC estimates + +estimates store spatHAC + +//------------------------------------------------------------------ +//displaying results + +disp as txt " " +disp as txt "OLS REGRESSION" +disp as txt " " +disp as txt "SE CORRECTED FOR CROSS-SECTIONAL SPATIAL DEPENDANCE" +disp as txt " AND PANEL-SPECIFIC SERIAL CORRELATION" +disp as txt " " +disp as txt "DEPENDANT VARIABLE: `Y'" +disp as txt "INDEPENDANT VARIABLES: `X'" +disp as txt " " +disp as txt "SPATIAL CORRELATION KERNAL CUTOFF: `distcutoff' KM" + +if "`bartlett'" == "bartlett" { + disp as txt "(NOTE: LINEAR BARTLETT WINDOW USED FOR SPATIAL KERNAL)" +} + +disp as txt "SERIAL CORRELATION KERNAL CUTOFF: `lagcutoff' PERIODS" + +ereturn display // standard Stata regression table format + +// displaying different SE if option selected + +if "`display'" == "display"{ + disp as txt " " + disp as txt "STANDARD ERRORS UNDER OLS, WITH SPATIAL CORRECTION AND WITH SPATIAL AND SERIAL CORRECTION:" + estimates table OLS spatial spatHAC, b(%7.3f) se(%7.3f) t(%7.3f) stats(N r2) +} + +if "`star'" == "star"{ + disp as txt " " + disp as txt "STANDARD ERRORS UNDER OLS, WITH SPATIAL CORRECTION AND WITH SPATIAL AND SERIAL CORRECTION:" + estimates table OLS spatial spatHAC, b(%7.3f) star(0.10 0.05 0.01) +} + +//------------------------------------------------------------------ +// cleaning up Mata environment + +capture mata mata drop V invXX XeeX XeeXh XeeX_spatial_HAC window_t window_i weight t i ti pi X1 Y1 e1 time1 n1 lat lon lat1 lon1 lat_scale lon_scale rows_ti rows_pi timeUnique panelUnique Ntime Npanel X X_var XeeX_spatial Y Y_var b dist_cutoff dist_var distance_i k lag_cutoff lag_var lat_var lon_var n panel panel_var time time_var weight_i + +/* +if "`bartlett'" == "bartlett" { + capture mata mata drop weight_i +} +*/ + +end \ No newline at end of file diff --git a/110/replication_package/replication/ado/personal/reg2hdfespatial.ado 
b/110/replication_package/replication/ado/personal/reg2hdfespatial.ado new file mode 100644 index 0000000000000000000000000000000000000000..67fb3daf1dacd29d14cd5c08d73fcba10cbfd2e7 --- /dev/null +++ b/110/replication_package/replication/ado/personal/reg2hdfespatial.ado @@ -0,0 +1,202 @@ +capture program drop reg2hdfespatial +*! Thiemo Fetzer 4/2015: WRAPPER PROGRAM TO ESTIMATE SPATIAL HAC FOR OLS REGRESSION MODELS WITH HIGH DIMENSIONAL FIXED EFFECTS +*! The function uses the reg2hdfe procedure to demean the data by the time- and panel-variable you specify +*! This ensures that you do not compute large variance covariance matrices to compute +*! Spatial HAC errors for coefficients you do not actually care about. +*! Updates available on http://www.trfetzer.com +*! Please email me in case you find any bugs or have suggestions for improvement. +*! Please cite: Fetzer, T. (2014) "Can Workfare Programs Moderate Violence? Evidence from India", STICERD Working Paper. +*! Also credit Sol Hsiang. +*! Hsiang, S. M. (2010). Temperatures and cyclones strongly associated with economic production in the Caribbean and Central America. PNAS, 107(35), 15367–72. +*! The Use of the function is simple +*! reg2hdfespatial Yvar Xvarlist, lat(latvar) lon(lonvar) Timevar(tvar) Panelvar(pvar) [DISTcutoff(#) LAGcutoff(#) bartlett DISPlay star dropvar demean altfetime(varname) altfepanel(varname)] +*! +*! +*! You can also specify other fixed effects: +*! reg2hdfespatial Yvar Xvarlist ,timevar(year) panelvar(district) altfetime(regionyear) lat(y) lon(x) distcutoff(500) lagcutoff(20) +*! +*! here I specify the time variable as the year, but I demean the data first +*! by region x year fixed effects. +*! This turns out to matter as the OLS_Spatial_HAC for the autocorrelation correction which you may want +*! to be done at a level different from the level at which you have the time fixed effects specified. 
+/*----------------------------------------------------------------------------- + + Syntax: + + reg2hdfespatial Yvar Xvarlist, lat(latvar) lon(lonvar) Timevar(tvar) Panelvar(pvar) [DISTcutoff(#) LAGcutoff(#) bartlett DISPlay star dropvar demean altfetime(varname) altfepanel(varname)] + + -----------------------------------------------------------------------------*/ + +program reg2hdfespatial, eclass byable(recall) +//version 9.2 +version 11 +syntax varlist(ts fv min=2) [if] [in], /// + lat(varname numeric) lon(varname numeric) /// + Timevar(varname numeric) Panelvar(varname numeric) [LAGcutoff(integer 0) DISTcutoff(real 1) /// + DISPlay star bartlett dropvar altfetime(varname) altfepanel(varname) ] + +/*--------PARSING COMMANDS AND SETUP-------*/ + +preserve +if "`if'"~="" { + qui keep `if' +} + + +capture drop touse +marksample touse // indicator for inclusion in the sample +gen touse = `touse' + +*keep if touse +//parsing variables +loc Y = word("`varlist'",1) + +loc listing "`varlist'" + + +loc X "" +scalar k_variables = 0 + +//make sure that Y is not included in the other_var list +foreach i of loc listing { + if "`i'" ~= "`Y'"{ + loc X "`X' `i'" + scalar k_variables = k_variables + 1 // # indep variables + + } +} +local wdir `c(pwd)' + +tmpdir returns r(tmpdir): +local tdir `r(tmpdir)' + + +**clear temp folder of existing files +qui cd "`tdir'" +local tempfiles : dir . 
files "*.dta" +foreach f in `tempfiles' { + erase `f' +} + +quietly { +if("`altfepanel'" !="" & "`altfetime'" !="") { +di "CASE 1" +reg2hdfe `Y' `X' `lat' `lon' `timevar' `panelvar' , id1(`altfepanel') id2(`altfetime') out("`tdir'") noregress +loc iteratevarlist "`Y' `X' `lat' `lon' `timevar' `panelvar'" +reg2hdfe `Y' `X' , id1(`altfepanel') id2(`altfetime') +} +if("`altfepanel'" =="" & "`altfetime'" !="") { +di "CASE 2" + +reg2hdfe `Y' `X' `lat' `lon' `timevar' , id1(`panelvar') id2(`altfetime') out("`tdir'") noregress +loc iteratevarlist "`Y' `X' `lat' `lon' `timevar' " + +reg2hdfe `Y' `X' , id1(`panelvar') id2(`altfetime') +} +if("`altfepanel'" !="" & "`altfetime'" =="") { +di "CASE 3" + +reg2hdfe `Y' `X' `lat' `lon' `panelvar' , id1(`altfepanel') id2(`timevar') out("`tdir'") noregress +reg2hdfe `Y' `X' , id1(`altfepanel') id2(`timevar') +loc iteratevarlist "`Y' `X' `lat' `lon' `panelvar'" +} +if("`altfepanel'" =="" & "`altfetime'" =="") { +di "CASE 4" +reg2hdfe `Y' `X' `lat' `lon' , id1(`panelvar') id2(`timevar') out("`tdir'") noregress +loc iteratevarlist "`Y' `X' `lat' `lon'" +reg2hdfe `Y' `X' , id1(`panelvar') id2(`timevar') +} + + foreach var of varlist `X' { + lincom `var' + if `r(se)' != 0 { + loc newVarList "`newVarList' `var'" + scalar k_variables = k_variables + 1 + } + } + + loc XX "`newVarList'" + + +/* From reg2hdfe.ado */ +tempfile tmp1 tmp2 tmp3 readdata + + use _ids, clear + sort __uid + qui save "`tmp1'", replace + if "`cluster'"!="" { + merge __uid using _clustervar + if r(min)0 + if _rc!=0 { + di as error "xq() must contain only positive integers." + exit + } + + if ("`controls'`absorb'"!="") di as text "warning: xq() is specified in combination with controls() or absorb(). note that binning takes places after residualization, so the xq variable should contain bins of the residuals." + } + if `nquantiles'!=20 & ("`xq'"!="" | "`discrete'"!="") { + di as error "Cannot specify nquantiles in combination with discrete or an xq variable." 
+ exit + } + if "`reportreg'"!="" & !inlist("`linetype'","lfit","qfit") { + di as error "Cannot specify 'reportreg' when no fit line is being created." + exit + } + if "`replace'"=="" { + if `"`savegraph'"'!="" { + if regexm(`"`savegraph'"',"\.[a-zA-Z0-9]+$") confirm new file `"`savegraph'"' + else confirm new file `"`savegraph'.gph"' + } + if `"`savedata'"'!="" { + confirm new file `"`savedata'.csv"' + confirm new file `"`savedata'.do"' + } + } + + * Mark sample (reflects the if/in conditions, and includes only nonmissing observations) + marksample touse + markout `touse' `by' `xq' `controls' `absorb', strok + qui count if `touse' + local samplesize=r(N) + local touse_first=_N-`samplesize'+1 + local touse_last=_N + + * Parse varlist into y-vars and x-var + local x_var=word("`varlist'",-1) + local y_vars=regexr("`varlist'"," `x_var'$","") + local ynum=wordcount("`y_vars'") + + * Check number of unique byvals & create local storing byvals + if "`by'"!="" { + local byvarname `by' + + capture confirm numeric variable `by' + if _rc { + * by-variable is string => generate a numeric version + tempvar by + tempname bylabel + egen `by'=group(`byvarname'), lname(`bylabel') + } + + local bylabel `:value label `by'' /*catch value labels for numeric by-vars too*/ + + tempname byvalmatrix + qui tab `by' if `touse', nofreq matrow(`byvalmatrix') + + local bynum=r(r) + forvalues i=1/`bynum' { + local byvals `byvals' `=`byvalmatrix'[`i',1]' + } + } + else local bynum=1 + + + ****** Create residuals ****** + + if (`"`controls'`absorb'"'!="") quietly { + + * Parse absorb to define the type of regression to be used + if `"`absorb'"'!="" { + local regtype "areg" + local absorb "absorb(`absorb')" + } + else { + local regtype "reg" + } + + * Generate residuals + + local firstloop=1 + foreach var of varlist `x_var' `y_vars' { + tempvar residvar + `regtype' `var' `controls' `wt' if `touse', `absorb' + predict `residvar' if e(sample), residuals + if ("`addmean'"!="noaddmean") { + summarize 
`var' `wt' if `touse', meanonly + replace `residvar'=`residvar'+r(mean) + } + + label variable `residvar' "`var'" + if `firstloop'==1 { + local x_r `residvar' + local firstloop=0 + } + else local y_vars_r `y_vars_r' `residvar' + } + + } + else { /*absorb and controls both empty, no need for regression*/ + local x_r `x_var' + local y_vars_r `y_vars' + } + + + ****** Regressions for fit lines ****** + + if ("`reportreg'"=="") local reg_verbosity "quietly" + + if inlist("`linetype'","lfit","qfit") `reg_verbosity' { + + * If doing a quadratic fit, generate a quadratic term in x + if "`linetype'"=="qfit" { + tempvar x_r2 + gen `x_r2'=`x_r'^2 + } + + * Create matrices to hold regression results + tempname e_b_temp + forvalues i=1/`ynum' { + tempname y`i'_coefs + } + + * LOOP over by-vars + local counter_by=1 + if ("`by'"=="") local noby="noby" + foreach byval in `byvals' `noby' { + + * LOOP over rd intervals + tokenize "`rd'" + local counter_rd=1 + + while ("`1'"!="" | `counter_rd'==1) { + + * display text headers + if "`reportreg'"!="" { + di "{txt}{hline}" + if ("`by'"!="") { + if ("`bylabel'"=="") di "-> `byvarname' = `byval'" + else { + di "-> `byvarname' = `: label `bylabel' `byval''" + } + } + if ("`rd'"!="") { + if (`counter_rd'==1) di "RD: `x_var'<=`1'" + else if ("`2'"!="") di "RD: `x_var'>`1' & `x_var'<=`2'" + else di "RD: `x_var'>`1'" + } + } + + * set conditions on reg + local conds `touse' + + if ("`by'"!="" ) local conds `conds' & `by'==`byval' + + if ("`rd'"!="") { + if (`counter_rd'==1) local conds `conds' & `x_r'<=`1' + else if ("`2'"!="") local conds `conds' & `x_r'>`1' & `x_r'<=`2' + else local conds `conds' & `x_r'>`1' + } + + * LOOP over y-vars + local counter_depvar=1 + foreach depvar of varlist `y_vars_r' { + + * display text headers + if (`ynum'>1) { + if ("`controls'`absorb'"!="") local depvar_name : var label `depvar' + else local depvar_name `depvar' + di as text "{bf:y_var = `depvar_name'}" + } + + * perform regression + if 
("`reg_verbosity'"=="quietly") capture reg `depvar' `x_r2' `x_r' `wt' if `conds' + else capture noisily reg `depvar' `x_r2' `x_r' `wt' if `conds' + + * store results + if (_rc==0) matrix e_b_temp=e(b) + else if (_rc==2000) { + if ("`reg_verbosity'"=="quietly") di as error "no observations for one of the fit lines. add 'reportreg' for more info." + + if ("`linetype'"=="lfit") matrix e_b_temp=.,. + else /*("`linetype'"=="qfit")*/ matrix e_b_temp=.,.,. + } + else { + error _rc + exit _rc + } + + * relabel matrix row + if ("`by'"!="") matrix roweq e_b_temp = "by`counter_by'" + if ("`rd'"!="") matrix rownames e_b_temp = "rd`counter_rd'" + else matrix rownames e_b_temp = "=" + + * save to y_var matrix + if (`counter_by'==1 & `counter_rd'==1) matrix `y`counter_depvar'_coefs'=e_b_temp + else matrix `y`counter_depvar'_coefs'=`y`counter_depvar'_coefs' \ e_b_temp + + * increment depvar counter + local ++counter_depvar + } + + * increment rd counter + if (`counter_rd'!=1) mac shift + local ++counter_rd + + } + + * increment by counter + local ++counter_by + + } + + * relabel matrix column names + forvalues i=1/`ynum' { + if ("`linetype'"=="lfit") matrix colnames `y`i'_coefs' = "`x_var'" "_cons" + else if ("`linetype'"=="qfit") matrix colnames `y`i'_coefs' = "`x_var'^2" "`x_var'" "_cons" + } + + } + + ******* Define the bins ******* + + * Specify and/or create the xq var, as necessary + if "`xq'"=="" { + + if !(`touse_first'==1 & word("`:sortedby'",1)=="`x_r'") sort `touse' `x_r' + + if "`discrete'"=="" { /* xq() and discrete are not specified */ + + * Check whether the number of unique values > nquantiles, or <= nquantiles + capture mata: characterize_unique_vals_sorted("`x_r'",`touse_first',`touse_last',`nquantiles') + + if (_rc==0) { /* number of unique values <= nquantiles, set to discrete */ + local discrete discrete + if ("`genxq'"!="") di as text `"note: the x-variable has fewer unique values than the number of bins specified (`nquantiles'). 
It will therefore be treated as discrete, and genxq() will be ignored"' + + local xq `x_r' + local nquantiles=r(r) + if ("`by'"=="") { + tempname xq_boundaries xq_values + matrix `xq_boundaries'=r(boundaries) + matrix `xq_values'=r(values) + } + } + else if (_rc==134) { /* number of unique values > nquantiles, perform binning */ + if ("`genxq'"!="") local xq `genxq' + else tempvar xq + + if ("`fastxtile'"!="nofastxtile") fastxtile `xq' = `x_r' `wt' in `touse_first'/`touse_last', nq(`nquantiles') randvar(`randvar') randcut(`randcut') randn(`randn') + else xtile `xq' = `x_r' `wt' in `touse_first'/`touse_last', nq(`nquantiles') + + if ("`by'"=="") { + mata: characterize_unique_vals_sorted("`xq'",`touse_first',`touse_last',`nquantiles') + + if (r(r)!=`nquantiles') { + di as text "warning: nquantiles(`nquantiles') was specified, but only `r(r)' were generated. see help file under nquantiles() for explanation." + local nquantiles=r(r) + } + + tempname xq_boundaries xq_values + matrix `xq_boundaries'=r(boundaries) + matrix `xq_values'=r(values) + } + } + else { + error _rc + } + + } + + else { /* discrete is specified, xq() & genxq() are not */ + + if ("`controls'`absorb'"!="") di as text "warning: discrete is specified in combination with controls() or absorb(). note that binning takes places after residualization, so the residualized x-variable may contain many more unique values." 
+ + capture mata: characterize_unique_vals_sorted("`x_r'",`touse_first',`touse_last',`=`samplesize'/2') + + if (_rc==0) { + local xq `x_r' + local nquantiles=r(r) + if ("`by'"=="") { + tempname xq_boundaries xq_values + matrix `xq_boundaries'=r(boundaries) + matrix `xq_values'=r(values) + } + } + else if (_rc==134) { + di as error "discrete specified, but number of unique values is > (sample size/2)" + exit 134 + } + else { + error _rc + } + } + } + else { + + if !(`touse_first'==1 & word("`:sortedby'",1)=="`xq'") sort `touse' `xq' + + * set nquantiles & boundaries + mata: characterize_unique_vals_sorted("`xq'",`touse_first',`touse_last',`=`samplesize'/2') + + if (_rc==0) { + local nquantiles=r(r) + if ("`by'"=="") { + tempname xq_boundaries xq_values + matrix `xq_boundaries'=r(boundaries) + matrix `xq_values'=r(values) + } + } + else if (_rc==134) { + di as error "discrete specified, but number of unique values is > (sample size/2)" + exit 134 + } + else { + error _rc + } + } + + ********** Compute scatter points ********** + + if ("`by'"!="") { + sort `touse' `by' `xq' + tempname by_boundaries + mata: characterize_unique_vals_sorted("`by'",`touse_first',`touse_last',`bynum') + matrix `by_boundaries'=r(boundaries) + } + + forvalues b=1/`bynum' { + if ("`by'"!="") { + mata: characterize_unique_vals_sorted("`xq'",`=`by_boundaries'[`b',1]',`=`by_boundaries'[`b',2]',`nquantiles') + tempname xq_boundaries xq_values + matrix `xq_boundaries'=r(boundaries) + matrix `xq_values'=r(values) + } + /* otherwise xq_boundaries and xq_values are defined above in the binning code block */ + + * Define x-means + tempname xbin_means + if ("`discrete'"=="discrete") { + matrix `xbin_means'=`xq_values' + } + else { + means_in_boundaries `x_r' `wt', bounds(`xq_boundaries') `medians' + matrix `xbin_means'=r(means) + } + + * LOOP over y-vars to define y-means + local counter_depvar=0 + foreach depvar of varlist `y_vars_r' { + local ++counter_depvar + + means_in_boundaries `depvar' `wt', 
bounds(`xq_boundaries') `medians' + + * store to matrix + if (`b'==1) { + tempname y`counter_depvar'_scatterpts + matrix `y`counter_depvar'_scatterpts' = `xbin_means',r(means) + } + else { + * make matrices conformable before right appending + local rowdiff=rowsof(`y`counter_depvar'_scatterpts')-rowsof(`xbin_means') + if (`rowdiff'==0) matrix `y`counter_depvar'_scatterpts' = `y`counter_depvar'_scatterpts',`xbin_means',r(means) + else if (`rowdiff'>0) matrix `y`counter_depvar'_scatterpts' = `y`counter_depvar'_scatterpts', ( (`xbin_means',r(means)) \ J(`rowdiff',2,.) ) + else /*(`rowdiff'<0)*/ matrix `y`counter_depvar'_scatterpts' = ( `y`counter_depvar'_scatterpts' \ J(-`rowdiff',colsof(`y`counter_depvar'_scatterpts'),.) ) ,`xbin_means',r(means) + } + } + } + + *********** Perform Graphing *********** + + * If rd is specified, prepare xline parameters + if "`rd'"!="" { + foreach xval in "`rd'" { + local xlines `xlines' xline(`xval', lpattern(dash) lcolor(gs8)) + } + } + + * Fill colors if missing + if `"`colors'"'=="" local colors /// + navy maroon forest_green dkorange teal cranberry lavender /// + khaki sienna emidblue emerald brown erose gold bluishgray /// + /* lime magenta cyan pink blue */ + if `"`mcolors'"'=="" { + if (`ynum'==1 & `bynum'==1 & "`linetype'"!="connect") local mcolors `: word 1 of `colors'' + else local mcolors `colors' + } + if `"`lcolors'"'=="" { + if (`ynum'==1 & `bynum'==1 & "`linetype'"!="connect") local lcolors `: word 2 of `colors'' + else local lcolors `colors' + } + local num_mcolor=wordcount(`"`mcolors'"') + local num_lcolor=wordcount(`"`lcolors'"') + + + * Prepare connect & msymbol options + if ("`linetype'"=="connect") local connect "c(l)" + if "`msymbols'"!="" { + local symbol_prefix "msymbol(" + local symbol_suffix ")" + } + + *** Prepare scatters + + * c indexes which color is to be used + local c=0 + + local counter_series=0 + + * LOOP over by-vars + local counter_by=0 + if ("`by'"=="") local noby="noby" + foreach byval in 
`byvals' `noby' { + local ++counter_by + + local xind=`counter_by'*2-1 + local yind=`counter_by'*2 + + * LOOP over y-vars + local counter_depvar=0 + foreach depvar of varlist `y_vars' { + local ++counter_depvar + local ++c + + * LOOP over rows (each row contains a coordinate pair) + local row=1 + local xval=`y`counter_depvar'_scatterpts'[`row',`xind'] + local yval=`y`counter_depvar'_scatterpts'[`row',`yind'] + + if !missing(`xval',`yval') { + local ++counter_series + local scatters `scatters' (scatteri + if ("`savedata'"!="") { + if ("`by'"=="") local savedata_scatters `savedata_scatters' (scatter `depvar' `x_var' + else local savedata_scatters `savedata_scatters' (scatter `depvar'_by`counter_by' `x_var'_by`counter_by' + } + } + else { + * skip the rest of this loop iteration + continue + } + + while (`xval'!=. & `yval'!=.) { + local scatters `scatters' `yval' `xval' + + local ++row + local xval=`y`counter_depvar'_scatterpts'[`row',`xind'] + local yval=`y`counter_depvar'_scatterpts'[`row',`yind'] + } + + * Add options + local scatter_options `connect' mcolor(`: word `c' of `mcolors'') lcolor(`: word `c' of `lcolors'') `symbol_prefix'`: word `c' of `msymbols''`symbol_suffix' + local scatters `scatters', `scatter_options') + if ("`savedata'"!="") local savedata_scatters `savedata_scatters', `scatter_options') + + + * Add legend + if "`by'"=="" { + if (`ynum'==1) local legend_labels off + else local legend_labels `legend_labels' lab(`counter_series' `depvar') + } + else { + if ("`bylabel'"=="") local byvalname=`byval' + else { + local byvalname `: label `bylabel' `byval'' + } + + if (`ynum'==1) local legend_labels `legend_labels' lab(`counter_series' `byvarname'=`byvalname') + else local legend_labels `legend_labels' lab(`counter_series' `depvar': `byvarname'=`byvalname') + } + if ("`by'"!="" | `ynum'>1) local order `order' `counter_series' + + } + + } + + *** Fit lines + + if inlist(`"`linetype'"',"lfit","qfit") { + + * c indexes which color is to be used + local c=0 
+ + local rdnum=wordcount("`rd'")+1 + + tempname fitline_bounds + if ("`rd'"=="") matrix `fitline_bounds'=.,. + else matrix `fitline_bounds'=.,`=subinstr("`rd'"," ",",",.)',. + + * LOOP over by-vars + local counter_by=0 + if ("`by'"=="") local noby="noby" + foreach byval in `byvals' `noby' { + local ++counter_by + + ** Set the column for the x-coords in the scatterpts matrix + local xind=`counter_by'*2-1 + + * Set the row to start seeking from + * note: each time we seek a coeff, it should be from row (rd_num)(counter_by-1)+counter_rd + local row0=( `rdnum' ) * (`counter_by' - 1) + + + * LOOP over y-vars + local counter_depvar=0 + foreach depvar of varlist `y_vars_r' { + local ++counter_depvar + local ++c + + * Find lower and upper bounds for the fit line + matrix `fitline_bounds'[1,1]=`y`counter_depvar'_scatterpts'[1,`xind'] + + local fitline_ub_rindex=`nquantiles' + local fitline_ub=. + while `fitline_ub'==. { + local fitline_ub=`y`counter_depvar'_scatterpts'[`fitline_ub_rindex',`xind'] + local --fitline_ub_rindex + } + matrix `fitline_bounds'[1,`rdnum'+1]=`fitline_ub' + + * LOOP over rd intervals + forvalues counter_rd=1/`rdnum' { + + if (`"`linetype'"'=="lfit") { + local coef_quad=0 + local coef_lin=`y`counter_depvar'_coefs'[`row0'+`counter_rd',1] + local coef_cons=`y`counter_depvar'_coefs'[`row0'+`counter_rd',2] + } + else if (`"`linetype'"'=="qfit") { + local coef_quad=`y`counter_depvar'_coefs'[`row0'+`counter_rd',1] + local coef_lin=`y`counter_depvar'_coefs'[`row0'+`counter_rd',2] + local coef_cons=`y`counter_depvar'_coefs'[`row0'+`counter_rd',3] + } + + if !missing(`coef_quad',`coef_lin',`coef_cons') { + local leftbound=`fitline_bounds'[1,`counter_rd'] + local rightbound=`fitline_bounds'[1,`counter_rd'+1] + + local fits `fits' (function `coef_quad'*x^2+`coef_lin'*x+`coef_cons', range(`leftbound' `rightbound') lcolor(`: word `c' of `lcolors'')) + } + } + } + } + } + + * Prepare y-axis title + if (`ynum'==1) local ytitle `y_vars' + else if (`ynum'==2) local 
ytitle : subinstr local y_vars " " " and " + else local ytitle : subinstr local y_vars " " "; ", all + + * Display graph + local graphcmd twoway `scatters' `fits', graphregion(fcolor(white)) `xlines' xtitle(`x_var') ytitle(`ytitle') legend(`legend_labels' order(`order')) `options' + if ("`savedata'"!="") local savedata_graphcmd twoway `savedata_scatters' `fits', graphregion(fcolor(white)) `xlines' xtitle(`x_var') ytitle(`ytitle') legend(`legend_labels' order(`order')) `options' + `graphcmd' + + ****** Save results ****** + + * Save graph + if `"`savegraph'"'!="" { + * check file extension using a regular expression + if regexm(`"`savegraph'"',"\.[a-zA-Z0-9]+$") local graphextension=regexs(0) + + if inlist(`"`graphextension'"',".gph","") graph save `"`savegraph'"', `replace' + else graph export `"`savegraph'"', `replace' + } + + * Save data + if ("`savedata'"!="") { + + *** Save a CSV containing the scatter points + tempname savedatafile + file open `savedatafile' using `"`savedata'.csv"', write text `replace' + + * LOOP over rows + forvalues row=0/`nquantiles' { + + *** Put the x-variable at the left + * LOOP over by-vals + forvalues counter_by=1/`bynum' { + + if (`row'==0) { /* write variable names */ + if "`by'"!="" local bynlabel _by`counter_by' + file write `savedatafile' "`x_var'`bynlabel'," + } + else { /* write data values */ + if (`row'<=`=rowsof(`y1_scatterpts')') file write `savedatafile' (`y1_scatterpts'[`row',`counter_by'*2-1]) "," + else file write `savedatafile' ".," + } + } + + *** Now y-variables at the right + + * LOOP over y-vars + local counter_depvar=0 + foreach depvar of varlist `y_vars' { + local ++counter_depvar + + * LOOP over by-vals + forvalues counter_by=1/`bynum' { + + + if (`row'==0) { /* write variable names */ + if "`by'"!="" local bynlabel _by`counter_by' + file write `savedatafile' "`depvar'`bynlabel'" + } + else { /* write data values */ + if (`row'<=`=rowsof(`y`counter_depvar'_scatterpts')') file write `savedatafile' 
(`y`counter_depvar'_scatterpts'[`row',`counter_by'*2]) + else file write `savedatafile' "." + } + + * unless this is the last variable in the dataset, add a comma + if !(`counter_depvar'==`ynum' & `counter_by'==`bynum') file write `savedatafile' "," + + } /* end by-val loop */ + + } /* end y-var loop */ + + file write `savedatafile' _n + + } /* end row loop */ + + file close `savedatafile' + di as text `"(file `savedata'.csv written containing saved data)"' + + + + *** Save a do-file with the commands to generate a nicely labeled dataset and re-create the binscatter graph + + file open `savedatafile' using `"`savedata'.do"', write text `replace' + + file write `savedatafile' `"insheet using `savedata'.csv"' _n _n + + if "`by'"!="" { + foreach var of varlist `x_var' `y_vars' { + local counter_by=0 + foreach byval in `byvals' { + local ++counter_by + if ("`bylabel'"=="") local byvalname=`byval' + else { + local byvalname `: label `bylabel' `byval'' + } + file write `savedatafile' `"label variable `var'_by`counter_by' "`var'; `byvarname'==`byvalname'""' _n + } + } + file write `savedatafile' _n + } + + file write `savedatafile' `"`savedata_graphcmd'"' _n + + file close `savedatafile' + di as text `"(file `savedata'.do written containing commands to process saved data)"' + + } + + *** Return items + ereturn post, esample(`touse') + + ereturn scalar N = `samplesize' + + ereturn local graphcmd `"`graphcmd'"' + if inlist("`linetype'","lfit","qfit") { + forvalues yi=`ynum'(-1)1 { + ereturn matrix y`yi'_coefs=`y`yi'_coefs' + } + } + + if ("`rd'"!="") { + tempname rdintervals + matrix `rdintervals' = (. \ `=subinstr("`rd'"," ","\",.)' ) , ( `=subinstr("`rd'"," ","\",.)' \ .) 
+ + forvalues i=1/`=rowsof(`rdintervals')' { + local rdintervals_labels `rdintervals_labels' rd`i' + } + matrix rownames `rdintervals' = `rdintervals_labels' + matrix colnames `rdintervals' = gt lt_eq + ereturn matrix rdintervals=`rdintervals' + } + + if ("`by'"!="" & "`by'"=="`byvarname'") { /* if a numeric by-variable was specified */ + forvalues i=1/`=rowsof(`byvalmatrix')' { + local byvalmatrix_labels `byvalmatrix_labels' by`i' + } + matrix rownames `byvalmatrix' = `byvalmatrix_labels' + matrix colnames `byvalmatrix' = `by' + ereturn matrix byvalues=`byvalmatrix' + } + +end + + +********************************** + +* Helper programs + +program define means_in_boundaries, rclass + version 12.1 + + syntax varname(numeric) [aweight fweight], BOUNDsmat(name) [MEDians] + + * Create convenient weight local + if ("`weight'"!="") local wt [`weight'`exp'] + + local r=rowsof(`boundsmat') + matrix means=J(`r',1,.) + + if ("`medians'"!="medians") { + forvalues i=1/`r' { + sum `varlist' in `=`boundsmat'[`i',1]'/`=`boundsmat'[`i',2]' `wt', meanonly + matrix means[`i',1]=r(mean) + } + } + else { + forvalues i=1/`r' { + _pctile `varlist' in `=`boundsmat'[`i',1]'/`=`boundsmat'[`i',2]' `wt', percentiles(50) + matrix means[`i',1]=r(r1) + } + } + + return clear + return matrix means=means + +end + +*** copy of: version 1.21 8oct2013 Michael Stepner, stepner@mit.edu +program define fastxtile, rclass + version 11 + + * Parse weights, if any + _parsewt "aweight fweight pweight" `0' + local 0 "`s(newcmd)'" /* command minus weight statement */ + local wt "`s(weight)'" /* contains [weight=exp] or nothing */ + + * Extract parameters + syntax newvarname=/exp [if] [in] [,Nquantiles(integer 2) Cutpoints(varname numeric) ALTdef /// + CUTValues(numlist ascending) randvar(varname numeric) randcut(real 1) randn(integer -1)] + + * Mark observations which will be placed in quantiles + marksample touse, novarlist + markout `touse' `exp' + qui count if `touse' + local popsize=r(N) + + if 
"`cutpoints'"=="" & "`cutvalues'"=="" { /***** NQUANTILES *****/ + if `"`wt'"'!="" & "`altdef'"!="" { + di as error "altdef option cannot be used with weights" + exit 198 + } + + if `randn'!=-1 { + if `randcut'!=1 { + di as error "cannot specify both randcut() and randn()" + exit 198 + } + else if `randn'<1 { + di as error "randn() must be a positive integer" + exit 198 + } + else if `randn'>`popsize' { + di as text "randn() is larger than the population. using the full population." + local randvar="" + } + else { + local randcut=`randn'/`popsize' + + if "`randvar'"!="" { + qui sum `randvar', meanonly + if r(min)<0 | r(max)>1 { + di as error "with randn(), the randvar specified must be in [0,1] and ought to be uniformly distributed" + exit 198 + } + } + } + } + + * Check if need to gen a temporary uniform random var + if "`randvar'"=="" { + if (`randcut'<1 & `randcut'>0) { + tempvar randvar + gen `randvar'=runiform() + } + * randcut sanity check + else if `randcut'!=1 { + di as error "if randcut() is specified without randvar(), a uniform r.v. 
will be generated and randcut() must be in (0,1)" + exit 198 + } + } + + * Mark observations used to calculate quantile boundaries + if ("`randvar'"!="") { + tempvar randsample + mark `randsample' `wt' if `touse' & `randvar'<=`randcut' + } + else { + local randsample `touse' + } + + * Error checks + qui count if `randsample' + local samplesize=r(N) + if (`nquantiles' > r(N) + 1) { + if ("`randvar'"=="") di as error "nquantiles() must be less than or equal to the number of observations [`r(N)'] plus one" + else di as error "nquantiles() must be less than or equal to the number of sampled observations [`r(N)'] plus one" + exit 198 + } + else if (`nquantiles' < 2) { + di as error "nquantiles() must be greater than or equal to 2" + exit 198 + } + + * Compute quantile boundaries + _pctile `exp' if `randsample' `wt', nq(`nquantiles') `altdef' + + * Store quantile boundaries in list + forvalues i=1/`=`nquantiles'-1' { + local cutvallist `cutvallist' r(r`i') + } + } + else if "`cutpoints'"!="" { /***** CUTPOINTS *****/ + + * Parameter checks + if "`cutvalues'"!="" { + di as error "cannot specify both cutpoints() and cutvalues()" + exit 198 + } + if "`wt'"!="" | "`randvar'"!="" | "`ALTdef'"!="" | `randcut'!=1 | `nquantiles'!=2 | `randn'!=-1 { + di as error "cutpoints() cannot be used with nquantiles(), altdef, randvar(), randcut(), randn() or weights" + exit 198 + } + + tempname cutvals + qui tab `cutpoints', matrow(`cutvals') + + if r(r)==0 { + di as error "cutpoints() all missing" + exit 2000 + } + else { + local nquantiles = r(r) + 1 + + forvalues i=1/`r(r)' { + local cutvallist `cutvallist' `cutvals'[`i',1] + } + } + } + else { /***** CUTVALUES *****/ + if "`wt'"!="" | "`randvar'"!="" | "`ALTdef'"!="" | `randcut'!=1 | `nquantiles'!=2 | `randn'!=-1 { + di as error "cutvalues() cannot be used with nquantiles(), altdef, randvar(), randcut(), randn() or weights" + exit 198 + } + + * parse numlist + numlist "`cutvalues'" + local cutvallist `"`r(numlist)'"' + local 
nquantiles=wordcount(`"`r(numlist)'"')+1 + } + + * Pick data type for quantile variable + if (`nquantiles'<=100) local qtype byte + else if (`nquantiles'<=32,740) local qtype int + else local qtype long + + * Create quantile variable + local cutvalcommalist : subinstr local cutvallist " " ",", all + qui gen `qtype' `varlist'=1+irecode(`exp',`cutvalcommalist') if `touse' + label var `varlist' "`nquantiles' quantiles of `exp'" + + * Return values + if ("`samplesize'"!="") return scalar n = `samplesize' + else return scalar n = . + + return scalar N = `popsize' + + tokenize `"`cutvallist'"' + forvalues i=`=`nquantiles'-1'(-1)1 { + return scalar r`i' = ``i'' + } + +end + + +version 12.1 +set matastrict on + +mata: + +void characterize_unique_vals_sorted(string scalar var, real scalar first, real scalar last, real scalar maxuq) { + // Inputs: a numeric variable, a starting & ending obs #, and a maximum number of unique values + // Requires: the data to be sorted on the specified variable within the observation boundaries given + // (no check is made that this requirement is satisfied) + // Returns: the number of unique values found + // the unique values found + // the observation boundaries of each unique value in the dataset + + + // initialize returned results + real scalar Nunique + Nunique=0 + + real matrix values + values=J(maxuq,1,.) + + real matrix boundaries + boundaries=J(maxuq,2,.) 
+ + // initialize computations + real scalar var_index + var_index=st_varindex(var) + + real scalar curvalue + real scalar prevvalue + + // perform computations + real scalar obs + for (obs=first; obs<=last; obs++) { + curvalue=_st_data(obs,var_index) + + if (curvalue!=prevvalue) { + Nunique++ + if (Nunique<=maxuq) { + prevvalue=curvalue + values[Nunique,1]=curvalue + boundaries[Nunique,1]=obs + if (Nunique>1) boundaries[Nunique-1,2]=obs-1 + } + else { + exit(error(134)) + } + + } + } + boundaries[Nunique,2]=last + + // return results + stata("return clear") + + st_numscalar("r(r)",Nunique) + st_matrix("r(values)",values[1..Nunique,.]) + st_matrix("r(boundaries)",boundaries[1..Nunique,.]) + +} + +end diff --git a/110/replication_package/replication/ado/plus/b/binscatter.sthlp b/110/replication_package/replication/ado/plus/b/binscatter.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..9ad362c5247f473c8d2539b72a6692d960df066e --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binscatter.sthlp @@ -0,0 +1,332 @@ +{smcl} +{* *! 
version 7.02 24nov2013}{...} +{viewerjumpto "Syntax" "binscatter##syntax"}{...} +{viewerjumpto "Description" "binscatter##description"}{...} +{viewerjumpto "Options" "binscatter##options"}{...} +{viewerjumpto "Examples" "binscatter##examples"}{...} +{viewerjumpto "Saved results" "binscatter##saved_results"}{...} +{viewerjumpto "Author" "binscatter##author"}{...} +{viewerjumpto "Acknowledgements" "binscatter##acknowledgements"}{...} +{title:Title} + +{p2colset 5 19 21 2}{...} +{p2col :{hi:binscatter} {hline 2}}Binned scatterplots{p_end} +{p2colreset}{...} + + +{marker syntax}{title:Syntax} + +{p 8 15 2} +{cmd:binscatter} +{varlist} {ifin} +{weight} +[{cmd:,} {it:options}] + + +{pstd} +where {it:varlist} is +{p_end} + {it:y_1} [{it:y_2} [...]] {it:x} + +{synoptset 26 tabbed}{...} +{synopthdr :options} +{synoptline} +{syntab :Main} +{synopt :{opth by(varname)}}plot separate series for each group (see {help binscatter##by_notes:important notes below}){p_end} +{synopt :{opt med:ians}}plot within-bin medians instead of means{p_end} + +{syntab :Bins} +{synopt :{opth n:quantiles(#)}}number of equal-sized bins to be created; default is {bf:20}{p_end} +{synopt :{opth gen:xq(varname)}}generate quantile variable containing the bins{p_end} +{synopt :{opt discrete}}each x-value to be used as a separate bin{p_end} +{synopt :{opth xq(varname)}}variable which already contains bins; bins therefore not recomputed{p_end} + +{syntab :Controls} +{synopt :{opth control:s(varlist)}}residualize the x & y variables on controls before plotting{p_end} +{synopt :{opth absorb(varname)}}residualize the x & y variables on a categorical variable{p_end} +{synopt :{opt noa:ddmean}}do not add the mean of each variable back to its residuals{p_end} + +{syntab :Fit Line} +{synopt :{opth line:type(binscatter##linetype:linetype)}}type of fit line; default is {bf:lfit}, may also be {bf:qfit}, {bf:connect}, or {bf:none}{p_end} +{synopt :{opth rd(numlist)}}create regression discontinuity at x-values{p_end} 
+{synopt :{opt reportreg}}display the regressions used to estimate the fit lines{p_end} + +{syntab :Graph Style} +{synopt :{cmdab:col:ors(}{it:{help colorstyle}list}{cmd:)}}ordered list of colors{p_end} +{synopt :{cmdab:mc:olors(}{it:{help colorstyle}list}{cmd:)}}overriding ordered list of colors for the markers{p_end} +{synopt :{cmdab:lc:olors(}{it:{help colorstyle}list}{cmd:)}}overriding ordered list of colors for the lines{p_end} +{synopt :{cmdab:m:symbols(}{it:{help symbolstyle}list}{cmd:)}}ordered list of symbols{p_end} +{synopt :{it:{help twoway_options}}}{help title options:titles}, {help legend option:legends}, {help axis options:axes}, added {help added line options:lines} and {help added text options:text}, + {help region options:regions}, {help name option:name}, {help aspect option:aspect ratio}, etc.{p_end} + +{syntab :Save Output} +{synopt :{opt savegraph(filename)}}save graph to file; format automatically detected from extension [ex: .gph .jpg .png]{p_end} +{synopt :{opt savedata(filename)}}save {it:filename}.csv containing scatterpoint data, and {it:filename}.do to process data into graph{p_end} +{synopt :{opt replace}}overwrite existing files{p_end} + +{syntab :fastxtile options} +{synopt :{opt nofastxtile}}use xtile instead of fastxtile{p_end} +{synopt :{opth randvar(varname)}}use {it:varname} to sample observations when computing quantile boundaries{p_end} +{synopt :{opt randcut(#)}}upper bound on {cmd:randvar()} used to cut the sample; default is {cmd:randcut(1)}{p_end} +{synopt :{opt randn(#)}}number of observations to sample when computing quantile boundaries{p_end} +{synoptline} +{p 4 6 2} +{opt aweight}s and {opt fweight}s are allowed; +see {help weight}. +{p_end} + + +{marker description}{...} +{title:Description} + +{pstd} +{opt binscatter} generates binned scatterplots, and is optimized for speed in large datasets. + +{pstd} +Binned scatterplots provide a non-parametric way of visualizing the relationship between two variables. 
+With a large number of observations, a scatterplot that plots every data point would become too crowded +to interpret visually. {cmd:binscatter} groups the x-axis variable into equal-sized bins, computes the +mean of the x-axis and y-axis variables within each bin, then creates a scatterplot of these data points. +The result is a non-parametric visualization of the conditional expectation function. + +{pstd} +{opt binscatter} provides built-in options to control for covariates before plotting the relationship +(see {help binscatter##controls:Controls}). Additionally, {cmd:binscatter} will plot fit lines based +on the underlying data, and can automatically handle regression discontinuities (see {help binscatter##fit_line:Fit Line}). + + +{marker options}{...} +{title:Options} + +{dlgtab:Main} + +{marker by_notes}{...} +{phang}{opth by(varname)} plots a separate series for each by-value. Both numeric and string by-variables +are supported, but numeric by-variables will have faster run times. + +{pmore}Users should be aware of the two ways in which {cmd:binscatter} does not condition on by-values: + +{phang3}1) When combined with {opt controls()} or {opt absorb()}, the program residualizes using the restricted model in which each covariate +has the same coefficient in each by-value sample. It does not run separate regressions for each by-value. If you wish to control for +covariates using a different model, you can residualize your x- and y-variables beforehand using your desired model then run {cmd:binscatter} +on the residuals you constructed. + +{phang3}2) When not combined with {opt discrete} or {opt xq()}, the program constructs a single set of bins +using the unconditional quantiles of the x-variable. It does not bin the x-variable separately for each by-value. 
+If you wish to use a different binning procedure (such as constructing equal-sized bins separately for each +by-value), you can construct a variable containing your desired bins beforehand, then run {cmd:binscatter} with {opt xq()}. + +{phang}{opt med:ians} creates the binned scatterplot using the median x- and y-value within each bin, rather than the mean. +This option only affects the scatter points; it does not, for instance, cause {opt linetype(lfit)} +to use quantile regression instead of OLS when drawing a fit line. + +{dlgtab:Bins} + +{phang}{opth n:quantiles(#)} specifies the number of equal-sized bins to be created. This is equivalent to the number of +points in each series. The default is {bf:20}. If the x-variable has fewer +unique values than the number of bins specified, then {opt discrete} will be automatically invoked, and no +binning will be performed. +This option cannot be combined with {opt discrete} or {opt xq()}. + +{pmore} +Binning is performed after residualization when combined with {opt controls()} or {opt absorb()}. +Note that the binning procedure is equivalent to running xtile, which in certain cases will generate +fewer quantile categories than specified. (e.g. {stata sysuse auto}; {stata xtile temp=mpg, nq(20)}; {stata tab temp}) + +{phang}{opth gen:xq(varname)} creates a categorical variable containing the computed bins. +This option cannot be combined with {opt discrete} or {opt xq()}. + +{phang}{opt discrete} specifies that the x-variable is discrete and that each x-value is to be treated as +a separate bin. {cmd:binscatter} will therefore plot the mean y-value associated with each x-value. +This option cannot be combined with {opt nquantiles()}, {opt genxq()} or {opt xq()}. + +{pmore} +In most cases, {opt discrete} should not be combined with {opt controls()} or {opt absorb()}, since residualization occurs before binning, +and in general the residual of a discrete variable will not be discrete. 
+ +{phang}{opth xq(varname)} specifies a categorical variable that contains the bins to be used, instead of {cmd:binscatter} generating them. +This option is typically used to avoid recomputing the bins needlessly when {cmd:binscatter} is being run repeatedly on the same sample +and with the same x-variable. +It may be convenient to use {opt genxq(binvar)} in the first iteration, and specify {opt xq(binvar)} in subsequent iterations. +Computing quantiles is computationally intensive in large datasets, so avoiding repetition can reduce run times considerably. +This option cannot be combined with {opt nquantiles()}, {opt genxq()} or {opt discrete}. + +{pmore} +Care should be taken when combining {opt xq()} with {opt controls()} or {opt absorb()}. Binning takes place after residualization, +so if the sample changes or the control variables change, the bins ought to be recomputed as well. + +{marker controls}{...} +{dlgtab:Controls} + +{phang}{opth control:s(varlist)} residualizes the x-variable and y-variables on the specified controls before binning and plotting. +To do so, {cmd:binscatter} runs a regression of each variable on the controls, generates the residuals, and adds the sample mean of +each variable back to its residuals. + +{phang}{opth absorb(varname)} absorbs fixed effects in the categorical variable from the x-variable and y-variables before binning and plotting. +To do so, {cmd:binscatter} runs an {helpb areg} of each variable with {it:absorb(varname)} and any {opt controls()} specified. It then generates the +residuals and adds the sample mean of each variable back to its residuals. + +{phang}{opt noa:ddmean} prevents the sample mean of each variable from being added back to its residuals, when combined with {opt controls()} or {opt absorb()}. + +{marker fit_line}{...} +{dlgtab:Fit Line} + +{marker linetype}{...} +{phang}{opth line:type(binscatter##linetype:linetype)} specifies the type of line plotted on each series. 
+The default is {bf:lfit}, which plots a linear fit line. Other options are {bf:qfit} for a quadratic fit line, +{bf:connect} for connected points, and {bf:none} for no line. + +{pmore}Linear or quadratic fit lines are estimated using the underlying data, not the binned scatter points. When combined with +{opt controls()} or {opt absorb()}, the fit line is estimated after the variables have been residualized. + +{phang}{opth rd(numlist)} draws a dashed vertical line at the specified x-values and generates regression discontinuities when combined with {opt line(lfit|qfit)}. +Separate fit lines will be estimated below and above each discontinuity. These estimations are performed using the underlying data, not the binned scatter points. + +{pmore}The regression discontinuities do not affect the binned scatter points in any way. +Specifically, a bin may contain a discontinuity within its range, and therefore include data from both sides of the discontinuity. + +{phang}{opt reportreg} displays the regressions used to estimate the fit lines in the results window. + +{dlgtab:Graph Style} + +{phang}{cmdab:col:ors(}{it:{help colorstyle}list}{cmd:)} specifies an ordered list of colors for each series + +{phang}{cmdab:mc:olors(}{it:{help colorstyle}list}{cmd:)} specifies an ordered list of colors for the markers of each series, which overrides any list provided in {opt colors()} + +{phang}{cmdab:lc:olors(}{it:{help colorstyle}list}{cmd:)} specifies an ordered list of colors for the line of each series, which overrides any list provided in {opt colors()} + +{phang}{cmdab:m:symbols(}{it:{help symbolstyle}list}{cmd:)} specifies an ordered list of symbols for each series + +{phang}{it:{help twoway_options}}: + +{pmore}Any unrecognized options added to {cmd:binscatter} are appended to the end of the twoway command which generates the +binned scatter plot. 
+ +{pmore}These can be used to control the graph {help title options:titles}, +{help legend option:legends}, {help axis options:axes}, added {help added line options:lines} and {help added text options:text}, +{help region options:regions}, {help name option:name}, {help aspect option:aspect ratio}, etc. + +{dlgtab:Save Output} + +{phang}{opt savegraph(filename)} saves the graph to a file. The format is automatically detected from the extension specified [ex: {bf:.gph .jpg .png}], +and either {cmd:graph save} or {cmd:graph export} is run. If no file extension is specified {bf:.gph} is assumed. + +{phang}{opt savedata(filename)} saves {it:filename}{bf:.csv} containing the binned scatterpoint data, and {it:filename}{bf:.do} which +loads the scatterpoint data, labels the variables, and plots the binscatter graph. + +{pmore}Note that the saved result {bf:e(graphcmd)} provides an alternative way of capturing the binscatter graph and editing it. + +{phang}{opt replace} specifies that files be overwritten if they already exist + +{dlgtab:fastxtile options} + +{phang}{opt nofastxtile} forces the use of {cmd:xtile} instead of {cmd:fastxtile} to compute bins. There is no situation where this should +be necessary or useful. The {cmd:fastxtile} program generates identical results to {cmd:xtile}, but runs faster on large datasets, and has +additional options for random sampling which may be useful to increase speed. + +{pmore}{cmd:fastxtile} is built into the {cmd:binscatter} code, but may also be installed +separately from SSC ({stata ssc install fastxtile:click here to install}) for use outside of {cmd:binscatter}. + +{phang}{opth randvar(varname)} requests that {it:varname} be used to select a +sample of observations when computing the quantile boundaries. Sampling increases +the speed of the binning procedure, but generates bins which are only approximately equal-sized +due to sampling error. 
It is possible to omit this option and still perform random sampling from U[0,1] +as described below in {opt randcut()} and {opt randn()}. + +{phang}{opt randcut(#)} specifies the upper bound on the variable contained +in {opt randvar(varname)}. Quantile boundaries are approximated using observations for which +{opt randvar()} <= #. If no variable is specified in {opt randvar()}, +a standard uniform random variable is generated. The default is {cmd:randcut(1)}. +This option cannot be combined with {opt randn()}. + +{phang}{opt randn(#)} specifies an approximate number of observations to sample when +computing the quantile boundaries. Quantile boundaries are approximated using observations +for which a uniform random variable is <= #/N. The exact number of observations +sampled may therefore differ from #, but it equals # in expectation. When this option is +combined with {opth randvar(varname)}, {it:varname} ought to be distributed U[0,1]. +Otherwise, a standard uniform random variable is generated. This option cannot be combined +with {opt randcut()}. + + +{marker examples}{...} +{title:Examples} + +{pstd}Load the 1988 extract of the National Longitudinal Survey of Young Women and Mature Women.{p_end} +{phang2}. {stata sysuse nlsw88}{p_end} +{phang2}. {stata keep if inrange(age,35,44) & inrange(race,1,2)}{p_end} + +{pstd}What is the relationship between job tenure and wages?{p_end} +{phang2}. {stata scatter wage tenure}{p_end} +{phang2}. {stata binscatter wage tenure}{p_end} + +{pstd}The scatter was too crowded to be easily interpretable. The binscatter is cleaner, but a linear fit looks unreasonable.{p_end} + +{pstd}Try a quadratic fit.{p_end} +{phang2}. {stata binscatter wage tenure, line(qfit)}{p_end} + +{pstd}We can also plot a linear regression discontinuity.{p_end} +{phang2}. {stata binscatter wage tenure, rd(2.5)}{p_end} + +{pstd} What is the relationship between age and wages?{p_end} +{phang2}. {stata scatter wage age}{p_end} +{phang2}. 
{stata binscatter wage age}{p_end} + +{pstd} The binscatter is again much easier to interpret. (Note that {cmd:binscatter} automatically +used each age as a discrete bin, since there are fewer than 20 unique values.){p_end} + +{pstd}How does the relationship vary by race?{p_end} +{phang2}. {stata binscatter wage age, by(race)}{p_end} + +{pstd} The relationship between age and wages is very different for whites and blacks. But what if we control for occupation?{p_end} +{phang2}. {stata binscatter wage age, by(race) absorb(occupation)}{p_end} + +{pstd} A very different picture emerges. Let's label this graph nicely.{p_end} +{phang2}. {stata binscatter wage age, by(race) absorb(occupation) msymbols(O T) xtitle(Age) ytitle(Hourly Wage) legend(lab(1 White) lab(2 Black))}{p_end} + + +{marker saved_results}{...} +{title:Saved Results} + +{pstd} +{cmd:binscatter} saves the following in {cmd:e()}: + +{synoptset 20 tabbed}{...} +{p2col 5 20 24 2: Scalars}{p_end} +{synopt:{cmd:e(N)}}number of observations{p_end} + +{synoptset 20 tabbed}{...} +{p2col 5 20 24 2: Macros}{p_end} +{synopt:{cmd:e(graphcmd)}}twoway command used to generate graph, which does not depend on loaded data{p_end} +{p 30 30 2}Note: it is often important to reference this result using `"`{bf:e(graphcmd)}'"' +rather than {bf:e(graphcmd)} in order to avoid truncation due to Stata's character limit for strings. 
+ +{synoptset 20 tabbed}{...} +{p2col 5 20 24 2: Matrices}{p_end} +{synopt:{cmd:e(byvalues)}}ordered list of by-values {it:(if numeric by-variable specified)}{p_end} +{synopt:{cmd:e(rdintervals)}}ordered list of rd intervals {it:(if rd specified)}{p_end} +{synopt:{cmd:e(y#_coefs)}}fit line coefficients for #th y-variable {it:(if lfit or qfit specified)}{p_end} + +{synoptset 20 tabbed}{...} +{p2col 5 20 24 2: Functions}{p_end} +{synopt:{cmd:e(sample)}}marks sample{p_end} +{p2colreset}{...} + + +{marker author}{...} +{title:Author} + +{pstd}Michael Stepner{p_end} +{pstd}stepner@mit.edu{p_end} + + +{marker acknowledgements}{...} +{title:Acknowledgements} + +{pstd}The present version of {cmd:binscatter} is based on a program first written by Jessica Laird. + +{pstd}This program was developed under the guidance and direction of Raj Chetty and John +Friedman. Laszlo Sandor provided suggestions which improved the program considerably, and offered abundant help +testing it. + +{pstd}Thank you also to the users of early versions of the program who devoted time to reporting +the bugs that they encountered. diff --git a/110/replication_package/replication/ado/plus/b/binslogit.ado b/110/replication_package/replication/ado/plus/b/binslogit.ado new file mode 100644 index 0000000000000000000000000000000000000000..00b7642c9a65341eba7aa15c3e403eab02184684 --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binslogit.ado @@ -0,0 +1,2394 @@ +*! 
version 1.2 09-Oct-2022 + +capture program drop binslogit +program define binslogit, eclass + version 13 + + syntax varlist(min=2 numeric fv ts) [if] [in] [fw pw] [, deriv(integer 0) at(string asis) nolink /// + logitopt(string asis) /// + dots(string) dotsgrid(string) dotsplotopt(string asis) /// + line(string) linegrid(integer 20) lineplotopt(string asis) /// + ci(string) cigrid(string) ciplotopt(string asis) /// + cb(string) cbgrid(integer 20) cbplotopt(string asis) /// + polyreg(string) polyreggrid(integer 20) polyregcigrid(integer 0) polyregplotopt(string asis) /// + by(varname) bycolors(string asis) bysymbols(string asis) bylpatterns(string asis) /// + nbins(string) binspos(string) binsmethod(string) nbinsrot(string) /// + pselect(numlist integer >=0) sselect(numlist integer >=0) /// + samebinsby randcut(numlist max=1 >=0 <=1) /// + nsims(integer 500) simsgrid(integer 20) simsseed(numlist integer max=1 >=0) /// + dfcheck(numlist integer max=2 >=0) masspoints(string) usegtools(string) /// + vce(passthru) level(real 95) asyvar(string) /// + noplot savedata(string asis) replace /// + plotxrange(numlist asc max=2) plotyrange(numlist asc max=2) *] + + ********************************************* + * Regularization constant (for checking only) + local qrot=2 + + ************************************** + * Create weight local + if ("`weight'"!="") { + local wt [`weight'`exp'] + local wtype=substr("`weight'",1,1) + } + + ********************** + ** Extract options *** + ********************** + * report the results for the cond. mean model? + if ("`link'"!="") local transform "F" + else local transform "T" + + * default vce, clustered? 
+ if ("`vce'"=="") local vce "vce(robust)" + local vcetemp: subinstr local vce "vce(" "", all + local vcetemp: subinstr local vcetemp ")" "", all + tokenize "`vcetemp'" + if ("`1'"=="cl"|"`1'"=="clu"|"`1'"=="clus"|"`1'"=="clust"| /// + "`1'"=="cluste"|"`1'"=="cluster") { + local clusterON "T" /* Mark cluster is specified */ + local clustervar `2' + } + if ("`vce'"=="vce(oim)"|"`vce'"=="vce(opg)") local vce_select "vce(ols)" + else local vce_select "`vce'" + + if ("`asyvar'"=="") local asyvar "off" + if ("`binsmethod'"=="rot") local binsmethod "ROT" + if ("`binsmethod'"=="dpi") local binsmethod "DPI" + if ("`binsmethod'"=="") local binsmethod "DPI" + if ("`binspos'"=="es") local binspos "ES" + if ("`binspos'"=="qs") local binspos "QS" + if ("`binspos'"=="") local binspos "QS" + + + * analyze options related to degrees ************* + if ("`dots'"!="T"&"`dots'"!="F"&"`dots'"!="") { + numlist "`dots'", integer max(2) range(>=0) + local dots=r(numlist) + } + if ("`line'"!="T"&"`line'"!="F"&"`line'"!="") { + numlist "`line'", integer max(2) range(>=0) + local line=r(numlist) + } + if ("`ci'"!="T"&"`ci'"!="F"&"`ci'"!="") { + numlist "`ci'", integer max(2) range(>=0) + local ci=r(numlist) + } + if ("`cb'"!="T"&"`cb'"!="F"&"`cb'"!="") { + numlist "`cb'", integer max(2) range(>=0) + local cb=r(numlist) + } + + + if ("`dots'"=="F") { /* shut down dots */ + local dots "" + local dotsgrid 0 + } + if ("`line'"=="F") local line "" + if ("`ci'"=="F") local ci "" + if ("`cb'"=="F") local cb "" + + + *************************************************************** + * 4 cases: select J, select p, user specified both, and error + local selection "" + + * analyze nbins + if ("`nbins'"=="T") local nbins=0 + local len_nbins=0 + if ("`nbins'"!=""&"`nbins'"!="F") { + numlist "`nbins'", integer sort + local nbins=r(numlist) + local len_nbins: word count `nbins' + } + + * analyze numlist in pselect and sselect + local len_p=0 + local len_s=0 + + if ("`pselect'"!="") { + numlist "`pselect'", 
integer range(>=`deriv') sort + local plist=r(numlist) + } + + if ("`sselect'"!="") { + numlist "`sselect'", integer range(>=0) sort + local slist=r(numlist) + } + + local len_p: word count `plist' + local len_s: word count `slist' + + if (`len_p'==1&`len_s'==0) { + local slist `plist' + local len_s=1 + } + if (`len_p'==0&`len_s'==1) { + local plist `slist' + local len_p=1 + } + + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + if ("`nbins'"!=""|"`pselect'"!=""|"`sselect'"!="") { + di as error "nbins(), pselect() or sselect() incorrectly specified." + exit + } + } + + + * 1st case: select J + if (("`nbins'"=="0"|`len_nbins'>1|"`nbins'"=="")&("`binspos'"=="ES"|"`binspos'"=="QS")) local selection "J" + if ("`selection'"=="J") { + if (`len_p'>1|`len_s'>1) { + if ("`nbins'"=="") { + di as error "nbins() must be specified for degree/smoothness selection." + exit + } + else { + di as error "Only one p and one s are allowed to select # of bins." + exit + } + } + if ("`plist'"=="") local plist=`deriv' + if ("`slist'"=="") local slist=`plist' + if ("`dots'"!=""&"`dots'"!="T"&"`dots'"!="F") { /* respect user-specified dots */ + local plist: word 1 of `dots' + local slist: word 2 of `dots' + if ("`slist'"=="") local slist `plist' + } + if ("`dots'"==""|"`dots'"=="T") local dots `plist' `slist' /* selection is based on dots */ + if ("`line'"=="T") local line `plist' `slist' + if ("`ci'"=="T") local ci `=`plist'+1' `=`slist'+1' + if ("`cb'"=="T") local cb `=`plist'+1' `=`slist'+1' + local len_p=1 + local len_s=1 + } /* e.g., binsreg y x, nbins(a b) or nbins(T) or pselect(a) nbins(T) */ + + + * 2nd case: select P (at least for one object) + if ("`selection'"!="J" & ("`dots'"==""|"`dots'"=="T"|"`line'"=="T"|"`ci'"=="T"|"`cb'"=="T")) { + local pselectOK "T" /* p selection CAN be turned on as long as one of the four is T */ + } + + if ("`pselectOK'"=="T" & `len_nbins'==1 & (`len_p'>1|`len_s'>1)) { + local selection "P" + } /* e.g., binsreg y x, pselect(a b) or pselect() dots(T) */ + + 
* 3rd case: completely user-specified J and p + if ((`len_p'<=1&`len_s'<=1) & "`selection'"!="J") { + local selection "NA" + if ("`dots'"==""|"`dots'"=="T") { + if (`len_p'==1&`len_s'==1) local dots `plist' `slist' + else local dots `deriv' `deriv' /* e.g., binsreg y x or , dots(0 0) nbins(20) */ + } + tokenize `dots' + if ("`2'"=="") local 2 `1' + if ("`line'"=="T") { + if (`len_p'==1&`len_s'==1) local line `plist' `slist' + else local line `dots' + } + if ("`ci'"=="T") { + if (`len_p'==1&`len_s'==1) local ci `=`plist'+1' `=`slist'+1' + else local ci `=`1'+1' `=`2'+1' + } + if ("`cb'"=="T") { + if (`len_p'==1&`len_s'==1) local cb `=`plist'+1' `=`slist'+1' + else local cb `=`1'+1' `=`2'+1' + } + } + + * exclude all other cases + if ("`selection'"=="") { + di as error "Degree, smoothness, or # of bins are not correctly specified." + exit + } + + + ****** Now, extract from dots, line, etc. ************ + * dots + tokenize `dots' + local dots_p "`1'" + local dots_s "`2'" + if ("`dots_p'"==""|"`dots_p'"=="T") local dots_p=. + if ("`dots_s'"=="") local dots_s `dots_p' + + if ("`dotsgrid'"=="") local dotsgrid "mean" + local dotsngrid_mean=0 + if (strpos("`dotsgrid'","mean")!=0) { + local dotsngrid_mean=1 + local dotsgrid: subinstr local dotsgrid "mean" "", all + } + if (wordcount("`dotsgrid'")==0) local dotsngrid=0 + else { + confirm integer n `dotsgrid' + local dotsngrid `dotsgrid' + } + local dotsntot=`dotsngrid_mean'+`dotsngrid' + + + * line + tokenize `line' + local line_p "`1'" + local line_s "`2'" + local linengrid `linegrid' + if ("`line'"=="") local linengrid=0 + if ("`line_p'"==""|"`line_p'"=="T") local line_p=. 
+ if ("`line_s'"=="") local line_s `line_p' + + * ci + if ("`cigrid'"=="") local cigrid "mean" + local cingrid_mean=0 + if (strpos("`cigrid'","mean")!=0) { + local cingrid_mean=1 + local cigrid: subinstr local cigrid "mean" "", all + } + if (wordcount("`cigrid'")==0) local cingrid=0 + else { + confirm integer n `cigrid' + local cingrid `cigrid' + } + local cintot=`cingrid_mean'+`cingrid' + + tokenize `ci' + local ci_p "`1'" + local ci_s "`2'" + if ("`ci'"=="") local cintot=0 + if ("`ci_p'"==""|"`ci_p'"=="T") local ci_p=. + if ("`ci_s'"=="") local ci_s `ci_p' + + * cb + tokenize `cb' + local cb_p "`1'" + local cb_s "`2'" + local cbngrid `cbgrid' + if ("`cb'"=="") local cbngrid=0 + if ("`cb_p'"==""|"`cb_p'"=="T") local cb_p=. + if ("`cb_s'"=="") local cb_s `cb_p' + + * Add warnings about degrees for estimation and inference + if ("`selection'"=="J") { + if ("`ci_p'"!=".") { + if (`ci_p'<=`dots_p') { + local ci_p=`dots_p'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the degree for dots()." + } + } + if ("`cb_p'"!=".") { + if (`cb_p'<=`dots_p') { + local cb_p=`dots_p'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. It must be greater than the degree for dots()." + } + } + } + if ("`selection'"=="NA") { + if ("`ci'"!=""|"`cb'"!="") { + di as text "Warning: Confidence intervals/bands are valid when nbins() is much larger than IMSE-optimal choice." + } + } + * if selection==P, compare ci_p/cb_p with P_opt later + + * poly fit + local polyregngrid `polyreggrid' + local polyregcingrid `polyregcigrid' + if ("`polyreg'"!="") { + confirm integer n `polyreg' + } + else { + local polyregngrid=0 + } + + * range of x axis and y axis? 
+ tokenize `plotxrange' + local min_xr "`1'" + local max_xr "`2'" + tokenize `plotyrange' + local min_yr "`1'" + local max_yr "`2'" + + + * Simuls + local simsngrid=`simsgrid' + + * Record if nbins specified by users, set default + local nbins_full `nbins' /* local save common nbins */ + if ("`selection'"=="NA") local binselectmethod "User-specified" + else { + if ("`binsmethod'"=="DPI") local binselectmethod "IMSE-optimal plug-in choice" + if ("`binsmethod'"=="ROT") local binselectmethod "IMSE-optimal rule-of-thumb choice" + if ("`selection'"=="J") local binselectmethod "`binselectmethod' (select # of bins)" + if ("`selection'"=="P") local binselectmethod "`binselectmethod' (select degree and smoothness)" + } + + * Mass point check? + if ("`masspoints'"=="") { + local massadj "T" + local localcheck "T" + } + else if ("`masspoints'"=="off") { + local massadj "F" + local localcheck "F" + } + else if ("`masspoints'"=="noadjust") { + local massadj "F" + local localcheck "T" + } + else if ("`masspoints'"=="nolocalcheck") { + local massadj "T" + local localcheck "F" + } + else if ("`masspoints'"=="veryfew") { + local fewmasspoints "T" /* count mass point, but turn off checks */ + } + + * extract dfcheck + if ("`dfcheck'"=="") local dfcheck 20 30 + tokenize `dfcheck' + local dfcheck_n1 "`1'" + local dfcheck_n2 "`2'" + + * evaluate at w from another dataset? + if (`"`at'"'!=`""'&`"`at'"'!=`"mean"'&`"`at'"'!=`"median"'&`"`at'"'!=`"0"') local atwout "user" + + * use gtools commands instead? + if ("`usegtools'"=="off") local usegtools "" + if ("`usegtools'"=="on") local usegtools usegtools + if ("`usegtools'"!="") { + capture which gtools + if (_rc) { + di as error "Gtools package not installed." 
+ exit + } + local localcheck "F" + local sel_gtools "on" + * use gstats tab instead of tabstat/collapse + * use gquantiles instead of _pctile + * use gunique instead of binsreg_uniq + * use fasterxtile instead of irecode (within binsreg_irecode) + * shut down local checks & do not sort + } + + ************************* + **** error checks ******* + ************************* + if (`deriv'<0) { + di as error "derivative incorrectly specified." + exit + } + if (`deriv'>1&"`transform'"=="T") { + di as error "deriv cannot be greater than 1 if the conditional probability is requested." + exit + } + if (`dotsngrid'<0|`linengrid'<0|`cingrid'<0|`cbngrid'<0|`simsngrid'<0) { + di as error "Number of evaluation points incorrectly specified." + exit + } + if (`level'>100|`level'<0) { + di as error "Confidence level incorrectly specified." + exit + } + if ("`dots_p'"!=".") { + if (`dots_p'<`dots_s') { + di as error "p cannot be smaller than s." + exit + } + if (`dots_p'<`deriv') { + di as error "p for dots cannot be less than deriv." + exit + } + } + if ("`line_p'"!=".") { + if (`line_p'<`line_s') { + di as error "p cannot be smaller than s." + exit + } + if (`line_p'<`deriv') { + di as error "p for line cannot be less than deriv." + exit + } + } + if ("`ci_p'"!=".") { + if (`ci_p'<`ci_s') { + di as error "p cannot be smaller than s." + exit + } + if (`ci_p'<`deriv') { + di as error "p for CI cannot be less than deriv." + exit + } + } + if ("`cb_p'"!=".") { + if (`cb_p'<`cb_s') { + di as error "p cannot be smaller than s." + exit + } + if (`cb_p'<`deriv') { + di as error "p for CB cannot be less than deriv." + exit + } + } + if ("`polyreg'"!="") { + if (`polyreg'<`deriv') { + di as error "polyreg() cannot be less than deriv()." + exit + } + } + + if (`"`savedata'"'!=`""') { + if ("`replace'"=="") { + confirm new file `"`savedata'.dta"' + } + if ("`plot'"!="") { + di as error "Plot cannot be turned off if graph data are requested." 
+ exit + } + } + if (`polyregcingrid'!=0&"`polyreg'"=="") { + di as error "polyreg() is missing." + exit + } + if ("`binsmethod'"!="DPI"&"`binsmethod'"!="ROT") { + di as error "binsmethod incorrectly specified." + exit + } + ******** END error checking *************************** + + * Mark sample + preserve + + * Parse varlist into y_var, x_var and w_var + tokenize `varlist' + fvrevar `1', tsonly + local y_var "`r(varlist)'" + local y_varname "`1'" + fvrevar `2', tsonly + local x_var "`r(varlist)'" + local x_varname "`2'" + + macro shift 2 + local w_var "`*'" + * read eval point for w from another file + if ("`atwout'"=="user") { + append using `at' + } + + fvrevar `w_var', tsonly + local w_var "`r(varlist)'" + local nwvar: word count `w_var' + + * Save the last obs in a vector and then drop it + tempname wuser /* a vector used to keep eval for w */ + if ("`atwout'"=="user") { + mata: st_matrix("`wuser'", st_data(`=_N', "`w_var'")) + qui drop in `=_N' + } + + * Get positions of factor vars + local indexlist "" + local i = 1 + foreach v in `w_var' { + if strpos("`v'", ".") == 0 { + local indexlist `indexlist' `i' + } + local ++i + } + + * add a default for at + if (`"`at'"'==""&`nwvar'>0) { + local at "mean" + } + + marksample touse + markout `touse' `by', strok + qui keep if `touse' + local nsize=_N /* # of rows in the original dataset */ + + if ("`usegtools'"==""&("`masspoints'"!="off"|"`binspos'"=="QS")) { + if ("`:sortedby'"!="`x_var'") { + di as text in gr "Sorting dataset on `x_varname'..." + di as text in gr "Note: This step is omitted if dataset already sorted by `x_varname'." 
+ sort `x_var', stable + } + local sorted "sorted" + } + + if ("`wtype'"=="f") qui sum `x_var' `wt', meanonly + else qui sum `x_var', meanonly + + local xmin=r(min) + local xmax=r(max) + local Ntotal=r(N) /* total sample size, with wt */ + * define the support of plot + if ("`plotxrange'"!="") { + local xsc `plotxrange' + if (wordcount("`xsc'")==1) local xsc `xsc' `xmax' + } + else local xsc `xmin' `xmax' + + * Effective sample size + local eN=`nsize' + * DO NOT check mass points and clusters outside loop unless needed + + * Check number of unique byvals & create local storing byvals + local byvarname `by' + if "`by'"!="" { + capture confirm numeric variable `by' + if _rc { + local bystring "T" + * generate a numeric version + tempvar by + tempname bylabel + qui egen `by'=group(`byvarname'), lname(`bylabel') + } + + local bylabel `:value label `by'' /* catch value labels for numeric by-vars too */ + + tempname byvalmatrix + qui tab `by', nofreq matrow(`byvalmatrix') + + local bynum=r(r) + forvalues i=1/`bynum' { + local byvals `byvals' `=`byvalmatrix'[`i',1]' + } + } + else local bynum=1 + + * Default colors, symbols, linepatterns + if (`"`bycolors'"'==`""') local bycolors /// + navy maroon forest_green dkorange teal cranberry lavender /// + khaki sienna emidblue emerald brown erose gold bluishgray + if (`"`bysymbols'"'==`""') local bysymbols /// + O D T S + X A a | V o d s t x + if (`"`bylpatterns'"'==`""') { + forval i=1/`bynum' { + local bylpatterns `bylpatterns' solid + } + } + + * Temp name in MATA + tempname xvec yvec byvec cluvec binedges + mata: `xvec'=st_data(., "`x_var'"); `yvec'=st_data(.,"`y_var'"); `byvec'=.; `cluvec'=. + + ******************************************************* + *** Mass point counting ******************************* + tempname Ndistlist Nclustlist mat_imse_var_rot mat_imse_bsq_rot mat_imse_var_dpi mat_imse_bsq_dpi + mat `Ndistlist'=J(`bynum',1,.) + mat `Nclustlist'=J(`bynum',1,.) 
+ * Matrices saving imse + mat `mat_imse_var_rot'=J(`bynum',1,.) + mat `mat_imse_bsq_rot'=J(`bynum',1,.) + mat `mat_imse_var_dpi'=J(`bynum',1,.) + mat `mat_imse_bsq_dpi'=J(`bynum',1,.) + + if (`bynum'>1) mata: `byvec'=st_data(.,"`by'") + if ("`clusterON'"=="T") mata: `cluvec'=st_data(.,"`clustervar'") + + ******************************************************** + ********** Bins, based on FULL sample ****************** + ******************************************************** + * knotlist: inner knot seq; knotlistON: local, knot available before loop + + tempname fullkmat /* matrix name for saving knots based on the full sample */ + + * Extract user-specified knot list + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + capture numlist "`binspos'", ascending + if (_rc==0) { + local knotlistON "T" + local knotlist `binspos' + local nbins: word count `knotlist' + local first: word 1 of `knotlist' + local last: word `nbins' of `knotlist' + if (`first'<=`xmin'|`last'>=`xmax') { + di as error "Inner knots specified out of allowed range." + exit + } + else { + local nbins=`nbins'+1 + local nbins_full `nbins' + local pos "user" + + foreach el of local knotlist { + mat `fullkmat'=(nullmat(`fullkmat') \ `el') + } + mat `fullkmat'=(`xmin' \ `fullkmat' \ `xmax') + } + } + else { + di as error "Numeric list incorrectly specified in binspos()." + exit + } + } + + * Discrete x? + if ("`fewmasspoints'"!="") local fullfewobs "T" + + * Bin selection using the whole sample if + if ("`fullfewobs'"==""&"`selection'"!="NA"&(("`by'"=="")|(("`by'"!="")&("`samebinsby'"!="")))) { + local selectfullON "T" + } + + if ("`selectfullON'"=="T") { + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xvec', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + } + * # of clusters + local Nclust=. 
+ if ("`clusterON'"=="T") { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + local eN=min(`eN', `Nclust') /* effective sample size */ + } + + + * Check effective sample size + if ("`dots_p'"==".") local dotspcheck=6 + else local dotspcheck=`dots_p' + * Check effective sample size + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`dotspcheck'+1+`qrot')) { + di as text in gr "Warning: Too small effective sample size for bin selection." /// + _newline _skip(9) "# of mass points or clusters used and by() option ignored." + local by "" + local byvals "" + local fullfewobs "T" + local binspos "QS" /* forced to be QS */ + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `Ntotal'>5000) { + local randcut1k=max(5000/`Ntotal', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5,000, 0.01n) observations if the sample size n>5,000. To use the full sample, set randcut(1)." + } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') bins(`dots_p' `dots_s') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`nbins'==.) 
{ + local nbins=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) { + di as error "bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`dots'"=="T"|"`dots'"=="") { + local dots_p=`binsp' + local dots_s=`binss' + } + if ("`line'"=="T") { + local line_p=`binsp' + local line_s=`binss' + } + if ("`ci'"!="T"&"`ci'"!="") { + if (`ci_p'<=`binsp') { + local ci_p=`binsp'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`ci'"=="T") { + local ci_p=`binsp'+1 + local ci_s=`binss'+1 + } + if ("`cb'"!="T"&"`cb'"!="") { + if (`cb_p'<=`binsp') { + local cb_p=`binsp'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. 
It must be greater than the IMSE-optimal degree." + } + } + if ("`cb'"=="T") { + local cb_p=`binsp'+1 + local cb_s=`binss'+1 + } + } + } + } + + if (("`selectfullON'"=="T"|("`selection'"=="NA"&"`samebinsby'"!=""))&"`fullfewobs'"=="") { + * Save in a knot list + local knotlistON "T" + local nbins_full=`nbins' + if ("`binspos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `fullkmat'=(nullmat(`fullkmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else if ("`binspos'"=="QS") { + if (`nbins'==1) mat `fullkmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `wt', nq(`nbins') `usegtools' + mat `fullkmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + + *** Placement name, for display ************ + if ("`pos'"=="user") { + local binselectmethod "User-specified" + local placement "User-specified" + } + else if ("`binspos'"=="ES") { + local placement "Evenly-spaced" + } + else if ("`binspos'"=="QS") { + local placement "Quantile-spaced" + } + + * NOTE: ALL checkings are put within the loop + + * Set seed + if ("`simsseed'"!="") set seed `simsseed' + + * alpha quantile (for two-sided CI) + local alpha=(100-(100-`level')/2)/100 + + *************************************************************************** + *************** Preparation before loop************************************ + *************************************************************************** + + ********** Prepare vars for plotting ******************** + * names for mata objects storing graph data + * plotmat: final output (defined outside); + * plotmatby: output for each group + tempname plotmat plotmatby xsub ysub byindex xcatsub + tempname Xm Xm0 mata_fit mata_se /* temp name for mata obj */ + + * count the number of requested columns, record the positions + local ncolplot=1 /* 1st col reserved for group */ + if ("`plot'"=="") { + if (`dotsntot'!=0) { + local dots_start=`ncolplot'+1 + local dots_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + } + if 
(`linengrid'!=0&"`fullfewobs'"=="") { + local line_start=`ncolplot'+1 + local line_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + } + if (`polyregngrid'!=0) { + local poly_start=`ncolplot'+1 + local poly_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + if (`polyregcingrid'!=0) { + local polyci_start=`ncolplot'+1 + local polyci_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + } + if (`cintot'!=0) { + local ci_start=`ncolplot'+1 + local ci_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + if (`cbngrid'!=0&"`fullfewobs'"=="") { + local cb_start=`ncolplot'+1 + local cb_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + } + mata: `plotmat'=J(0,`ncolplot',.) + + * mark the (varying) last row (for plotting) + local bylast=0 + ******************************************************************* + * temp var: bin id + tempvar xcat + qui gen `xcat'=. in 1 + + * matrix names, for returns + tempname Nlist nbinslist cvallist + + * local vars, for plotting + local counter_by=1 + local plotnum=0 /* count the number of series, for legend */ + if ("`by'"=="") local noby="noby" + local byvalnamelist "" /* save group name (value) */ + local plotcmd "" /* plotting cmd */ + + *************************************************************************** + ******************* Now, enter the loop *********************************** + *************************************************************************** + foreach byval in `byvals' `noby' { + local conds "" + if ("`by'"!="") { + local conds "if `by'==`byval'" /* with "if" */ + if ("`bylabel'"=="") local byvalname=`byval' + else { + local byvalname `: label `bylabel' `byval'' + } + local byvalnamelist `" `byvalnamelist' `"`byvalname'"' "' + } + if (`bynum'>1) { + mata: `byindex'=`byvec':==`byval' + mata: `xsub'=select(`xvec',`byindex'); `ysub'=select(`yvec', `byindex') + } + else { + mata: `xsub'=`xvec'; `ysub'=`yvec' + } + + * Subsample size + if ("`wtype'"=="f") sum `x_var' `conds' `wt', meanonly + else sum `x_var' `conds', 
meanonly + + local xmin=r(min) + local xmax=r(max) + local N=r(N) + mat `Nlist'=(nullmat(`Nlist') \ `N') + + * Effective sample size + if (`bynum'==1) local eN=`nsize' + else { + if ("`wtype'"!="f") local eN=r(N) + else { + qui count `conds' + local eN=r(N) + } + } + + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xsub', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' `conds' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + mat `Ndistlist'[`counter_by',1]=`Ndist' + } + + * # of clusters + local Nclust=. + if ("`clusterON'"=="T") { + if (`bynum'==1) { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + } + else { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(select(`cluvec', `byindex'))))) + } + else { + qui gunique `clustervar' `conds' + local Nclust=r(unique) + } + } + local eN=min(`eN', `Nclust') /* effective SUBsample size */ + mat `Nclustlist'[`counter_by',1]=`Nclust' + } + + ********************************************************* + ************** Prepare bins, within loop **************** + ********************************************************* + if ("`pos'"!="user") local pos `binspos' /* initialize pos */ + * Selection? + if ("`selection'"!="NA"&"`knotlistON'"!="T"&"`fullfewobs'"=="") { + * Check effective sample size + if ("`dots_p'"==".") local dotspcheck=6 + else local dotspcheck=`dots_p' + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`dotspcheck'+1+`qrot')) { + di as text in gr "Warning: Too small effective sample size for bin selection." /// + _newline _skip(9) "# of mass points or clusters used." 
+ local fewobs "T" + local nbins=`eN' + local pos "QS" /* forced to be QS */ + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `N'>5000) { + local randcut1k=max(5000/`N', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5,000, 0.01n) observations if the sample size n>5,000. To use the full sample, set randcut(1)." + } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + bins(`dots_p' `dots_s') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`pos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_bsq_dpi'[`counter_by',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter_by',1]=e(imse_var_dpi) + if (`nbins'==.) { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) 
{ + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_bsq_dpi'[`counter_by',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter_by',1]=e(imse_var_dpi) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`dots'"=="T"|"`dots'"=="") { + local dots_p=`binsp' + local dots_s=`binss' + } + if ("`line'"=="T") { + local line_p=`binsp' + local line_s=`binss' + } + if ("`ci'"!="T"&"`ci'"!="") { + if (`ci_p'<=`binsp') { + local ci_p=`binsp'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`ci'"=="T") { + local ci_p=`binsp'+1 + local ci_s=`binss'+1 + } + if ("`cb'"!="T"&"`cb'"!="") { + if (`cb_p'<=`binsp') { + local cb_p=`binsp'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. It must be greater than the IMSE-optimal degree." 
+ } + } + if ("`cb'"=="T") { + local cb_p=`binsp'+1 + local cb_s=`binss'+1 + } + } + } + } + + if ("`selection'"=="NA"|"`knotlistON'"=="T") local nbins=`nbins_full' /* add the universal nbins */ + *if ("`knotlistON'"=="T") local nbins=`nbins_full' + if ("`fullfewobs'"!="") { + local fewobs "T" + local nbins=`eN' + } + + ****************************************************** + * Check effective sample size for each case ********** + ****************************************************** + if ("`fewobs'"!="T") { + if ((`nbins'-1)*(`dots_p'-`dots_s'+1)+`dots_p'+1+`dfcheck_n2'>=`eN') { + local fewobs "T" /* even though ROT available, treat it as few obs case */ + local nbins=`eN' + local pos "QS" + di as text in gr "Warning: Too small effective sample size for dots. # of mass points or clusters used." + } + if ("`line_p'"!=".") { + if ((`nbins'-1)*(`line_p'-`line_s'+1)+`line_p'+1+`dfcheck_n2'>=`eN') { + local line_fewobs "T" + di as text in gr "Warning: Too small effective sample size for line." + } + } + if ("`ci_p'"!=".") { + if ((`nbins'-1)*(`ci_p'-`ci_s'+1)+`ci_p'+1+`dfcheck_n2'>=`eN') { + local ci_fewobs "T" + di as text in gr "Warning: Too small effective sample size for CI." + } + } + if ("`cb_p'"!=".") { + if ((`nbins'-1)*(`cb_p'-`cb_s'+1)+`cb_p'+1+`dfcheck_n2'>=`eN') { + local cb_fewobs "T" + di as text in gr "Warning: Too small effective sample size for CB." + } + } + } + + if ("`polyreg'"!="") { + if (`polyreg'+1>=`eN') { + local polyreg_fewobs "T" + di as text in gr "Warning: Too small effective sample size for polynomial fit." 
+ } + } + + * Generate category variable for data and save knot in matrix + tempname kmat + + if ("`knotlistON'"=="T") { + mat `kmat'=`fullkmat' + if ("`fewobs'"=="T"&"`eN'"!="`Ndist'") { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + else { + if ("`fewmasspoints'"==""&("`fewobs'"!="T"|"`eN'"!="`Ndist'")) { + if ("`pos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `kmat'=(nullmat(`kmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + } + + * Renew knot list if few mass points + if (("`fewobs'"=="T"&"`eN'"=="`Ndist'")|"`fewmasspoints'"!="") { + qui tab `x_var' `conds', matrow(`kmat') + if ("`fewmasspoints'"!="") { + local nbins=rowsof(`kmat') + local Ndist=`nbins' + local eN=`Ndist' + } + } + else { + mata: st_matrix("`kmat'", (`xmin' \ uniqrows(st_matrix("`kmat'")[|2 \ `=`nbins'+1'|]))) + if (`nbins'!=rowsof(`kmat')-1) { + di as text in gr "Warning: Repeated knots. Some bins dropped." + local nbins=rowsof(`kmat')-1 + } + + binsreg_irecode `x_var' `conds', knotmat(`kmat') bin(`xcat') /// + `usegtools' nbins(`nbins') pos(`pos') knotliston(`knotlistON') + + mata: `xcatsub'=st_data(., "`xcat'") + if (`bynum'>1) { + mata: `xcatsub'=select(`xcatsub', `byindex') + } + } + + ************************************************* + **** Check for empty bins *********************** + ************************************************* + mata: `binedges'=. 
/* initialize */ + if ("`fewobs'"!="T"&"`localcheck'"=="T") { + mata: st_local("Ncat", strofreal(rows(uniqrows(`xcatsub')))) + if (`nbins'==`Ncat') { + mata: `binedges'=binsreg_uniq(`xsub', `xcatsub', `nbins', "uniqmin") + } + else { + local uniqmin=0 + di as text in gr "Warning: There are empty bins. Specify a smaller number in nbins()." + } + + if ("`dots_p'"!=".") { + if (`uniqmin'<`dots_p'+1) { + local dots_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for dots." + } + } + if ("`line_p'"!=".") { + if (`uniqmin'<`line_p'+1) { + local line_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for line." + } + } + if ("`ci_p'"!=".") { + if (`uniqmin'<`ci_p'+1) { + local ci_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for CI." + } + } + if ("`cb_p'"!=".") { + if (`uniqmin'<`cb_p'+1) { + local cb_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for CB." + } + } + } + + * Now, save nbins in a list !!! + mat `nbinslist'=(nullmat(`nbinslist') \ `nbins') + + ********************************************************** + **** Count the number of rows needed (within loop!) 
****** + ********************************************************** + local byfirst=`bylast'+1 + local byrange=0 + if ("`fewobs'"!="T") { + local dots_nr=`dotsngrid_mean'*`nbins' + if (`dotsngrid'!=0) local dots_nr=`dots_nr'+`dotsngrid'*`nbins'+`nbins'-1 + local ci_nr=`cingrid_mean'*`nbins' + if (`cingrid'!=0) local ci_nr=`ci_nr'+`cingrid'*`nbins'+`nbins'-1 + if (`linengrid'!=0) local line_nr=`linengrid'*`nbins'+`nbins'-1 + if (`cbngrid'!=0) local cb_nr=`cbngrid'*`nbins'+`nbins'-1 + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*`nbins'+`nbins'-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*`nbins'+`nbins'-1 + } + local byrange=max(`dots_nr'+0,`line_nr'+0,`ci_nr'+0,`cb_nr'+0, `poly_nr'+0, `polyci_nr'+0) + } + else { + if ("`eN'"=="`Ndist'") { + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*(`nbins'-1)+`nbins'-1-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*(`nbins'-1)+`nbins'-1-1 + } + } + else { + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*`nbins'+`nbins'-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*`nbins'+`nbins'-1 + } + } + local byrange=max(`nbins', `poly_nr'+0, `polyci_nr'+0) + } + local bylast=`bylast'+`byrange' + mata: `plotmatby'=J(`byrange',`ncolplot',.) + if ("`byval'"!="noby") { + mata: `plotmatby'[.,1]=J(`byrange',1,`byval') + } + + ************************************************ + **** START: prepare data for plotting*********** + ************************************************ + local plotcmdby "" + + ******************************** + * adjust w vars + tempname wval + if (`nwvar'>0) { + if (`"`at'"'==`"mean"'|`"`at'"'==`"median"') { + matrix `wval'=J(1, `nwvar', 0) + tempname wvaltemp mataobj + mata: `mataobj'=. 
+ foreach wpos in `indexlist' { + local wname: word `wpos' of `w_var' + if ("`usegtools'"=="") { + if ("`wtype'"!="") qui tabstat `wname' `conds' [aw`exp'], stat(`at') save + else qui tabstat `wname' `conds', stat(`at') save + mat `wvaltemp'=r(StatTotal) + } + else { + qui gstats tabstat `wname' `conds' `wt', stat(`at') matasave("`mataobj'") + mata: st_matrix("`wvaltemp'", `mataobj'.getOutputCol(1)) + } + mat `wval'[1,`wpos']=`wvaltemp'[1,1] + } + mata: mata drop `mataobj' + } + else if (`"`at'"'==`"0"') { + matrix `wval'=J(1,`nwvar',0) + } + else if ("`atwout'"=="user") { + matrix `wval'=`wuser' + } + } + + + ************************************************* + ********** dots and ci for few obs. case ******** + ************************************************* + if (`dotsntot'!=0&"`plot'"==""&"`fewobs'"=="T") { + di as text in gr "Warning: dots(0 0) is used." + if (`deriv'>0) di as text in gr "Warning: deriv(0 0) is used." + + local dots_first=`byfirst' + local dots_last=`byfirst'-1+`nbins' + + mata: `plotmatby'[|1,`dots_start'+2 \ `nbins',`dots_start'+2|]=range(1,`nbins',1) + + if ("`eN'"=="`Ndist'") { + mata: `plotmatby'[|1,`dots_start' \ `nbins',`dots_start'|]=st_matrix("`kmat'"); /// + `plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+1|]=J(`nbins',1,1) + + * Renew knot commalist, each value forms a group + local xknot "" + forvalues i=1/`nbins' { + local xknot `xknot' `kmat'[`i',1] + } + local xknotcommalist : subinstr local xknot " " ",", all + qui replace `xcat'=1+irecode(`x_var',`xknotcommalist') `conds' + } + else { + tempname grid + mat `grid'=(`kmat'[1..`nbins',1]+`kmat'[2..`nbins'+1,1])/2 + mata: `plotmatby'[|1,`dots_start' \ `nbins',`dots_start'|]=st_matrix("`grid'"); /// + `plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+1|]=J(`nbins',1,0) + } + + local nseries=`nbins' + capture logit `y_var' ibn.`xcat' `w_var' `conds' `wt', nocon `vce' `logitopt' + tempname fewobs_b fewobs_V + if (_rc==0) { + mat `fewobs_b'=e(b) + mat `fewobs_V'=e(V) + mata: 
binsreg_checkdrop("`fewobs_b'", "`fewobs_V'", `nseries') + if (`nwvar'>0) { + mat `fewobs_b'=`fewobs_b'[1,1..`nseries']+(`fewobs_b'[1,`=`nseries'+1'..`=`nseries'+`nwvar'']*`wval'')*J(1,`nseries',1) + } + else { + mat `fewobs_b'=`fewobs_b'[1,1..`nseries'] + } + } + else { + error _rc + exit _rc + } + + if ("`transform'"=="T") { + mata: `plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]=logistic(st_matrix("`fewobs_b'"))' + } + else { + mata: `plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]=st_matrix("`fewobs_b'")' + } + + local plotnum=`plotnum'+1 + local legendnum `legendnum' `plotnum' + local col: word `counter_by' of `bycolors' + local sym: word `counter_by' of `bysymbols' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond `plotcond' if + if ("`plotxrange'"!="") { + local plotcond `plotcond' dots_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &dots_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' dots_fit>=`min_yr' + else local plotcond `plotcond' &dots_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &dots_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (scatter dots_fit dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + mcolor(`col') msymbol(`sym') `dotsplotopt') + + if (`cintot'!=0) { + di as text in gr "Warning: ci(0 0) is used." 
+ + if (`nwvar'>0) { + mata: `mata_se'=(I(`nseries'), J(`nseries',1,1)#st_matrix("`wval'")) + } + else { + mata: `mata_se'=I(`nseries') + } + + mata: `plotmatby'[|1,`ci_start'+1 \ `nbins',`ci_start'+2|]=`plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+2|]; /// + `mata_se'=sqrt(rowsum((`mata_se'*st_matrix("`fewobs_V'")):*`mata_se')) + if ("`transform'"=="T") { + mata: `mata_se'=`mata_se':*(logisticden(st_matrix("`fewobs_b'"))') + } + mata: `plotmatby'[|1,`ci_start'+3 \ `nbins',`ci_start'+3|]=`plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]-`mata_se'*invnormal(`alpha'); /// + `plotmatby'[|1,`ci_start'+4 \ `nbins',`ci_start'+4|]=`plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]+`mata_se'*invnormal(`alpha') + mata: mata drop `mata_se' + + local plotnum=`plotnum'+1 + local lty: word `counter_by' of `bylpatterns' + local plotcmdby `plotcmdby' (rcap CI_l CI_r dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + } + } + + ********************************************* + **** The following handles the usual case *** + ********************************************* + * Turn on or off? 
+ local dotsON "" + local lineON "" + local polyON "" + local ciON "" + local cbON "" + if (`dotsntot'!=0&"`plot'"==""&"`fewobs'"!="T"&"`dots_fewobs'"!="T") { + local dotsON "T" + } + if (`linengrid'!=0&"`plot'"==""&"`line_fewobs'"!="T"&"`fewobs'"!="T") { + local lineON "T" + } + if (`polyregngrid'!=0&"`plot'"==""&"`polyreg_fewobs'"!="T") { + local polyON "T" + } + if (`cintot'!=0&"`plot'"==""&"`ci_fewobs'"!="T"&"`fewobs'"!="T") { + local ciON "T" + } + if (`cbngrid'!=0&"`plot'"==""&"`cb_fewobs'"!="T"&"`fewobs'"!="T") { + local cbON "T" + } + + + ************************ + ****** Dots ************ + ************************ + tempname xmean + + if ("`dotsON'"=="T") { + local dots_first=`byfirst' + local dots_last=`byfirst'+`dots_nr'-1 + + * fitting + tempname dots_b dots_V + if (("`dots_p'"=="`ci_p'"&"`dots_s'"=="`ci_s'"&"`ciON'"=="T")| /// + ("`dots_p'"=="`cb_p'"&"`dots_s'"=="`cb_s'"&"`cbON'"=="T")) { + binslogit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`dots_p') s(`dots_s') type(dots) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`dotsngrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + usereg `sorted' `usegtools' logitopt(`logitopt') + } + else { + binslogit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`dots_p') s(`dots_s') type(dots) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`dotsngrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' logitopt(`logitopt') + } + + mat `dots_b'=e(bmat) + mat `dots_V'=e(Vmat) + if (`dotsngrid_mean'!=0) mat `xmean'=e(xmat) + + * prediction + if (`dotsngrid_mean'==0) { + mata: `plotmatby'[|1,`dots_start' \ `dots_nr',`dots_end'|] = /// + binslogit_plotmat("`dots_b'", "`dots_V'", ., "`kmat'", /// + `nbins', `dots_p', `dots_s', `deriv', /// + "dots", `dotsngrid', "`wval'", `nwvar', /// + "`transform'", "`asyvar'") + } + else { + mata: `plotmatby'[|1,`dots_start' \ `dots_nr',`dots_end'|] = /// + 
binslogit_plotmat("`dots_b'", "`dots_V'", ., "`kmat'", /// + `nbins', `dots_p', `dots_s', `deriv', /// + "dots", `dotsngrid', "`wval'", `nwvar', /// + "`transform'", "`asyvar'", "`xmean'") + } + + * dots + local plotnum=`plotnum'+1 + if ("`cbON'"=="T") local legendnum `legendnum' `=`plotnum'+1' + else { + local legendnum `legendnum' `plotnum' + } + local col: word `counter_by' of `bycolors' + local sym: word `counter_by' of `bysymbols' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' dots_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &dots_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' dots_fit>=`min_yr' + else local plotcond `plotcond' &dots_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &dots_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (scatter dots_fit dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + mcolor(`col') msymbol(`sym') `dotsplotopt') + } + + ********************************************** + ********************* Line ******************* + ********************************************** + if ("`lineON'"=="T") { + local line_first=`byfirst' + local line_last=`byfirst'-1+`line_nr' + + * fitting + tempname line_b line_V + capture confirm matrix `dots_b' `dots_V' + if ("`line_p'"=="`dots_p'"& "`line_s'"=="`dots_s'" & _rc==0) { + matrix `line_b'=`dots_b' + matrix `line_V'=`dots_V' + } + else { + if (("`line_p'"=="`ci_p'"&"`line_s'"=="`ci_s'"&"`ciON'"=="T")| /// + ("`line_p'"=="`cb_p'"&"`line_s'"=="`cb_s'"&"`cbON'"=="T")) { + binslogit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`line_p') s(`line_s') type(line) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + usereg `sorted' `usegtools' logitopt(`logitopt') + } + else { + binslogit_fit `y_var' `x_var' `w_var' `conds' `wt', 
deriv(`deriv') /// + p(`line_p') s(`line_s') type(line) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' logitopt(`logitopt') + } + mat `line_b'=e(bmat) + mat `line_V'=e(Vmat) + } + + * prediction + mata: `plotmatby'[|1,`line_start' \ `line_nr',`line_end'|] = /// + binslogit_plotmat("`line_b'", "`line_V'", ., "`kmat'", /// + `nbins', `line_p', `line_s', `deriv', /// + "line", `linengrid', "`wval'", `nwvar', "`transform'", "`asyvar'") + + * line + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' line_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &line_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' line_fit>=`min_yr' + else local plotcond `plotcond' &line_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &(line_fit<=`max_yr'|line_fit==.) + } + } + + local plotcmdby `plotcmdby' (line line_fit line_x /// + `plotcond' in `line_first'/`line_last', sort cmissing(n) /// + lcolor(`col') lpattern(`lty') `lineplotopt') + + } + + *********************************** + ******* Polynomial fit ************ + *********************************** + if ("`polyON'"=="T") { + if (`nwvar'>0) { + di as text "Note: When additional covariates w are included, the polynomial fit may not always be close to the binscatter fit." 
+ } + + local poly_first=`byfirst' + local poly_last=`byfirst'-1+`poly_nr' + + mata:`plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'+2|]=binsreg_grids("`kmat'",`polyregngrid') + + local poly_series "" + forval i=0/`polyreg' { + tempvar x_var_`i' + qui gen `x_var_`i''=`x_var'^`i' `conds' + local poly_series `poly_series' `x_var_`i'' + } + + capture logit `y_var' `poly_series' `w_var' `conds' `wt', nocon `vce' `logitopt' + * store results + tempname poly_b poly_V poly_adjw + if (_rc==0) { + matrix `poly_b'=e(b) + matrix `poly_V'=e(V) + } + else { + error _rc + exit _rc + } + + * Data for derivative + mata: `Xm'=J(`poly_nr',0,.); `Xm0'=J(`poly_nr',0,.) + forval i=`deriv'/`polyreg' { + mata: `Xm'=(`Xm', /// + `plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'|]:^(`i'-`deriv')* /// + factorial(`i')/factorial(`i'-`deriv')) + } + mata: `Xm'=(J(`poly_nr', `deriv',0), `Xm') + if (`nwvar'>0) { + if (`deriv'==0) mata: `Xm'=(`Xm', J(`poly_nr',1,1)#st_matrix("`wval'")) + else mata: `Xm'=(`Xm', J(`poly_nr',`nwvar',0)) + } + + if ("`transform'"=="T") { + if (`deriv'==0) { + mata:`plotmatby'[|1,`poly_start'+3 \ `poly_nr',`poly_start'+3|]=logistic(`Xm'*st_matrix("`poly_b'")') + } + else if (`deriv'==1) { + forval i=0/`polyreg' { + mata: `Xm0'=(`Xm0', `plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'|]:^`i') + } + if (`nwvar'>0) mata: `Xm0'=(`Xm0', J(`poly_nr',1,1)#st_matrix("`wval'")) + mata:`plotmatby'[|1,`poly_start'+3 \ `poly_nr',`poly_start'+3|]=logisticden(`Xm0'*st_matrix("`poly_b'")'):* /// + (`Xm'*st_matrix("`poly_b'")') + } + } + else { + mata:`plotmatby'[|1,`poly_start'+3 \ `poly_nr',`poly_start'+3|]=`Xm'*st_matrix("`poly_b'")' + } + + mata: mata drop `Xm' `Xm0' + + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' poly_x>=`min_xr' + if ("`max_xr'"!="") 
local plotcond `plotcond' &poly_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' poly_fit>=`min_yr' + else local plotcond `plotcond' &poly_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &poly_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (line poly_fit poly_x /// + `plotcond' in `poly_first'/`poly_last', /// + sort lcolor(`col') lpattern(`lty') `polyregplotopt') + + * add CI for global poly? + if (`polyregcingrid'!=0) { + local polyci_first=`byfirst' + local polyci_last=`byfirst'-1+`polyci_nr' + + mata: `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'+2|]=binsreg_grids("`kmat'", `polyregcingrid') + + mata: `Xm'=J(`polyci_nr',0,.); `Xm0'=J(`polyci_nr',0,.) + forval i=`deriv'/`polyreg' { + mata:`Xm'=(`Xm', /// + `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'|]:^(`i'-`deriv')* /// + factorial(`i')/factorial(`i'-`deriv')) + } + mata: `Xm'=(J(`polyci_nr', `deriv',0), `Xm') + if (`nwvar'>0) { + if (`deriv'==0) mata: `Xm'=(`Xm', J(`polyci_nr',1,1)#st_matrix("`wval'")) + else mata: `Xm'=(`Xm', J(`polyci_nr',`nwvar',0)) + } + + if ("`transform'"=="T") { + if (`deriv'==0) { + mata: `mata_fit'=logistic(`Xm'*st_matrix("`poly_b'")') + mata: `mata_se'=logisticden(`Xm'*st_matrix("`poly_b'")'):* /// + sqrt(rowsum((`Xm'*st_matrix("`poly_V'")):*`Xm')) + } + else if (`deriv'==1) { + forval i=0/`polyreg' { + mata: `Xm0'=(`Xm0', `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'|]:^`i') + } + if (`nwvar'>0) mata: `Xm0'=(`Xm0', J(`polyci_nr',1,1)#st_matrix("`wval'")) + mata:`mata_fit'=logisticden(`Xm0'*st_matrix("`poly_b'")'):* /// + (`Xm'*st_matrix("`poly_b'")') + + tempname tempobj + mata: `tempobj'=`Xm0'*st_matrix("`poly_b'")'; /// + `tempobj'=logisticden(`tempobj'):*(1:-2*logistic(`tempobj')):*(`Xm'*st_matrix("`poly_b'")'):*`Xm0' + /// + logisticden(`tempobj'):*`Xm'; /// + `mata_se'=sqrt(rowsum((`tempobj'*st_matrix("`poly_V'")):*`tempobj')) + mata: mata drop `tempobj' + } + } + else { 
+ mata: `mata_fit'=`Xm'*st_matrix("`poly_b'")'; /// + `mata_se'=sqrt(rowsum((`Xm'*st_matrix("`poly_V'")):*`Xm')) + } + + mata:`plotmatby'[|1,`polyci_start'+3 \ `polyci_nr',`polyci_start'+3|]=`mata_fit'-`mata_se'*invnormal(`alpha'); /// + `plotmatby'[|1,`polyci_start'+4 \ `polyci_nr',`polyci_start'+4|]=`mata_fit'+`mata_se'*invnormal(`alpha'); /// + `plotmatby'[selectindex(`plotmatby'[,`=`polyci_start'+1']:==1),(`=`polyci_start'+3',`=`polyci_start'+4')]=J(`=`nbins'-1',2,.) + + mata: mata drop `Xm' `Xm0' `mata_fit' `mata_se' + + * poly ci + local plotnum=`plotnum'+1 + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' polyCI_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &polyCI_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' polyCI_l>=`min_yr' + else local plotcond `plotcond' &polyCI_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &polyCI_r<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (rcap polyCI_l polyCI_r polyCI_x /// + `plotcond' in `polyci_first'/`polyci_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + } + } + + + ********************************** + ******* Confidence Interval ****** + ********************************** + if ("`ciON'"=="T") { + local ci_first=`byfirst' + local ci_last=`byfirst'-1+`ci_nr' + + * fitting + tempname ci_b ci_V + capture confirm matrix `line_b' `line_V' + if ("`ci_p'"=="`line_p'"& "`ci_s'"=="`line_s'" & _rc==0) { + matrix `ci_b'=`line_b' + matrix `ci_V'=`line_V' + } + else { + capture confirm matrix `dots_b' `dots_V' + if ("`ci_p'"=="`dots_p'"& "`ci_s'"=="`dots_s'" & _rc==0) { + matrix `ci_b'=`dots_b' + matrix `ci_V'=`dots_V' + } + } + + capture confirm matrix `ci_b' `ci_V' `xmean' + if (_rc!=0) { + binslogit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`ci_p') s(`ci_s') type(ci) `vce' /// + xcat(`xcat') kmat(`kmat') 
dotsmean(`cingrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' logitopt(`logitopt') + + mat `ci_b'=e(bmat) + mat `ci_V'=e(Vmat) + mat `xmean'=e(xmat) + } + + * prediction + if (`cingrid_mean'==0) { + mata: `plotmatby'[|1,`ci_start' \ `ci_nr',`ci_end'|] = /// + binslogit_plotmat("`ci_b'", "`ci_V'", /// + `=invnormal(`alpha')', "`kmat'", /// + `nbins', `ci_p', `ci_s', `deriv', "ci", /// + `cingrid', "`wval'", `nwvar', "`transform'", "`asyvar'") + } + else { + mata: `plotmatby'[|1,`ci_start' \ `ci_nr',`ci_end'|] = /// + binslogit_plotmat("`ci_b'", "`ci_V'", /// + `=invnormal(`alpha')', "`kmat'", /// + `nbins', `ci_p', `ci_s', `deriv', "ci", /// + `cingrid', "`wval'", `nwvar', /// + "`transform'", "`asyvar'", "`xmean'") + } + + * ci + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' CI_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &CI_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' CI_l>=`min_yr' + else local plotcond `plotcond' &CI_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &CI_r<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (rcap CI_l CI_r CI_x /// + `plotcond' in `ci_first'/`ci_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + + } + + ******************************* + ***** Confidence Band ********* + ******************************* + tempname cval + scalar `cval'=. + if ("`cbON'"=="T") { + if (`nsims'<2000|`simsgrid'<50) { + di as text "Note: A larger number random draws/evaluation points is recommended to obtain the final results." 
+ } + * Prepare grid for plotting + local cb_first=`byfirst' + local cb_last=`byfirst'-1+`cb_nr' + + * fitting + tempname cb_b cb_V + capture confirm matrix `ci_b' `ci_V' + if ("`cb_p'"=="`ci_p'"& "`cb_s'"=="`ci_s'" & _rc==0) { + matrix `cb_b'=`ci_b' + matrix `cb_V'=`ci_V' + } + else { + capture confirm matrix `line_b' `line_V' + if ("`cb_p'"=="`line_p'"& "`cb_s'"=="`line_s'" & _rc==0) { + matrix `cb_b'=`line_b' + matrix `cb_V'=`line_V' + } + else { + capture confirm matrix `dots_b' `dots_V' + if ("`cb_p'"=="`dots_p'"& "`cb_s'"=="`dots_s'" & _rc==0) { + matrix `cb_b'=`dots_b' + matrix `cb_V'=`dots_V' + } + else { + binslogit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`cb_p') s(`cb_s') type(cb) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' logitopt(`logitopt') + mat `cb_b'=e(bmat) + mat `cb_V'=e(Vmat) + } + } + } + + * Compute critical values + * Prepare grid for simulation + local uni_last=`simsngrid'*`nbins'+`nbins'-1 + local nseries=(`cb_p'-`cb_s'+1)*(`nbins'-1)+`cb_p'+1 + + tempname cb_basis + mata: `cb_basis'=binsreg_grids("`kmat'", `simsngrid'); /// + `cb_basis'=binsreg_spdes(`cb_basis'[,1], "`kmat'", `cb_basis'[,3], `cb_p', `deriv', `cb_s'); /// + `Xm'=binsreg_pred(`cb_basis', st_matrix("`cb_b'")[|1 \ `nseries'|]', /// + st_matrix("`cb_V'")[|1,1 \ `nseries',`nseries'|], "all"); /// + binsreg_pval(`cb_basis', `Xm'[,2], "`cb_V'", ".", `nsims', `nseries', "two", `=`level'/100', ".", "`cval'", "inf") + mata: mata drop `cb_basis' `Xm' + + * prediction + mata: `plotmatby'[|1,`cb_start' \ `cb_nr',`cb_end'|] = /// + binslogit_plotmat("`cb_b'", "`cb_V'", /// + `=`cval'', "`kmat'", /// + `nbins', `cb_p', `cb_s', `deriv', /// + "cb", `cbngrid', "`wval'", `nwvar', /// + "`transform'", "`asyvar'") + + * cb + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local 
plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' CB_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &CB_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' CB_l>=`min_yr' + else local plotcond `plotcond' &CB_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &(CB_r<=`max_yr'|CB_r==.) + } + } + + local plotcmdby (rarea CB_l CB_r CB_x /// + `plotcond' in `cb_first'/`cb_last', sort cmissing(n) /// + lcolor(none%0) fcolor(`col'%50) fintensity(50) `cbplotopt') `plotcmdby' + } + mat `cvallist'=(nullmat(`cvallist') \ `cval') + + local plotcmd `plotcmd' `plotcmdby' + mata: `plotmat'=(`plotmat' \ `plotmatby') + + ********************************* + **** display ******************** + ********************************* + di "" + * Plotting + if ("`plot'"=="") { + if (`counter_by'==1) { + di in smcl in gr "Binscatter plot, logit model" + di in smcl in gr "Bin selection method: `binselectmethod'" + di in smcl in gr "Placement: `placement'" + di in smcl in gr "Derivative: `deriv'" + if (`"`savedata'"'!=`""') { + di in smcl in gr `"Output file: `savedata'.dta"' + } + } + di "" + if ("`by'"!="") { + di in smcl in gr "Group: `byvarname' = " in yellow "`byvalname'" + } + di in smcl in gr "{hline 30}{c TT}{hline 15}" + di in smcl in gr "{lalign 1:# of observations}" _col(30) " {c |} " _col(32) as result %7.0f `N' + di in smcl in gr "{lalign 1:# of distinct values}" _col(30) " {c |} " _col(32) as result %7.0f `Ndist' + di in smcl in gr "{lalign 1:# of clusters}" _col(30) " {c |} " _col(32) as result %7.0f `Nclust' + di in smcl in gr "{hline 30}{c +}{hline 15}" + di in smcl in gr "{lalign 1:Bin/Degree selection:}" _col(30) " {c |} " + if ("`selection'"=="P") { + di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `binsp' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `binss' + } + else { + di in smcl in 
gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `dots_p' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `dots_s' + } + + di in smcl in gr "{ralign 29:# of bins}" _col(30) " {c |} " _col(32) as result %7.0f `nbins' + if ("`binselectmethod'"!="User-specified") { + if ("`binsmethod'"=="ROT") { + di in smcl in gr "{ralign 29:imse, bias^2}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_bsq_rot'[`counter_by',1]' + di in smcl in gr "{ralign 29:imse, var.}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_var_rot'[`counter_by',1]' + } + else if ("`binsmethod'"=="DPI") { + di in smcl in gr "{ralign 29:imse, bias^2}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_bsq_dpi'[`counter_by',1]' + di in smcl in gr "{ralign 29:imse, var.}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_var_dpi'[`counter_by',1]' + } + } + di in smcl in gr "{hline 30}{c BT}{hline 15}" + di "" + di in smcl in gr "{hline 9}{c TT}{hline 30}" + di in smcl _col(10) "{c |}" in gr _col(17) "p" _col(25) "s" _col(33) "df" + di in smcl in gr "{hline 9}{c +}{hline 30}" + if (`dotsntot'!=0) { + local dots_df=(`dots_p'-`dots_s'+1)*(`nbins'-1)+`dots_p'+1 + di in smcl in gr "{lalign 1: dots}" _col(10) "{c |}" in gr _col(17) "`dots_p'" _col(25) "`dots_s'" _col(33) "`dots_df'" + } + if ("`lineON'"=="T") { + local line_df=(`line_p'-`line_s'+1)*(`nbins'-1)+`line_p'+1 + di in smcl in gr "{lalign 1: line}" _col(10) "{c |}" in gr _col(17) "`line_p'" _col(25) "`line_s'" _col(33) "`line_df'" + } + if (`cintot'!=0) { + local ci_df=(`ci_p'-`ci_s'+1)*(`nbins'-1)+`ci_p'+1 + di in smcl in gr "{lalign 1: CI}" _col(10) "{c |}" in gr _col(17) "`ci_p'" _col(25) "`ci_s'" _col(33) "`ci_df'" + } + if ("`cbON'"=="T") { + local cb_df=(`cb_p'-`cb_s'+1)*(`nbins'-1)+`cb_p'+1 + di in smcl in gr "{lalign 1: CB}" _col(10) "{c |}" in gr _col(17) "`cb_p'" _col(25) "`cb_s'" _col(33) "`cb_df'" + } + if ("`polyON'"=="T") { + 
local poly_df=`polyreg'+1 + di in smcl in gr "{lalign 1: polyreg}" _col(10) "{c |}" in gr _col(17) "`polyreg'" _col(25) "NA" _col(33) "`poly_df'" + } + di in smcl in gr "{hline 9}{c BT}{hline 30}" + } + + + mata: mata drop `plotmatby' + local ++counter_by + } + mata: mata drop `xsub' `ysub' `binedges' + if (`bynum'>1) mata: mata drop `byindex' + capture mata: mata drop `xcatsub' + ****************** END loop **************************************** + ******************************************************************** + + + + ******************************************* + *************** Plotting ****************** + ******************************************* + clear + if ("`plotcmd'"!="") { + * put data back to STATA + mata: st_local("nr", strofreal(rows(`plotmat'))) + qui set obs `nr' + + * MAKE SURE the orderings match + qui gen group=. in 1 + if (`dotsntot'!=0) { + qui gen dots_x=. in 1 + qui gen dots_isknot=. in 1 + qui gen dots_binid=. in 1 + qui gen dots_fit=. in 1 + } + if (`linengrid'!=0&"`fullfewobs'"=="") { + qui gen line_x=. in 1 + qui gen line_isknot=. in 1 + qui gen line_binid=. in 1 + qui gen line_fit=. in 1 + } + if (`polyregngrid'!=0) { + qui gen poly_x=. in 1 + qui gen poly_isknot=. in 1 + qui gen poly_binid=. in 1 + qui gen poly_fit=. in 1 + if (`polyregcingrid'!=0) { + qui gen polyCI_x=. in 1 + qui gen polyCI_isknot=. in 1 + qui gen polyCI_binid=. in 1 + qui gen polyCI_l=. in 1 + qui gen polyCI_r=. in 1 + } + } + if (`cintot'!=0) { + qui gen CI_x=. in 1 + qui gen CI_isknot=. in 1 + qui gen CI_binid=. in 1 + qui gen CI_l=. in 1 + qui gen CI_r=. in 1 + } + if (`cbngrid'!=0&"`fullfewobs'"=="") { + qui gen CB_x=. in 1 + qui gen CB_isknot=. in 1 + qui gen CB_binid=. in 1 + qui gen CB_l=. in 1 + qui gen CB_r=. 
in 1 + } + + mata: st_store(.,.,`plotmat') + + * Legend + local plot_legend legend(order( + if ("`by'"!=""&`dotsntot'!=0) { + forval i=1/`bynum' { + local byvalname: word `i' of `byvalnamelist' + local plot_legend `plot_legend' `: word `i' of `legendnum'' "`byvarname'=`byvalname'" + } + local plot_legend `plot_legend' )) + } + else { + local plot_legend legend(off) + } + + * Plot it + local graphcmd twoway `plotcmd', xtitle(`x_varname') ytitle(`y_varname') xscale(range(`xsc')) `plot_legend' `options' + `graphcmd' + } + mata: mata drop `plotmat' `xvec' `yvec' `byvec' `cluvec' + + + * Save graph data ? + * In the normal case + if (`"`savedata'"'!=`""'&`"`plotcmd'"'!=`""') { + * Add labels + if ("`by'"!="") { + if ("`bystring'"=="T") { + label val group `bylabel' + decode group, gen(`byvarname') + } + else { + qui gen `byvarname'=group + if ("`bylabel'"!="") label val `byvarname' `bylabel' + } + label var `byvarname' "Group" + qui drop group + order `byvarname' + } + else qui drop group + + capture confirm variable dots_x dots_binid dots_isknot dots_fit + if (_rc==0) { + label var dots_x "Dots: grid" + label var dots_binid "Dots: indicator of bins" + label var dots_isknot "Dots: indicator of inner knot" + label var dots_fit "Dots: fitted values" + } + capture confirm variable line_x line_binid line_isknot line_fit + if (_rc==0) { + label var line_x "Line: grid" + label var line_binid "Line: indicator of bins" + label var line_isknot "Line: indicator of inner knot" + label var line_fit "Line: fitted values" + } + capture confirm variable poly_x poly_binid poly_isknot poly_fit + if (_rc==0) { + label var poly_x "Poly: grid" + label var poly_binid "Poly: indicator of bins" + label var poly_isknot "Poly: indicator of inner knot" + label var poly_fit "Poly: fitted values" + } + capture confirm variable polyCI_x polyCI_binid polyCI_isknot polyCI_l polyCI_r + if (_rc==0) { + label var polyCI_x "Poly confidence interval: grid" + label var polyCI_binid "Poly confidence 
interval: indicator of bins" + label var polyCI_isknot "Poly confidence interval: indicator of inner knot" + label var polyCI_l "Poly confidence interval: left boundary" + label var polyCI_r "Poly confidence interval: right boundary" + } + capture confirm variable CI_x CI_binid CI_isknot CI_l CI_r + if (_rc==0) { + label var CI_x "Confidence interval: grid" + label var CI_binid "Confidence interval: indicator of bins" + label var CI_isknot "Confidence interval: indicator of inner knot" + label var CI_l "Confidence interval: left boundary" + label var CI_r "Confidence interval: right boundary" + } + capture confirm variable CB_x CB_binid CB_isknot CB_l CB_r + if (_rc==0) { + label var CB_x "Confidence band: grid" + label var CB_binid "Confidence band: indicator of bins" + label var CB_isknot "Confidence band: indicator of inner knot" + label var CB_l "Confidence band: left boundary" + label var CB_r "Confidence band: right boundary" + } + qui save `"`savedata'"', `replace' + } + *************************************************************************** + + ********************************* + ********** Return *************** + ********************************* + ereturn clear + * # of observations + ereturn scalar N=`Ntotal' + * Options + ereturn scalar level=`level' + ereturn scalar dots_p=`dots_p' + ereturn scalar dots_s=`dots_s' + ereturn scalar line_p=`line_p' + ereturn scalar line_s=`line_s' + ereturn scalar ci_p=`ci_p' + ereturn scalar ci_s=`ci_s' + ereturn scalar cb_p=`cb_p' + ereturn scalar cb_s=`cb_s' + * by group: + *ereturn matrix knot=`kmat' + ereturn matrix cval_by=`cvallist' + ereturn matrix nbins_by=`nbinslist' + ereturn matrix Nclust_by=`Nclustlist' + ereturn matrix Ndist_by=`Ndistlist' + ereturn matrix N_by=`Nlist' + + ereturn matrix imse_var_rot=`mat_imse_var_rot' + ereturn matrix imse_bsq_rot=`mat_imse_bsq_rot' + ereturn matrix imse_var_dpi=`mat_imse_var_dpi' + ereturn matrix imse_bsq_dpi=`mat_imse_bsq_dpi' +end + +* Helper commands +* Estimation 
+program define binslogit_fit, eclass + version 13 + syntax varlist(min=2 numeric ts fv) [if] [in] [fw aw pw] [, deriv(integer 0) /// + p(integer 0) s(integer 0) type(string) vce(passthru) /// + xcat(varname numeric) kmat(name) dotsmean(integer 0) /// /* xmean: report x-mean? */ + xname(name) yname(name) catname(name) edge(name) /// + usereg sorted usegtools logitopt(string asis)] /* usereg: force the command to use reg; sored: sorted data? */ + + preserve + marksample touse + qui keep if `touse' + + if ("`weight'"!="") local wt [`weight'`exp'] + + tokenize `varlist' + local y_var `1' + local x_var `2' + macro shift 2 + local w_var "`*'" + local nbins=rowsof(`kmat')-1 + + tempname matxmean temp_b temp_V + mat `matxmean'=. + mat `temp_b'=. + mat `temp_V'=. + + if (`dotsmean'!=0) { + if ("`sorted'"==""|"`weight'"!=""|"`usegtools'"!="") { + if ("`usegtools'"=="") { + tempfile tmpfile + qui save `tmpfile', replace + + collapse (mean) `x_var' `wt', by(`xcat') fast + mkmat `xcat' `x_var', matrix(`matxmean') + + use `tmpfile', clear + } + else { + tempname obj + qui gstats tabstat `x_var' `wt', stats(mean) by(`xcat') matasave("`obj'") + mata: st_matrix("`matxmean'", (`obj'.getnum(.,1), `obj'.getOutputVar("`x_var'"))) + mata: mata drop `obj' + } + } + else { + tempname output + mata: `output'=binsreg_stat(`xname', `catname', `nbins', `edge', "mean", -1); /// + st_matrix("`matxmean'", `output') + mata: mata drop `output' + } + } + + * Regression? + if (`p'==0) { + capture logit `y_var' ibn.`xcat' `w_var' `wt', nocon `vce' `logitopt' + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + } + else { + error _rc + exit _rc + } + } + else { + local nseries=(`p'-`s'+1)*(`nbins'-1)+`p'+1 + local series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local series `series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + mata: binsreg_st_spdes(`xname', "`series'", "`kmat'", `catname', `p', 0, `s') + + capture logit `y_var' `series' `w_var' `wt', nocon `vce' `logitopt' + * store results + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + mata: binsreg_checkdrop("`temp_b'", "`temp_V'", `nseries') + } + else { + error _rc + exit _rc + } + } + + + ereturn clear + ereturn matrix bmat=`temp_b' + ereturn matrix Vmat=`temp_V' + ereturn matrix xmat=`matxmean' /* xcat, xbar */ +end + +mata: + + // Prediction for plotting + real matrix binslogit_plotmat(string scalar eb, string scalar eV, real scalar cval, /// + string scalar knotname, real scalar J, /// + real scalar p, real scalar s, real scalar deriv, /// + string scalar type, real scalar ngrid, string scalar muwmat, /// + real scalar nw, string scalar transform, string scalar avar, | string scalar muxmat) + { + real matrix coef, bmat, rmat, vmat, knot, xmean, wval, eval, out, fit, fit0, se, semat, Xm, Xm0, result + real scalar nseries + + nseries=(p-s+1)*(J-1)+p+1 + coef=st_matrix(eb)' + bmat=coef[|1\nseries|] + if (nw>0) rmat=coef[|(nseries+1)\rows(coef)|] + + if (type=="ci"|type=="cb") { + vfull=st_matrix(eV) + vmat=vfull[|1,1\nseries,nseries|] + } + + // Prepare evaluation points + eval=J(0,3,.) + if (args()==15) { + xmean=st_matrix(muxmat) + eval=(eval \ (xmean[,2], J(J, 1, 0), xmean[,1])) + } + if (ngrid!=0) { + eval=(eval \ binsreg_grids(knotname, ngrid)) + } + + // adjust w variables + if (nw>0) { + wvec=st_matrix(muwmat) + wval=wvec*rmat + } + else wval=0 + + fit=J(0,1,.) + se=J(0,1,.) 
+ if (p==0) { + if (args()==15) fit=(fit \ bmat) + if (ngrid!=0) { + fit=(fit \ (bmat#(J(ngrid,1,1)\.))) + fit=fit[|1 \ (rows(fit)-1)|] + } + if (type=="ci"|type=="cb") { + if (avar=="on") semat=sqrt(diagonal(vmat)) + else { + if (nw>0) { + Xm=(I(nseries), J(nseries,1,1)#wvec) + semat=sqrt(rowsum((Xm*vfull):*Xm)) + } + else semat=sqrt(diagonal(vmat)) + } + if (args()==15) se=(se \ semat) + if (ngrid!=0) { + se=(se \ (semat#(J(ngrid,1,1)\.))) + se=se[|1 \ (rows(se)-1)|] + } + } + if (type=="dots"|type=="line") { + if (transform=="T") out=(eval, logistic(fit:+wval)) + else out=(eval, fit:+wval) + } + else { + if (transform=="T") out=(eval, logistic(fit:+wval)-(logisticden(fit:+wval):*se)*cval, /// + logistic(fit:+wval)+(logisticden(fit:+wval):*se)*cval) + else out=(eval, (fit:+wval)-se*cval, (fit:+wval)+se*cval) + } + } + else { + Xm=binsreg_spdes(eval[,1], knotname, eval[,3], p, deriv, s) + if (type=="dots"|type=="line") { + if (transform=="T") { + fit=binsreg_pred(Xm, bmat, ., "xb")[,1] + if (deriv==0) { + fit=logistic(fit:+wval) + } + if (deriv==1) { + Xm0=binsreg_spdes(eval[,1], knotname, eval[,3], p, 0, s) + fit0=binsreg_pred(Xm0, bmat, ., "xb")[,1] + fit=logisticden(fit0:+wval):*fit + } + out=(eval, fit) + } + else { + fit=binsreg_pred(Xm, bmat, ., "xb")[,1] + if (deriv==0) out=(eval, fit:+wval) + else out=(eval, fit) + } + } + else { + if (avar=="on") { + result=binsreg_pred(Xm, bmat, vmat, "all") + if (transform=="T") { + Xm0=binsreg_spdes(eval[,1], knotname, eval[,3], p, 0, s) + fit0=binsreg_pred(Xm0, bmat, ., "xb")[,1] + result[,2]=logisticden(fit0:+wval):*result[,2] + + if (deriv==0) { + result[,1]=logistic(result[,1]:+wval) + } + else if (deriv==1) { + result[,1]=logisticden(fit0:+wval):*result[,1] + } + + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + else { + if (deriv==0) out=(eval, (result[,1]:+wval)-cval*result[,2], (result[,1]:+wval)+cval*result[,2]) + else out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) 
+ } + } + else { + result=binsreg_pred(Xm, bmat, vmat, "all") + if (transform=="T") { + if (deriv==0) { + if (nw>0) Xm=(Xm, J(rows(Xm),1,1)#wvec) + result[,2]=logisticden(result[,1]:+wval):*sqrt(rowsum((Xm*vfull):*Xm)) + result[,1]=logistic(result[,1]:+wval) + } + if (deriv==1) { + Xm0=binsreg_spdes(eval[,1], knotname, eval[,3], p, 0, s) + if (nw>0) { + Xm0=(Xm0, J(rows(Xm0),1,1)#wvec) + Xm=(Xm, J(rows(Xm),nw,0)) + } + fit0=binsreg_pred(Xm0, coef, ., "xb")[,1] + Xm=logisticden(fit0):*(1:-2*logistic(fit0)):*result[,1]:*Xm0 + /// + logisticden(fit0):*Xm + result[,2]=sqrt(rowsum((Xm*vfull):*Xm)) + result[,1]=logisticden(fit0):*result[,1] + } + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + else { + if (nw>0) { + if (deriv==0) Xm=(Xm, J(rows(Xm),1,1)#wvec) + else Xm=(Xm, J(rows(Xm),nw,0)) + } + result=binsreg_pred(Xm, coef, vfull, "all") + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + } + } + } + + if (type=="dots"|(type=="line"&(s==0|s-deriv<=0))) { + out[selectindex(out[,2]:==1),4]=J(sum(out[,2]),1,.) + } + if (type=="ci"|(type=="cb"&(s==0|s-deriv<=0))) { + out[selectindex(out[,2]:==1),4..5]=J(sum(out[,2]),2,.) + } + + return(out) + } + + +end + diff --git a/110/replication_package/replication/ado/plus/b/binslogit.sthlp b/110/replication_package/replication/ado/plus/b/binslogit.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..dca448f06c8905aa3993a6f1c8290205d8d29a6f --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binslogit.sthlp @@ -0,0 +1,427 @@ +{smcl} +{* *! 
version 1.2 09-OCT-2022}{...} +{viewerjumpto "Syntax" "binslogit##syntax"}{...} +{viewerjumpto "Description" "binslogit##description"}{...} +{viewerjumpto "Options" "binslogit##options"}{...} +{viewerjumpto "Examples" "binslogit##examples"}{...} +{viewerjumpto "Stored results" "binslogit##stored_results"}{...} +{viewerjumpto "References" "binslogit##references"}{...} +{viewerjumpto "Authors" "binslogit##authors"}{...} +{cmd:help binslogit} +{hline} + +{title:Title} + +{p 4 8}{hi:binslogit} {hline 2} Data-Driven Binscatter Logit Estimation with Robust Inference Procedures and Plots.{p_end} + + +{marker syntax}{...} +{title:Syntax} + +{p 4 14} {cmdab:binslogit} {depvar} {it:indvar} [{it:othercovs}] {ifin} {weight} [ {cmd:,} {opt deriv(v)} {opt at(position)} {opt nolink}{p_end} +{p 14 14} {opt dots(dotsopt)} {opt dotsgrid(dotsgridoption)} {opt dotsplotopt(dotsoption)}{p_end} +{p 14 14} {opt line(lineopt)} {opt linegrid(#)} {opt lineplotopt(lineoption)}{p_end} +{p 14 14} {opt ci(ciopt)} {opt cigrid(cigridoption)} {opt ciplotopt(rcapoption)}{p_end} +{p 14 14} {opt cb(cbopt)} {opt cbgrid(#)} {opt cbplotopt(rareaoption)}{p_end} +{p 14 14} {opt polyreg(p)} {opt polyreggrid(#)} {opt polyregcigrid(#)} {opt polyregplotopt(lineoption)}{p_end} +{p 14 14} {opth by(varname)} {cmd:bycolors(}{it:{help colorstyle}list}{cmd:)} {cmd:bysymbols(}{it:{help symbolstyle}list}{cmd:)} {cmd:bylpatterns(}{it:{help linepatternstyle}list}{cmd:)}{p_end} +{p 14 14} {opt nbins(nbinsopt)} {opt binspos(position)} {opt binsmethod(method)} {opt nbinsrot(#)} {opt samebinsby} {opt randcut(#)}{p_end} +{p 14 14} {cmd:pselect(}{it:{help numlist}}{cmd:)} {cmd:sselect(}{it:{help numlist}}{cmd:)}{p_end} +{p 14 14} {opt nsims(#)} {opt simsgrid(#)} {opt simsseed(seed)}{p_end} +{p 14 14} {opt dfcheck(n1 n2)} {opt masspoints(masspointsoption)}{p_end} +{p 14 14} {cmd:vce(}{it:{help vcetype}}{cmd:)} {opt asyvar(on/off)}{p_end} +{p 14 14} {opt level(level)} {opt logitopt(logit_option)} {opt usegtools(on/off)} {opt 
noplot} {opt savedata(filename)} {opt replace}{p_end} +{p 14 14} {opt plotxrange(min max)} {opt plotyrange(min max)} {it:{help twoway_options}} ]{p_end} + +{p 4 8} where {depvar} is the dependent variable, {it:indvar} is the independent variable for binning, and {it:othercovs} are other covariates to be controlled for.{p_end} + +{p 4 8} The degree of the piecewise polynomial p, the number of smoothness constraints s, and the derivative order v are integers +satisfying 0 <= s,v <= p, which can take different values in each case.{p_end} + +{p 4 8} {opt fweight}s and {opt pweight}s are allowed; see {help weight}.{p_end} + +{marker description}{...} +{title:Description} + +{p 4 8} {cmd:binslogit} implements binscatter logit estimation with robust inference procedures and plots, following the results in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":Cattaneo, Crump, Farrell and Feng (2022a)}. +Binscatter provides a flexible way of describing the mean relationship between two variables, after possibly adjusting for other covariates, based on partitioning/binning of the independent variable of interest. +The main purpose of this command is to generate binned scatter plots with curve estimation with robust pointwise confidence intervals and uniform confidence band. +If the binning scheme is not set by the user, the companion command {help binsregselect:binsregselect} is used to implement binscatter +in a data-driven way. +Hypothesis testing for parametric specifications of and shape restrictions on the regression function can be conducted via the +companion command {help binstest:binstest}. Hypothesis testing for pairwise group comparisons can be conducted via the +companion command {help binspwc: binspwc}. Binscatter estimation based on the least squares method can be conducted via the command {help binsreg: binsreg}. 
+{p_end} + +{p 4 8} A detailed introduction to this command is given in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Cattaneo, Crump, Farrell and Feng (2022b)}. +Companion R and Python packages with the same capabilities are available (see website below). +{p_end} + +{p 4 8} Companion commands: {help binstest:binstest} for hypothesis testing of parametric specifications and shape restrictions, +{help binspwc:binspwc} for hypothesis testing for pairwise group comparisons, +and {help binsregselect:binsregselect} for data-driven binning selection. +{p_end} + +{p 4 8} Related Stata, R and Python packages are available in the following website:{p_end} + +{p 8 8} {browse "https://nppackages.github.io/":https://nppackages.github.io/}{p_end} + + +{marker options}{...} +{title:Options} + +{dlgtab:Estimand} + +{p 4 8} {opt deriv(v)} specifies the derivative order of the regression function for estimation and plotting. +The default is {cmd:deriv(0)}, which corresponds to the function itself. +{p_end} + +{p 4 8} {opt at(position)} specifies the values of {it:othercovs} at which the estimated function is evaluated for plotting. +The default is {cmd:at(mean)}, which corresponds to the mean of {it:othercovs}. Other options are: {cmd:at(median)} for the median of {it:othercovs}, +{cmd:at(0)} for zeros, and {cmd:at(filename)} for particular values of {it:othercovs} saved in another file. +{p_end} + +{p 4 8} Note: When {cmd:at(mean)} or {cmd:at(median)} is specified, all factor variables in {it:othercovs} (if specified) are excluded from the evaluation (set as zero). +{p_end} + +{p 4 8}{opt nolink} specifies that the function within the inverse link (logistic) function be reported instead of the conditional probability function. +{p_end} + +{dlgtab:Dots} + +{p 4 8} {opt dots(dotsopt)} sets the degree of polynomial and the number of smoothness for point estimation and plotting as "dots". 
+If {cmd:dots(p s)} is specified, a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints is used. +The default is {cmd:dots(0 0)}, which corresponds to piecewise constant (canonical binscatter). +If {cmd:dots(T)} is specified, the default {cmd:dots(0 0)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:dots(F)} is specified, the dots are not included in the plot. +{p_end} + +{p 4 8} {opt dotsgrid(dotsgridoption)} specifies the number and location of dots within each bin to be plotted. +Two options are available: {it:mean} and a {it:numeric} non-negative integer. +The option {opt dotsgrid(mean)} adds the sample average of {it:indvar} within each bin to the grid of evaluation points. +The option {opt dotsgrid(#)} adds {it:#} number of evenly-spaced points to the grid of evaluation points for each bin. +Both options can be used simultaneously: for example, {opt dotsgrid(mean 5)} generates six evaluation points +within each bin containing the sample mean of {it:indvar} within each bin and five evenly-spaced points. +Given this choice, the dots are point estimates evaluated over the selected grid within each bin. +The default is {opt dotsgrid(mean)}, which corresponds to one dot per bin evaluated at the sample average of {it:indvar} within each bin (canonical binscatter). +{p_end} + +{p 4 8} {opt dotsplotopt(dotsoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the plotted dots. +{p_end} + +{dlgtab:Line} + +{p 4 8} {opt line(lineopt)} sets the degree of polynomial and the number of smoothness constraints +for plotting as a "line". If {cmd:line(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. 
+If {cmd:line(T)} is specified, {cmd:line(0 0)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:line(F)} or {cmd:line()} is specified, the line is not included in the plot. +The default is {cmd:line()}. +{p_end} + +{p 4 8} {opt linegrid(#)} specifies the number of evaluation points of an evenly-spaced grid within +each bin used for evaluation of the point estimate set by the {cmd:line(p s)} option. +The default is {cmd:linegrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for fitting/plotting the line. +{p_end} + +{p 4 8} {opt lineplotopt(lineoption)} standard graphs options to be passed on to +the {help twoway:twoway} command to modify the appearance of the plotted line. +{p_end} + +{dlgtab:Confidence Intervals} + +{p 4 8} {opt ci(ciopt)} specifies the degree of polynomial and the number of smoothness constraints +for constructing confidence intervals. If {cmd:ci(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If {cmd:ci(T)} is specified, {cmd:ci(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:ci(F)} or {cmd:ci()} is specified, the confidence intervals are not included in the plot. +The default is {cmd:ci()}. +{p_end} + +{p 4 8} {opt cigrid(cigridoption)} specifies the number and location of evaluation points in the grid +used to construct the confidence intervals set by the {opt ci(p s)} option. +Two options are available: {it:mean} and a {it:numeric} non-negative integer. +The option {opt cigrid(mean)} adds the sample average of {it:indvar} within each bin to the grid of evaluation points. +The option {opt cigrid(#)} adds {it:#} number of evenly-spaced points to the grid of evaluation points for each bin. 
+Both options can be used simultaneously: for example, {opt cigrid(mean 5)} generates six evaluation points within each bin containing the sample mean of {it:indvar} within each bin and five evenly-spaced points. +The default is {opt cigrid(mean)}, which corresponds to one evaluation point set at the sample average of {it:indvar} within each bin for confidence interval construction. +{p_end} + +{p 4 8} {opt ciplotopt(rcapoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the confidence intervals. +{p_end} + +{dlgtab:Confidence Band} + +{p 4 8} {opt cb(cbopt)} specifies the degree of polynomial and the number of smoothness constraints +for constructing the confidence band. If {cmd:cb(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If the option {cmd:cb(T)} is specified, {cmd:cb(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:cb(F)} or {cmd:cb()} is specified, the confidence band is not included in the plot. +The default is {cmd:cb()}. +{p_end} + +{p 4 8} {opt cbgrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin +used for evaluation of the point estimate set by the {cmd:cb(p s)} option. +The default is {cmd:cbgrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for confidence band construction. +{p_end} + +{p 4 8} {opt cbplotopt(rareaoption)} standard graphs options to be passed on to +the {help twoway:twoway} command to modify the appearance of the confidence band. +{p_end} + +{dlgtab:Global Polynomial Regression} + +{p 4 8} {opt polyreg(p)} sets the degree {it:p} of a global polynomial regression model for plotting. +By default, this fit is not included in the plot unless explicitly specified. 
+Recommended specification is {cmd:polyreg(3)}, which adds a cubic polynomial fit of the regression function of interest to the binned scatter plot. +{p_end} + +{p 4 8} {opt polyreggrid(#)} specifies the number of evaluation points of an evenly-spaced grid +within each bin used for evaluation of the point estimate set by the {cmd:polyreg(p)} option. +The default is {cmd:polyreggrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for confidence interval construction. +{p_end} + +{p 4 8} {opt polyregcigrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used for constructing confidence intervals based on polynomial regression set by the {cmd:polyreg(p)} option. +The default is {cmd:polyregcigrid(0)}, which corresponds to not plotting confidence intervals for the global polynomial regression approximation. +{p_end} + +{p 4 8} {opt polyregplotopt(lineoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the global polynomial regression fit. +{p_end} + +{dlgtab:Subgroup Analysis} + +{p 4 8} {opt by(varname)} specifies the variable containing the group indicator to perform subgroup analysis; +both numeric and string variables are supported. +When {opt by(varname)} is specified, {cmdab:binslogit} implements estimation and inference for each subgroup separately, +but produces a common binned scatter plot. +By default, the binning structure is selected for each subgroup separately, +but see the option {cmd:samebinsby} below for imposing a common binning structure across subgroups. +{p_end} + +{p 4 8} {cmd:bycolors(}{it:{help colorstyle}list}{cmd:)} specifies an ordered list of colors +for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{p 4 8} {cmd:bysymbols(}{it:{help symbolstyle}list}{cmd:)} specifies an ordered list of symbols +for plotting each subgroup series defined by the option {opt by()}. 
+{p_end} + +{p 4 8} {cmd:bylpatterns(}{it:{help linepatternstyle}list}{cmd:)} specifies an ordered list of line patterns +for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{dlgtab:Binning/Degree/Smoothness Selection} + +{p 4 8} {opt nbins(nbinsopt)} sets the number of bins for partitioning/binning of {it:indvar}. +If {cmd:nbins(T)} or {cmd:nbins()} (default) is specified, the number of bins is selected via the companion command {help binsregselect:binsregselect} +in a data-driven, optimal way whenever possible. If a {help numlist:numlist} with more than one number is specified, +the number of bins is selected within this list via the companion command {help binsregselect:binsregselect}. +{p_end} + +{p 4 8} {opt binspos(position)} specifies the position of binning knots. +The default is {cmd:binspos(qs)}, which corresponds to quantile-spaced binning (canonical binscatter). +Other options are: {cmd:es} for evenly-spaced binning, or a {help numlist} for manual specification of +the positions of inner knots (which must be within the range of {it:indvar}). +{p_end} + +{p 4 8} {opt binsmethod(method)} specifies the method for data-driven selection of the number of bins via the companion command {help binsregselect:binsregselect}. +The default is {cmd:binsmethod(dpi)}, which corresponds to the IMSE-optimal direct plug-in rule. +The other option is: {cmd:rot} for rule of thumb implementation. +{p_end} + +{p 4 8} {opt nbinsrot(#)} specifies an initial number of bins value used to construct the DPI number of bins selector. +If not specified, the data-driven ROT selector is used instead. +{p_end} + +{p 4 8} {opt samebinsby} forces a common partitioning/binning structure across all subgroups specified by the option {cmd:by()}. +The knot positions are selected according to the option {cmd:binspos()} and using the full sample.
+If {cmd:nbins()} is not specified, then the number of bins is selected via the companion command +{help binsregselect:binsregselect} and using the full sample. +{p_end} + +{p 4 8} {opt randcut(#)} specifies the upper bound on a uniformly distributed variable used to draw a subsample +for bins/degree/smoothness selection. +Observations for which {cmd:runiform()<=#} are used. # must be between 0 and 1. +By default, max(5,000, 0.01n) observations are used if the sample size n>5,000. +{p_end} + +{p 4 8} {opt pselect(numlist)} specifies a list of numbers within which the degree of polynomial {it:p} for +point estimation is selected. Piecewise polynomials of the selected optimal degree {it:p} +are used to construct dots or line if {cmd:dots(T)} or {cmd:line(T)} is specified, +whereas piecewise polynomials of degree {it:p+1} are used to construct confidence intervals +or confidence band if {cmd:ci(T)} or {cmd:cb(T)} is specified. +{p_end} + +{p 4 8} {opt sselect(numlist)} specifies a list of numbers within which +the number of smoothness constraints {it:s} +for point estimation is selected. Piecewise polynomials with the selected optimal +{it:s} smoothness constraints are used to construct dots or line +if {cmd:dots(T)} or {cmd:line(T)} is specified, +whereas piecewise polynomials with {it:s+1} constraints are used to construct +confidence intervals or confidence band if {cmd:ci(T)} or {cmd:cb(T)} is specified. +If not specified, for each value {it:p} supplied in the +option {cmd:pselect()}, only the piecewise polynomial with the maximum smoothness is considered, i.e., {it:s=p}. +{p_end} + +{p 4 8} Note: To implement the degree or smoothness selection, in addition to {cmd:pselect()} +or {cmd:sselect()}, {cmd:nbins(#)} must be specified. +{p_end} + +{dlgtab:Simulation} + +{p 4 8} {opt nsims(#)} specifies the number of random draws for constructing confidence bands.
+The default is {cmd:nsims(500)}, which corresponds to 500 draws from a standard Gaussian random vector of size [(p+1)*J - (J-1)*s]. +A large number of random draws is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsgrid(#)} specifies the number of evaluation points of an evenly-spaced grid +within each bin used for evaluation of the supremum operation needed to construct confidence bands. +The default is {cmd:simsgrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin +for approximating the supremum operator. +A large number of evaluation points is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsseed(#)} sets the seed for simulations. +{p_end} + +{dlgtab:Mass Points and Degrees of Freedom} + +{p 4 8} {opt dfcheck(n1 n2)} sets cutoff values for minimum effective sample size checks, +which take into account the number of unique values of {it:indvar} (i.e., adjusting for the number of mass points), +number of clusters, and degrees of freedom of the different statistical models considered. +The default is {cmd:dfcheck(20 30)}. See Cattaneo, Crump, Farrell and Feng (2022b) for more details. +{p_end} + +{p 4 8} {opt masspoints(masspointsoption)} specifies how mass points in {it:indvar} are handled. +By default, all mass point and degrees of freedom checks are implemented. +Available options: +{p_end} +{p 8 8} {opt masspoints(noadjust)} omits mass point checks and the corresponding effective sample size adjustments.{p_end} +{p 8 8} {opt masspoints(nolocalcheck)} omits within-bin mass point and degrees of freedom checks.{p_end} +{p 8 8} {opt masspoints(off)} sets {opt masspoints(noadjust)} and {opt masspoints(nolocalcheck)} simultaneously.{p_end} +{p 8 8} {opt masspoints(veryfew)} forces the command to proceed as if {it:indvar} has only a few number of mass points (i.e., distinct values). 
+In other words, forces the command to proceed as if the mass point and degrees of freedom checks were failed.{p_end} + +{dlgtab:Standard Error} + +{p 4 8} {cmd:vce(}{it:{help vcetype}}{cmd:)} specifies the {it:vcetype} for variance estimation used by +the command {help logit##options:logit}. +The default is {cmd:vce(robust)}. +{p_end} + +{p 4 8} {opt asyvar(on/off)} specifies the method used to compute standard errors. +If {cmd:asyvar(on)} is specified, the standard error of the nonparametric component is used and the uncertainty related to other control variables {it:othercovs} is omitted. +Default is {cmd:asyvar(off)}, that is, the uncertainty related to {it:othercovs} is taken into account. +{p_end} + +{dlgtab:Other Options} + +{p 4 8} {opt level(#)} sets the nominal confidence level for confidence interval and confidence band estimation. Default is {cmd:level(95)}. +{p_end} + +{p 4 8} {opt logitopt(logit_option)} options to be passed on to the command {help logit##options:logit}. +For example, options that control for the optimization process can be added here. +{p_end} + +{p 4 8}{opt usegtools(on/off)} forces the use of several commands in the community-distributed Stata package {cmd:gtools} to speed the computation up, if {it:on} is specified. +Default is {cmd:usegtools(off)}. +{p_end} + +{p 4 8} For more information about the package {cmd:gtools}, please see {browse "https://gtools.readthedocs.io/en/latest/index.html":https://gtools.readthedocs.io/en/latest/index.html}. +{p_end} + +{p 4 8} {opt noplot} omits binscatter plotting. +{p_end} + +{p 4 8} {opt savedata(filename)} specifies a filename for saving all data underlying the binscatter plot (and more). +{p_end} + +{p 4 8} {opt replace} overwrites the existing file when saving the graph data. +{p_end} + +{p 4 8} {opt plotxrange(min max)} specifies the range of the x-axis for plotting. 
Observations outside the range are dropped in the plot.{p_end} + +{p 4 8} {opt plotyrange(min max)} specifies the range of the y-axis for plotting. Observations outside the range are dropped in the plot.{p_end} + +{p 4 8} {it:{help twoway_options}} any unrecognized options are appended to the end of the twoway command generating the binned scatter plot. +{p_end} + + +{marker examples}{...} +{title:Examples} + +{p 4 8} Setup{p_end} +{p 8 8} . {stata sysuse auto}{p_end} + +{p 4 8} Run a binscatter logit regression and report the plot{p_end} +{p 8 8} . {stata binslogit foreign weight mpg}{p_end} + +{p 4 8} Add confidence intervals and confidence band{p_end} +{p 8 8} . {stata binslogit foreign weight mpg, ci(1 1) nbins(5)}{p_end} + + +{marker stored_results}{...} +{title:Stored results} + +{synoptset 17 tabbed}{...} +{p2col 5 17 21 2: Scalars}{p_end} +{synopt:{cmd:e(N)}}number of observations{p_end} +{synopt:{cmd:e(level)}}confidence level{p_end} +{synopt:{cmd:e(dots_p)}}degree of polynomial for dots{p_end} +{synopt:{cmd:e(dots_s)}}smoothness of polynomial for dots{p_end} +{synopt:{cmd:e(line_p)}}degree of polynomial for line{p_end} +{synopt:{cmd:e(line_s)}}smoothness of polynomial for line{p_end} +{synopt:{cmd:e(ci_p)}}degree of polynomial for confidence interval{p_end} +{synopt:{cmd:e(ci_s)}}smoothness of polynomial for confidence interval{p_end} +{synopt:{cmd:e(cb_p)}}degree of polynomial for confidence band{p_end} +{synopt:{cmd:e(cb_s)}}smoothness of polynomial for confidence band{p_end} +{p2col 5 17 21 2: Matrices}{p_end} +{synopt:{cmd:e(N_by)}}number of observations for each group{p_end} +{synopt:{cmd:e(Ndist_by)}}number of distinct values for each group{p_end} +{synopt:{cmd:e(Nclust_by)}}number of clusters for each group{p_end} +{synopt:{cmd:e(nbins_by)}}number of bins for each group{p_end} +{synopt:{cmd:e(cval_by)}}critical value for each group, used for confidence bands{p_end} +{synopt:{cmd:e(imse_var_rot)}}variance constant in IMSE, ROT selection{p_end} 
+{synopt:{cmd:e(imse_bsq_rot)}}bias constant in IMSE, ROT selection{p_end}
+{synopt:{cmd:e(imse_var_dpi)}}variance constant in IMSE, DPI selection{p_end}
+{synopt:{cmd:e(imse_bsq_dpi)}}bias constant in IMSE, DPI selection{p_end}
+
+{marker references}{...}
+{title:References}
+
+{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022a.
+{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":On Binscatter}.
+{it:arXiv:1902.09608}.
+{p_end}
+
+{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022b.
+{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Binscatter Regressions}.
+{it:arXiv:1902.09615}.
+{p_end}
+
+
+{marker authors}{...}
+{title:Authors}
+
+{p 4 8} Matias D. Cattaneo, Princeton University, Princeton, NJ.
+{browse "mailto:cattaneo@princeton.edu":cattaneo@princeton.edu}.
+{p_end}
+
+{p 4 8} Richard K. Crump, Federal Reserve Bank of New York, New York, NY.
+{browse "mailto:richard.crump@ny.frb.org":richard.crump@ny.frb.org}.
+{p_end}
+
+{p 4 8} Max H. Farrell, University of Chicago, Chicago, IL.
+{browse "mailto:max.farrell@chicagobooth.edu":max.farrell@chicagobooth.edu}.
+{p_end}
+
+{p 4 8} Yingjie Feng, Tsinghua University, Beijing, China.
+{browse "mailto:fengyingjiepku@gmail.com":fengyingjiepku@gmail.com}.
+{p_end}
+
diff --git a/110/replication_package/replication/ado/plus/b/binsprobit.ado b/110/replication_package/replication/ado/plus/b/binsprobit.ado
new file mode 100644
index 0000000000000000000000000000000000000000..c898b58775aedba1499d746e93fd1d49ec752e3e
--- /dev/null
+++ b/110/replication_package/replication/ado/plus/b/binsprobit.ado
@@ -0,0 +1,2390 @@
+*!
version 1.2 09-Oct-2022 + +capture program drop binsprobit +program define binsprobit, eclass + version 13 + + syntax varlist(min=2 numeric fv ts) [if] [in] [fw pw] [, deriv(integer 0) at(string asis) nolink /// + probitopt(string asis) /// + dots(string) dotsgrid(string) dotsplotopt(string asis) /// + line(string) linegrid(integer 20) lineplotopt(string asis) /// + ci(string) cigrid(string) ciplotopt(string asis) /// + cb(string) cbgrid(integer 20) cbplotopt(string asis) /// + polyreg(string) polyreggrid(integer 20) polyregcigrid(integer 0) polyregplotopt(string asis) /// + by(varname) bycolors(string asis) bysymbols(string asis) bylpatterns(string asis) /// + nbins(string) binspos(string) binsmethod(string) nbinsrot(string) /// + pselect(numlist integer >=0) sselect(numlist integer >=0) /// + samebinsby randcut(numlist max=1 >=0 <=1) /// + nsims(integer 500) simsgrid(integer 20) simsseed(numlist integer max=1 >=0) /// + dfcheck(numlist integer max=2 >=0) masspoints(string) usegtools(string) /// + vce(passthru) level(real 95) asyvar(string) /// + noplot savedata(string asis) replace /// + plotxrange(numlist asc max=2) plotyrange(numlist asc max=2) *] + + ********************************************* + * Regularization constant (for checking only) + local qrot=2 + + ************************************** + * Create weight local + if ("`weight'"!="") { + local wt [`weight'`exp'] + local wtype=substr("`weight'",1,1) + } + + ********************** + ** Extract options *** + ********************** + * report the results for the cond. mean model? + if ("`link'"!="") local transform "F" + else local transform "T" + + * default vce, clustered? 
+ if ("`vce'"=="") local vce "vce(robust)" + local vcetemp: subinstr local vce "vce(" "", all + local vcetemp: subinstr local vcetemp ")" "", all + tokenize "`vcetemp'" + if ("`1'"=="cl"|"`1'"=="clu"|"`1'"=="clus"|"`1'"=="clust"| /// + "`1'"=="cluste"|"`1'"=="cluster") { + local clusterON "T" /* Mark cluster is specified */ + local clustervar `2' + } + if ("`vce'"=="vce(oim)"|"`vce'"=="vce(opg)") local vce_select "vce(ols)" + else local vce_select "`vce'" + + if ("`asyvar'"=="") local asyvar "off" + + if ("`binsmethod'"=="rot") local binsmethod "ROT" + if ("`binsmethod'"=="dpi") local binsmethod "DPI" + if ("`binsmethod'"=="") local binsmethod "DPI" + if ("`binspos'"=="es") local binspos "ES" + if ("`binspos'"=="qs") local binspos "QS" + if ("`binspos'"=="") local binspos "QS" + + + * analyze options related to degrees ************* + if ("`dots'"!="T"&"`dots'"!="F"&"`dots'"!="") { + numlist "`dots'", integer max(2) range(>=0) + local dots=r(numlist) + } + if ("`line'"!="T"&"`line'"!="F"&"`line'"!="") { + numlist "`line'", integer max(2) range(>=0) + local line=r(numlist) + } + if ("`ci'"!="T"&"`ci'"!="F"&"`ci'"!="") { + numlist "`ci'", integer max(2) range(>=0) + local ci=r(numlist) + } + if ("`cb'"!="T"&"`cb'"!="F"&"`cb'"!="") { + numlist "`cb'", integer max(2) range(>=0) + local cb=r(numlist) + } + + + if ("`dots'"=="F") { /* shut down dots */ + local dots "" + local dotsgrid 0 + } + if ("`line'"=="F") local line "" + if ("`ci'"=="F") local ci "" + if ("`cb'"=="F") local cb "" + + *************************************************************** + * 4 cases: select J, select p, user specified both, and error + local selection "" + + * analyze nbins + if ("`nbins'"=="T") local nbins=0 + local len_nbins=0 + if ("`nbins'"!=""&"`nbins'"!="F") { + numlist "`nbins'", integer sort + local nbins=r(numlist) + local len_nbins: word count `nbins' + } + + * analyze numlist in pselect and sselect + local len_p=0 + local len_s=0 + + if ("`pselect'"!="") { + numlist "`pselect'", 
integer range(>=`deriv') sort + local plist=r(numlist) + } + + if ("`sselect'"!="") { + numlist "`sselect'", integer range(>=0) sort + local slist=r(numlist) + } + + local len_p: word count `plist' + local len_s: word count `slist' + + if (`len_p'==1&`len_s'==0) { + local slist `plist' + local len_s=1 + } + if (`len_p'==0&`len_s'==1) { + local plist `slist' + local len_p=1 + } + + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + if ("`nbins'"!=""|"`pselect'"!=""|"`sselect'"!="") { + di as error "nbins(), pselect() or sselect() incorrectly specified." + exit + } + } + + * 1st case: select J + if (("`nbins'"=="0"|`len_nbins'>1|"`nbins'"=="")&("`binspos'"=="ES"|"`binspos'"=="QS")) local selection "J" + if ("`selection'"=="J") { + if (`len_p'>1|`len_s'>1) { + if ("`nbins'"=="") { + di as error "nbins() must be specified for degree/smoothness selection." + exit + } + else { + di as error "Only one p and one s are allowed to select # of bins." + exit + } + } + if ("`plist'"=="") local plist=`deriv' + if ("`slist'"=="") local slist=`plist' + if ("`dots'"!=""&"`dots'"!="T"&"`dots'"!="F") { /* respect user-specified dots */ + local plist: word 1 of `dots' + local slist: word 2 of `dots' + if ("`slist'"=="") local slist `plist' + } + if ("`dots'"==""|"`dots'"=="T") local dots `plist' `slist' /* selection is based on dots */ + if ("`line'"=="T") local line `plist' `slist' + if ("`ci'"=="T") local ci `=`plist'+1' `=`slist'+1' + if ("`cb'"=="T") local cb `=`plist'+1' `=`slist'+1' + local len_p=1 + local len_s=1 + } /* e.g., binsreg y x, nbins(a b) or nbins(T) or pselect(a) nbins(T) */ + + + * 2nd case: select P (at least for one object) + if ("`selection'"!="J" & ("`dots'"==""|"`dots'"=="T"|"`line'"=="T"|"`ci'"=="T"|"`cb'"=="T")) { + local pselectOK "T" /* p selection CAN be turned on as long as one of the four is T */ + } + + if ("`pselectOK'"=="T" & `len_nbins'==1 & (`len_p'>1|`len_s'>1)) { + local selection "P" + } /* e.g., binsreg y x, pselect(a b) or pselect() dots(T) */ + + * 
3rd case: completely user-specified J and p + if ((`len_p'<=1&`len_s'<=1) & "`selection'"!="J") { + local selection "NA" + if ("`dots'"==""|"`dots'"=="T") { + if (`len_p'==1&`len_s'==1) local dots `plist' `slist' + else local dots `deriv' `deriv' /* e.g., binsreg y x or , dots(0 0) nbins(20) */ + } + tokenize `dots' + if ("`2'"=="") local 2 `1' + if ("`line'"=="T") { + if (`len_p'==1&`len_s'==1) local line `plist' `slist' + else local line `dots' + } + if ("`ci'"=="T") { + if (`len_p'==1&`len_s'==1) local ci `=`plist'+1' `=`slist'+1' + else local ci `=`1'+1' `=`2'+1' + } + if ("`cb'"=="T") { + if (`len_p'==1&`len_s'==1) local cb `=`plist'+1' `=`slist'+1' + else local cb `=`1'+1' `=`2'+1' + } + } + + * exclude all other cases + if ("`selection'"=="") { + di as error "Degree, smoothness, or # of bins are not correctly specified." + exit + } + + ****** Now, extract from dots, line, etc. ************ + * dots + tokenize `dots' + local dots_p "`1'" + local dots_s "`2'" + if ("`dots_p'"==""|"`dots_p'"=="T") local dots_p=. + if ("`dots_s'"=="") local dots_s `dots_p' + + if ("`dotsgrid'"=="") local dotsgrid "mean" + local dotsngrid_mean=0 + if (strpos("`dotsgrid'","mean")!=0) { + local dotsngrid_mean=1 + local dotsgrid: subinstr local dotsgrid "mean" "", all + } + if (wordcount("`dotsgrid'")==0) local dotsngrid=0 + else { + confirm integer n `dotsgrid' + local dotsngrid `dotsgrid' + } + local dotsntot=`dotsngrid_mean'+`dotsngrid' + + + * line + tokenize `line' + local line_p "`1'" + local line_s "`2'" + local linengrid `linegrid' + if ("`line'"=="") local linengrid=0 + if ("`line_p'"==""|"`line_p'"=="T") local line_p=. 
+ if ("`line_s'"=="") local line_s `line_p' + + * ci + if ("`cigrid'"=="") local cigrid "mean" + local cingrid_mean=0 + if (strpos("`cigrid'","mean")!=0) { + local cingrid_mean=1 + local cigrid: subinstr local cigrid "mean" "", all + } + if (wordcount("`cigrid'")==0) local cingrid=0 + else { + confirm integer n `cigrid' + local cingrid `cigrid' + } + local cintot=`cingrid_mean'+`cingrid' + + tokenize `ci' + local ci_p "`1'" + local ci_s "`2'" + if ("`ci'"=="") local cintot=0 + if ("`ci_p'"==""|"`ci_p'"=="T") local ci_p=. + if ("`ci_s'"=="") local ci_s `ci_p' + + * cb + tokenize `cb' + local cb_p "`1'" + local cb_s "`2'" + local cbngrid `cbgrid' + if ("`cb'"=="") local cbngrid=0 + if ("`cb_p'"==""|"`cb_p'"=="T") local cb_p=. + if ("`cb_s'"=="") local cb_s `cb_p' + + * Add warnings about degrees for estimation and inference + if ("`selection'"=="J") { + if ("`ci_p'"!=".") { + if (`ci_p'<=`dots_p') { + local ci_p=`dots_p'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the degree for dots()." + } + } + if ("`cb_p'"!=".") { + if (`cb_p'<=`dots_p') { + local cb_p=`dots_p'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. It must be greater than the degree for dots()." + } + } + } + if ("`selection'"=="NA") { + if ("`ci'"!=""|"`cb'"!="") { + di as text "Warning: Confidence intervals/bands are valid when nbins() is much larger than IMSE-optimal choice." + } + } + * if selection==P, compare ci_p/cb_p with P_opt later + + * poly fit + local polyregngrid `polyreggrid' + local polyregcingrid `polyregcigrid' + if ("`polyreg'"!="") { + confirm integer n `polyreg' + } + else { + local polyregngrid=0 + } + + * range of x axis and y axis? 
+ tokenize `plotxrange' + local min_xr "`1'" + local max_xr "`2'" + tokenize `plotyrange' + local min_yr "`1'" + local max_yr "`2'" + + + * Simuls + local simsngrid=`simsgrid' + + * Record if nbins specified by users, set default + local nbins_full `nbins' /* local save common nbins */ + if ("`selection'"=="NA") local binselectmethod "User-specified" + else { + if ("`binsmethod'"=="DPI") local binselectmethod "IMSE-optimal plug-in choice" + if ("`binsmethod'"=="ROT") local binselectmethod "IMSE-optimal rule-of-thumb choice" + if ("`selection'"=="J") local binselectmethod "`binselectmethod' (select # of bins)" + if ("`selection'"=="P") local binselectmethod "`binselectmethod' (select degree and smoothness)" + } + + * Mass point check? + if ("`masspoints'"=="") { + local massadj "T" + local localcheck "T" + } + else if ("`masspoints'"=="off") { + local massadj "F" + local localcheck "F" + } + else if ("`masspoints'"=="noadjust") { + local massadj "F" + local localcheck "T" + } + else if ("`masspoints'"=="nolocalcheck") { + local massadj "T" + local localcheck "F" + } + else if ("`masspoints'"=="veryfew") { + local fewmasspoints "T" /* count mass point, but turn off checks */ + } + + * extract dfcheck + if ("`dfcheck'"=="") local dfcheck 20 30 + tokenize `dfcheck' + local dfcheck_n1 "`1'" + local dfcheck_n2 "`2'" + + * evaluate at w from another dataset? + if (`"`at'"'!=`""'&`"`at'"'!=`"mean"'&`"`at'"'!=`"median"'&`"`at'"'!=`"0"') local atwout "user" + + * use gtools commands instead? + if ("`usegtools'"=="off") local usegtools "" + if ("`usegtools'"=="on") local usegtools usegtools + if ("`usegtools'"!="") { + capture which gtools + if (_rc) { + di as error "Gtools package not installed." 
+ exit + } + local localcheck "F" + local sel_gtools "on" + * use gstats tab instead of tabstat/collapse + * use gquantiles instead of _pctile + * use gunique instead of binsreg_uniq + * use fasterxtile instead of irecode (within binsreg_irecode) + * shut down local checks & do not sort + } + + ************************* + **** error checks ******* + ************************* + if (`deriv'<0) { + di as error "Derivative incorrectly specified." + exit + } + if (`deriv'>1&"`transform'"=="T") { + di as error "deriv cannot be greater than 1 if the conditional probability is requested." + exit + } + if (`dotsngrid'<0|`linengrid'<0|`cingrid'<0|`cbngrid'<0|`simsngrid'<0) { + di as error "Number of evaluation points incorrectly specified." + exit + } + if (`level'>100|`level'<0) { + di as error "Confidence level incorrectly specified." + exit + } + if ("`dots_p'"!=".") { + if (`dots_p'<`dots_s') { + di as error "p cannot be smaller than s." + exit + } + if (`dots_p'<`deriv') { + di as error "p for dots cannot be less than deriv." + exit + } + } + if ("`line_p'"!=".") { + if (`line_p'<`line_s') { + di as error "p cannot be smaller than s." + exit + } + if (`line_p'<`deriv') { + di as error "p for line cannot be less than deriv." + exit + } + } + if ("`ci_p'"!=".") { + if (`ci_p'<`ci_s') { + di as error "p cannot be smaller than s." + exit + } + if (`ci_p'<`deriv') { + di as error "p for CI cannot be less than deriv." + exit + } + } + if ("`cb_p'"!=".") { + if (`cb_p'<`cb_s') { + di as error "p cannot be smaller than s." + exit + } + if (`cb_p'<`deriv') { + di as error "p for CB cannot be less than deriv." + exit + } + } + if ("`polyreg'"!="") { + if (`polyreg'<`deriv') { + di as error "polyreg() cannot be less than deriv()." + exit + } + } + + if (`"`savedata'"'!=`""') { + if ("`replace'"=="") { + confirm new file `"`savedata'.dta"' + } + if ("`plot'"!="") { + di as error "plot cannot be turned off if graph data are requested." 
+ exit + } + } + if (`polyregcingrid'!=0&"`polyreg'"=="") { + di as error "polyreg() is missing." + exit + } + if ("`binsmethod'"!="DPI"&"`binsmethod'"!="ROT") { + di as error "binsmethod incorrectly specified." + exit + } + ******** END error checking *************************** + + * Mark sample + preserve + + * Parse varlist into y_var, x_var and w_var + tokenize `varlist' + fvrevar `1', tsonly + local y_var "`r(varlist)'" + local y_varname "`1'" + fvrevar `2', tsonly + local x_var "`r(varlist)'" + local x_varname "`2'" + + macro shift 2 + local w_var "`*'" + * read eval point for w from another file + if ("`atwout'"=="user") { + append using `at' + } + + fvrevar `w_var', tsonly + local w_var "`r(varlist)'" + local nwvar: word count `w_var' + + * Save the last obs in a vector and then drop it + tempname wuser /* a vector used to keep eval for w */ + if ("`atwout'"=="user") { + mata: st_matrix("`wuser'", st_data(`=_N', "`w_var'")) + qui drop in `=_N' + } + + * Get positions of factor vars + local indexlist "" + local i = 1 + foreach v in `w_var' { + if strpos("`v'", ".") == 0 { + local indexlist `indexlist' `i' + } + local ++i + } + + * add a default for at + if (`"`at'"'==""&`nwvar'>0) { + local at "mean" + } + + marksample touse + markout `touse' `by', strok + qui keep if `touse' + local nsize=_N /* # of rows in the original dataset */ + + if ("`usegtools'"==""&("`masspoints'"!="off"|"`binspos'"=="QS")) { + if ("`:sortedby'"!="`x_var'") { + di as text in gr "Sorting dataset on `x_varname'..." + di as text in gr "Note: This step is omitted if dataset already sorted by `x_varname'." 
+ sort `x_var', stable + } + local sorted "sorted" + } + + if ("`wtype'"=="f") qui sum `x_var' `wt', meanonly + else qui sum `x_var', meanonly + + local xmin=r(min) + local xmax=r(max) + local Ntotal=r(N) /* total sample size, with wt */ + * define the support of plot + if ("`plotxrange'"!="") { + local xsc `plotxrange' + if (wordcount("`xsc'")==1) local xsc `xsc' `xmax' + } + else local xsc `xmin' `xmax' + + * Effective sample size + local eN=`nsize' + * DO NOT check mass points and clusters outside loop unless needed + + * Check number of unique byvals & create local storing byvals + local byvarname `by' + if "`by'"!="" { + capture confirm numeric variable `by' + if _rc { + local bystring "T" + * generate a numeric version + tempvar by + tempname bylabel + qui egen `by'=group(`byvarname'), lname(`bylabel') + } + + local bylabel `:value label `by'' /* catch value labels for numeric by-vars too */ + + tempname byvalmatrix + qui tab `by', nofreq matrow(`byvalmatrix') + + local bynum=r(r) + forvalues i=1/`bynum' { + local byvals `byvals' `=`byvalmatrix'[`i',1]' + } + } + else local bynum=1 + + * Default colors, symbols, linepatterns + if (`"`bycolors'"'==`""') local bycolors /// + navy maroon forest_green dkorange teal cranberry lavender /// + khaki sienna emidblue emerald brown erose gold bluishgray + if (`"`bysymbols'"'==`""') local bysymbols /// + O D T S + X A a | V o d s t x + if (`"`bylpatterns'"'==`""') { + forval i=1/`bynum' { + local bylpatterns `bylpatterns' solid + } + } + + * Temp name in MATA + tempname xvec yvec byvec cluvec binedges + mata: `xvec'=st_data(., "`x_var'"); `yvec'=st_data(.,"`y_var'"); `byvec'=.; `cluvec'=. + + ******************************************************* + *** Mass point counting ******************************* + tempname Ndistlist Nclustlist mat_imse_var_rot mat_imse_bsq_rot mat_imse_var_dpi mat_imse_bsq_dpi + mat `Ndistlist'=J(`bynum',1,.) + mat `Nclustlist'=J(`bynum',1,.) 
+ * Matrices saving imse + mat `mat_imse_var_rot'=J(`bynum',1,.) + mat `mat_imse_bsq_rot'=J(`bynum',1,.) + mat `mat_imse_var_dpi'=J(`bynum',1,.) + mat `mat_imse_bsq_dpi'=J(`bynum',1,.) + + if (`bynum'>1) mata: `byvec'=st_data(.,"`by'") + if ("`clusterON'"=="T") mata: `cluvec'=st_data(.,"`clustervar'") + + ******************************************************** + ********** Bins, based on FULL sample ****************** + ******************************************************** + * knotlist: inner knot seq; knotlistON: local, knot available before loop + + tempname fullkmat /* matrix name for saving knots based on the full sample */ + + * Extract user-specified knot list + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + capture numlist "`binspos'", ascending + if (_rc==0) { + local knotlistON "T" + local knotlist `binspos' + local nbins: word count `knotlist' + local first: word 1 of `knotlist' + local last: word `nbins' of `knotlist' + if (`first'<=`xmin'|`last'>=`xmax') { + di as error "Inner knots specified out of allowed range." + exit + } + else { + local nbins=`nbins'+1 + local nbins_full `nbins' + local pos "user" + + foreach el of local knotlist { + mat `fullkmat'=(nullmat(`fullkmat') \ `el') + } + mat `fullkmat'=(`xmin' \ `fullkmat' \ `xmax') + } + } + else { + di as error "numeric list incorrectly specified in binspos()." + exit + } + } + + * Discrete x? + if ("`fewmasspoints'"!="") local fullfewobs "T" + + * Bin selection using the whole sample if + if ("`fullfewobs'"==""&"`selection'"!="NA"&(("`by'"=="")|(("`by'"!="")&("`samebinsby'"!="")))) { + local selectfullON "T" + } + + if ("`selectfullON'"=="T") { + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xvec', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + } + * # of clusters + local Nclust=. 
+ if ("`clusterON'"=="T") { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + local eN=min(`eN', `Nclust') /* effective sample size */ + } + + + * Check effective sample size + if ("`dots_p'"==".") local dotspcheck=6 + else local dotspcheck=`dots_p' + * Check effective sample size + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`dotspcheck'+1+`qrot')) { + di as text in gr "Warning: Too small effective sample size for bin selection." /// + _newline _skip(9) "# of mass points or clusters used and by() option ignored." + local by "" + local byvals "" + local fullfewobs "T" + local binspos "QS" /* forced to be QS */ + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `Ntotal'>5000) { + local randcut1k=max(5000/`Ntotal', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5,000, 0.01n) observations if the sample size n>5000. To use the full sample, set randcut(1)." + } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') bins(`dots_p' `dots_s') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`nbins'==.) 
{ + local nbins=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`dots'"=="T"|"`dots'"=="") { + local dots_p=`binsp' + local dots_s=`binss' + } + if ("`line'"=="T") { + local line_p=`binsp' + local line_s=`binss' + } + if ("`ci'"!="T"&"`ci'"!="") { + if (`ci_p'<=`binsp') { + local ci_p=`binsp'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`ci'"=="T") { + local ci_p=`binsp'+1 + local ci_s=`binss'+1 + } + if ("`cb'"!="T"&"`cb'"!="") { + if (`cb_p'<=`binsp') { + local cb_p=`binsp'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. 
It must be greater than the IMSE-optimal degree." + } + } + if ("`cb'"=="T") { + local cb_p=`binsp'+1 + local cb_s=`binss'+1 + } + } + } + } + + if (("`selectfullON'"=="T"|("`selection'"=="NA"&"`samebinsby'"!=""))&"`fullfewobs'"=="") { + * Save in a knot list + local knotlistON "T" + local nbins_full=`nbins' + if ("`binspos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `fullkmat'=(nullmat(`fullkmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else if ("`binspos'"=="QS") { + if (`nbins'==1) mat `fullkmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `wt', nq(`nbins') `usegtools' + mat `fullkmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + + *** Placement name, for display ************ + if ("`pos'"=="user") { + local binselectmethod "User-specified" + local placement "User-specified" + } + else if ("`binspos'"=="ES") { + local placement "Evenly-spaced" + } + else if ("`binspos'"=="QS") { + local placement "Quantile-spaced" + } + + * NOTE: ALL checkings are put within the loop + + * Set seed + if ("`simsseed'"!="") set seed `simsseed' + + * alpha quantile (for two-sided CI) + local alpha=(100-(100-`level')/2)/100 + + *************************************************************************** + *************** Preparation before loop************************************ + *************************************************************************** + + ********** Prepare vars for plotting ******************** + * names for mata objects storing graph data + * plotmat: final output (defined outside); + * plotmatby: output for each group + tempname plotmat plotmatby xsub ysub byindex xcatsub + tempname Xm Xm0 mata_fit mata_se /* temp name for mata obj */ + + * count the number of requested columns, record the positions + local ncolplot=1 /* 1st col reserved for group */ + if ("`plot'"=="") { + if (`dotsntot'!=0) { + local dots_start=`ncolplot'+1 + local dots_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + } + if 
(`linengrid'!=0&"`fullfewobs'"=="") { + local line_start=`ncolplot'+1 + local line_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + } + if (`polyregngrid'!=0) { + local poly_start=`ncolplot'+1 + local poly_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + if (`polyregcingrid'!=0) { + local polyci_start=`ncolplot'+1 + local polyci_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + } + if (`cintot'!=0) { + local ci_start=`ncolplot'+1 + local ci_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + if (`cbngrid'!=0&"`fullfewobs'"=="") { + local cb_start=`ncolplot'+1 + local cb_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + } + mata: `plotmat'=J(0,`ncolplot',.) + + * mark the (varying) last row (for plotting) + local bylast=0 + ******************************************************************* + * temp var: bin id + tempvar xcat + qui gen `xcat'=. in 1 + + * matrix names, for returns + tempname Nlist nbinslist cvallist + + * local vars, for plotting + local counter_by=1 + local plotnum=0 /* count the number of series, for legend */ + if ("`by'"=="") local noby="noby" + local byvalnamelist "" /* save group name (value) */ + local plotcmd "" /* plotting cmd */ + + *************************************************************************** + ******************* Now, enter the loop *********************************** + *************************************************************************** + foreach byval in `byvals' `noby' { + local conds "" + if ("`by'"!="") { + local conds "if `by'==`byval'" /* with "if" */ + if ("`bylabel'"=="") local byvalname=`byval' + else { + local byvalname `: label `bylabel' `byval'' + } + local byvalnamelist `" `byvalnamelist' `"`byvalname'"' "' + } + if (`bynum'>1) { + mata: `byindex'=`byvec':==`byval' + mata: `xsub'=select(`xvec',`byindex'); `ysub'=select(`yvec', `byindex') + } + else { + mata: `xsub'=`xvec'; `ysub'=`yvec' + } + + * Subsample size + if ("`wtype'"=="f") sum `x_var' `conds' `wt', meanonly + else sum `x_var' `conds', 
meanonly + + local xmin=r(min) + local xmax=r(max) + local N=r(N) + mat `Nlist'=(nullmat(`Nlist') \ `N') + + * Effective sample size + if (`bynum'==1) local eN=`nsize' + else { + if ("`wtype'"!="f") local eN=r(N) + else { + qui count `conds' + local eN=r(N) + } + } + + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xsub', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' `conds' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + mat `Ndistlist'[`counter_by',1]=`Ndist' + } + + * # of clusters + local Nclust=. + if ("`clusterON'"=="T") { + if (`bynum'==1) { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + } + else { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(select(`cluvec', `byindex'))))) + } + else { + qui gunique `clustervar' `conds' + local Nclust=r(unique) + } + } + local eN=min(`eN', `Nclust') /* effective SUBsample size */ + mat `Nclustlist'[`counter_by',1]=`Nclust' + } + + ********************************************************* + ************** Prepare bins, within loop **************** + ********************************************************* + if ("`pos'"!="user") local pos `binspos' /* initialize pos */ + * Selection? + if ("`selection'"!="NA"&"`knotlistON'"!="T"&"`fullfewobs'"=="") { + * Check effective sample size + if ("`dots_p'"==".") local dotspcheck=6 + else local dotspcheck=`dots_p' + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`dotspcheck'+1+`qrot')) { + di as text in gr "Warning: Too small effective sample size for bin selection." /// + _newline _skip(9) "# of mass points or clusters used." 
+ local fewobs "T" + local nbins=`eN' + local pos "QS" /* forced to be QS */ + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `N'>5000) { + local randcut1k=max(5000/`N', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5,000, 0.01n) observations if the sample size n>5,000. To use the full sample, set randcut(1)." + } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + bins(`dots_p' `dots_s') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`pos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_bsq_dpi'[`counter_by',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter_by',1]=e(imse_var_dpi) + if (`nbins'==.) { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) 
{ + di as error "bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_bsq_dpi'[`counter_by',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter_by',1]=e(imse_var_dpi) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`dots'"=="T"|"`dots'"=="") { + local dots_p=`binsp' + local dots_s=`binss' + } + if ("`line'"=="T") { + local line_p=`binsp' + local line_s=`binss' + } + if ("`ci'"!="T"&"`ci'"!="") { + if (`ci_p'<=`binsp') { + local ci_p=`binsp'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`ci'"=="T") { + local ci_p=`binsp'+1 + local ci_s=`binss'+1 + } + if ("`cb'"!="T"&"`cb'"!="") { + if (`cb_p'<=`binsp') { + local cb_p=`binsp'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. It must be greater than the IMSE-optimal degree." 
+ } + } + if ("`cb'"=="T") { + local cb_p=`binsp'+1 + local cb_s=`binss'+1 + } + } + } + } + + if ("`selection'"=="NA"|"`knotlistON'"=="T") local nbins=`nbins_full' /* add the universal nbins */ + *if ("`knotlistON'"=="T") local nbins=`nbins_full' + if ("`fullfewobs'"!="") { + local fewobs "T" + local nbins=`eN' + } + + ****************************************************** + * Check effective sample size for each case ********** + ****************************************************** + if ("`fewobs'"!="T") { + if ((`nbins'-1)*(`dots_p'-`dots_s'+1)+`dots_p'+1+`dfcheck_n2'>=`eN') { + local fewobs "T" /* even though ROT available, treat it as few obs case */ + local nbins=`eN' + local pos "QS" + di as text in gr "Warning: Too small effective sample size for dots. # of mass points or clusters used." + } + if ("`line_p'"!=".") { + if ((`nbins'-1)*(`line_p'-`line_s'+1)+`line_p'+1+`dfcheck_n2'>=`eN') { + local line_fewobs "T" + di as text in gr "Warning: Too small effective sample size for line." + } + } + if ("`ci_p'"!=".") { + if ((`nbins'-1)*(`ci_p'-`ci_s'+1)+`ci_p'+1+`dfcheck_n2'>=`eN') { + local ci_fewobs "T" + di as text in gr "Warning: Too small effective sample size for CI." + } + } + if ("`cb_p'"!=".") { + if ((`nbins'-1)*(`cb_p'-`cb_s'+1)+`cb_p'+1+`dfcheck_n2'>=`eN') { + local cb_fewobs "T" + di as text in gr "Warning: Too small effective sample size for CB." + } + } + } + + if ("`polyreg'"!="") { + if (`polyreg'+1>=`eN') { + local polyreg_fewobs "T" + di as text in gr "Warning: Too small effective sample size for polynomial fit." 
+ } + } + + * Generate category variable for data and save knot in matrix + tempname kmat + + if ("`knotlistON'"=="T") { + mat `kmat'=`fullkmat' + if ("`fewobs'"=="T"&"`eN'"!="`Ndist'") { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + else { + if ("`fewmasspoints'"==""&("`fewobs'"!="T"|"`eN'"!="`Ndist'")) { + if ("`pos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `kmat'=(nullmat(`kmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + } + + * Renew knot list if few mass points + if (("`fewobs'"=="T"&"`eN'"=="`Ndist'")|"`fewmasspoints'"!="") { + qui tab `x_var' `conds', matrow(`kmat') + if ("`fewmasspoints'"!="") { + local nbins=rowsof(`kmat') + local Ndist=`nbins' + local eN=`Ndist' + } + } + else { + mata: st_matrix("`kmat'", (`xmin' \ uniqrows(st_matrix("`kmat'")[|2 \ `=`nbins'+1'|]))) + if (`nbins'!=rowsof(`kmat')-1) { + di as text in gr "Warning: Repeated knots. Some bins dropped." + local nbins=rowsof(`kmat')-1 + } + + binsreg_irecode `x_var' `conds', knotmat(`kmat') bin(`xcat') /// + `usegtools' nbins(`nbins') pos(`pos') knotliston(`knotlistON') + + mata: `xcatsub'=st_data(., "`xcat'") + if (`bynum'>1) { + mata: `xcatsub'=select(`xcatsub', `byindex') + } + } + + ************************************************* + **** Check for empty bins *********************** + ************************************************* + mata: `binedges'=. 
/* initialize */ + if ("`fewobs'"!="T"&"`localcheck'"=="T") { + mata: st_local("Ncat", strofreal(rows(uniqrows(`xcatsub')))) + if (`nbins'==`Ncat') { + mata: `binedges'=binsreg_uniq(`xsub', `xcatsub', `nbins', "uniqmin") + } + else { + local uniqmin=0 + di as text in gr "Warning: There are empty bins. Specify a smaller number in nbins()." + } + + if ("`dots_p'"!=".") { + if (`uniqmin'<`dots_p'+1) { + local dots_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for dots." + } + } + if ("`line_p'"!=".") { + if (`uniqmin'<`line_p'+1) { + local line_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for line." + } + } + if ("`ci_p'"!=".") { + if (`uniqmin'<`ci_p'+1) { + local ci_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for CI." + } + } + if ("`cb_p'"!=".") { + if (`uniqmin'<`cb_p'+1) { + local cb_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for CB." + } + } + } + + * Now, save nbins in a list !!! + mat `nbinslist'=(nullmat(`nbinslist') \ `nbins') + + ********************************************************** + **** Count the number of rows needed (within loop!) 
****** + ********************************************************** + local byfirst=`bylast'+1 + local byrange=0 + if ("`fewobs'"!="T") { + local dots_nr=`dotsngrid_mean'*`nbins' + if (`dotsngrid'!=0) local dots_nr=`dots_nr'+`dotsngrid'*`nbins'+`nbins'-1 + local ci_nr=`cingrid_mean'*`nbins' + if (`cingrid'!=0) local ci_nr=`ci_nr'+`cingrid'*`nbins'+`nbins'-1 + if (`linengrid'!=0) local line_nr=`linengrid'*`nbins'+`nbins'-1 + if (`cbngrid'!=0) local cb_nr=`cbngrid'*`nbins'+`nbins'-1 + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*`nbins'+`nbins'-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*`nbins'+`nbins'-1 + } + local byrange=max(`dots_nr'+0,`line_nr'+0,`ci_nr'+0,`cb_nr'+0, `poly_nr'+0, `polyci_nr'+0) + } + else { + if ("`eN'"=="`Ndist'") { + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*(`nbins'-1)+`nbins'-1-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*(`nbins'-1)+`nbins'-1-1 + } + } + else { + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*`nbins'+`nbins'-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*`nbins'+`nbins'-1 + } + } + local byrange=max(`nbins', `poly_nr'+0, `polyci_nr'+0) + } + local bylast=`bylast'+`byrange' + mata: `plotmatby'=J(`byrange',`ncolplot',.) + if ("`byval'"!="noby") { + mata: `plotmatby'[.,1]=J(`byrange',1,`byval') + } + + ************************************************ + **** START: prepare data for plotting*********** + ************************************************ + local plotcmdby "" + + ******************************** + * adjust w vars + tempname wval + if (`nwvar'>0) { + if (`"`at'"'==`"mean"'|`"`at'"'==`"median"') { + matrix `wval'=J(1, `nwvar', 0) + tempname wvaltemp mataobj + mata: `mataobj'=. 
+ foreach wpos in `indexlist' { + local wname: word `wpos' of `w_var' + if ("`usegtools'"=="") { + if ("`wtype'"!="") qui tabstat `wname' `conds' [aw`exp'], stat(`at') save + else qui tabstat `wname' `conds', stat(`at') save + mat `wvaltemp'=r(StatTotal) + } + else { + qui gstats tabstat `wname' `conds' `wt', stat(`at') matasave("`mataobj'") + mata: st_matrix("`wvaltemp'", `mataobj'.getOutputCol(1)) + } + mat `wval'[1,`wpos']=`wvaltemp'[1,1] + } + mata: mata drop `mataobj' + } + else if (`"`at'"'==`"0"') { + matrix `wval'=J(1,`nwvar',0) + } + else if ("`atwout'"=="user") { + matrix `wval'=`wuser' + } + } + + + ************************************************* + ********** dots and ci for few obs. case ******** + ************************************************* + if (`dotsntot'!=0&"`plot'"==""&"`fewobs'"=="T") { + di as text in gr "Warning: dots(0 0) is used." + if (`deriv'>0) di as text in gr "Warning: deriv(0 0) is used." + + local dots_first=`byfirst' + local dots_last=`byfirst'-1+`nbins' + + mata: `plotmatby'[|1,`dots_start'+2 \ `nbins',`dots_start'+2|]=range(1,`nbins',1) + + if ("`eN'"=="`Ndist'") { + mata: `plotmatby'[|1,`dots_start' \ `nbins',`dots_start'|]=st_matrix("`kmat'"); /// + `plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+1|]=J(`nbins',1,1) + + * Renew knot commalist, each value forms a group + local xknot "" + forvalues i=1/`nbins' { + local xknot `xknot' `kmat'[`i',1] + } + local xknotcommalist : subinstr local xknot " " ",", all + qui replace `xcat'=1+irecode(`x_var',`xknotcommalist') `conds' + } + else { + tempname grid + mat `grid'=(`kmat'[1..`nbins',1]+`kmat'[2..`nbins'+1,1])/2 + mata: `plotmatby'[|1,`dots_start' \ `nbins',`dots_start'|]=st_matrix("`grid'"); /// + `plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+1|]=J(`nbins',1,0) + } + + local nseries=`nbins' + capture probit `y_var' ibn.`xcat' `w_var' `conds' `wt', nocon `vce' `probitopt' + tempname fewobs_b fewobs_V + if (_rc==0) { + mat `fewobs_b'=e(b) + mat `fewobs_V'=e(V) + 
mata: binsreg_checkdrop("`fewobs_b'", "`fewobs_V'", `nseries') + if (`nwvar'>0) { + mat `fewobs_b'=`fewobs_b'[1,1..`nseries']+(`fewobs_b'[1,`=`nseries'+1'..`=`nseries'+`nwvar'']*`wval'')*J(1,`nseries',1) + } + else { + mat `fewobs_b'=`fewobs_b'[1,1..`nseries'] + } + } + else { + error _rc + exit _rc + } + + if ("`transform'"=="T") { + mata: `plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]=normal(st_matrix("`fewobs_b'"))' + } + else { + mata: `plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]=st_matrix("`fewobs_b'")' + } + + local plotnum=`plotnum'+1 + local legendnum `legendnum' `plotnum' + local col: word `counter_by' of `bycolors' + local sym: word `counter_by' of `bysymbols' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond `plotcond' if + if ("`plotxrange'"!="") { + local plotcond `plotcond' dots_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &dots_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' dots_fit>=`min_yr' + else local plotcond `plotcond' &dots_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &dots_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (scatter dots_fit dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + mcolor(`col') msymbol(`sym') `dotsplotopt') + + if (`cintot'!=0) { + di as text in gr "Warning: ci(0 0) is used." 
+ + if (`nwvar'>0) { + mata: `mata_se'=(I(`nseries'), J(`nseries',1,1)#st_matrix("`wval'")) + } + else { + mata: `mata_se'=I(`nseries') + } + + mata: `plotmatby'[|1,`ci_start'+1 \ `nbins',`ci_start'+2|]=`plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+2|]; /// + `mata_se'=sqrt(rowsum((`mata_se'*st_matrix("`fewobs_V'")):*`mata_se')) + if ("`transform'"=="T") { + mata: `mata_se'=`mata_se':*(normalden(st_matrix("`fewobs_b'"))') + } + mata: `plotmatby'[|1,`ci_start'+3 \ `nbins',`ci_start'+3|]=`plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]-`mata_se'*invnormal(`alpha'); /// + `plotmatby'[|1,`ci_start'+4 \ `nbins',`ci_start'+4|]=`plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]+`mata_se'*invnormal(`alpha') + mata: mata drop `mata_se' + + local plotnum=`plotnum'+1 + local lty: word `counter_by' of `bylpatterns' + local plotcmdby `plotcmdby' (rcap CI_l CI_r dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + } + } + + ********************************************* + **** The following handles the usual case *** + ********************************************* + * Turn on or off? 
+ local dotsON "" + local lineON "" + local polyON "" + local ciON "" + local cbON "" + if (`dotsntot'!=0&"`plot'"==""&"`fewobs'"!="T"&"`dots_fewobs'"!="T") { + local dotsON "T" + } + if (`linengrid'!=0&"`plot'"==""&"`line_fewobs'"!="T"&"`fewobs'"!="T") { + local lineON "T" + } + if (`polyregngrid'!=0&"`plot'"==""&"`polyreg_fewobs'"!="T") { + local polyON "T" + } + if (`cintot'!=0&"`plot'"==""&"`ci_fewobs'"!="T"&"`fewobs'"!="T") { + local ciON "T" + } + if (`cbngrid'!=0&"`plot'"==""&"`cb_fewobs'"!="T"&"`fewobs'"!="T") { + local cbON "T" + } + + + ************************ + ****** Dots ************ + ************************ + tempname xmean + + if ("`dotsON'"=="T") { + local dots_first=`byfirst' + local dots_last=`byfirst'+`dots_nr'-1 + + * fitting + tempname dots_b dots_V + if (("`dots_p'"=="`ci_p'"&"`dots_s'"=="`ci_s'"&"`ciON'"=="T")| /// + ("`dots_p'"=="`cb_p'"&"`dots_s'"=="`cb_s'"&"`cbON'"=="T")) { + binsprobit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`dots_p') s(`dots_s') type(dots) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`dotsngrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + usereg `sorted' `usegtools' probitopt(`probitopt') + } + else { + binsprobit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`dots_p') s(`dots_s') type(dots) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`dotsngrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' probitopt(`probitopt') + } + + mat `dots_b'=e(bmat) + mat `dots_V'=e(Vmat) + if (`dotsngrid_mean'!=0) mat `xmean'=e(xmat) + + * prediction + if (`dotsngrid_mean'==0) { + mata: `plotmatby'[|1,`dots_start' \ `dots_nr',`dots_end'|] = /// + binsprobit_plotmat("`dots_b'", "`dots_V'", ., "`kmat'", /// + `nbins', `dots_p', `dots_s', `deriv', /// + "dots", `dotsngrid', "`wval'", `nwvar', /// + "`transform'", "`asyvar'") + } + else { + mata: `plotmatby'[|1,`dots_start' \ `dots_nr',`dots_end'|] = 
/// + binsprobit_plotmat("`dots_b'", "`dots_V'", ., "`kmat'", /// + `nbins', `dots_p', `dots_s', `deriv', /// + "dots", `dotsngrid', "`wval'", `nwvar', /// + "`transform'", "`asyvar'", "`xmean'") + } + + * dots + local plotnum=`plotnum'+1 + if ("`cbON'"=="T") local legendnum `legendnum' `=`plotnum'+1' + else { + local legendnum `legendnum' `plotnum' + } + local col: word `counter_by' of `bycolors' + local sym: word `counter_by' of `bysymbols' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' dots_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &dots_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' dots_fit>=`min_yr' + else local plotcond `plotcond' &dots_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &dots_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (scatter dots_fit dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + mcolor(`col') msymbol(`sym') `dotsplotopt') + } + + ********************************************** + ********************* Line ******************* + ********************************************** + if ("`lineON'"=="T") { + local line_first=`byfirst' + local line_last=`byfirst'-1+`line_nr' + + * fitting + tempname line_b line_V + capture confirm matrix `dots_b' `dots_V' + if ("`line_p'"=="`dots_p'"& "`line_s'"=="`dots_s'" & _rc==0) { + matrix `line_b'=`dots_b' + matrix `line_V'=`dots_V' + } + else { + if (("`line_p'"=="`ci_p'"&"`line_s'"=="`ci_s'"&"`ciON'"=="T")| /// + ("`line_p'"=="`cb_p'"&"`line_s'"=="`cb_s'"&"`cbON'"=="T")) { + binsprobit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`line_p') s(`line_s') type(line) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + usereg `sorted' `usegtools' probitopt(`probitopt') + } + else { + binsprobit_fit `y_var' `x_var' `w_var' 
`conds' `wt', deriv(`deriv') /// + p(`line_p') s(`line_s') type(line) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' probitopt(`probitopt') + } + mat `line_b'=e(bmat) + mat `line_V'=e(Vmat) + } + + * prediction + mata: `plotmatby'[|1,`line_start' \ `line_nr',`line_end'|] = /// + binsprobit_plotmat("`line_b'", "`line_V'", ., "`kmat'", /// + `nbins', `line_p', `line_s', `deriv', /// + "line", `linengrid', "`wval'", `nwvar', "`transform'", "`asyvar'") + + * line + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' line_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &line_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' line_fit>=`min_yr' + else local plotcond `plotcond' &line_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &(line_fit<=`max_yr'|line_fit==.) + } + } + + local plotcmdby `plotcmdby' (line line_fit line_x /// + `plotcond' in `line_first'/`line_last', sort cmissing(n) /// + lcolor(`col') lpattern(`lty') `lineplotopt') + + } + + *********************************** + ******* Polynomial fit ************ + *********************************** + if ("`polyON'"=="T") { + if (`nwvar'>0) { + di as text "Note: When additional covariates w are included, the polynomial fit may not always be close to the binscatter fit." 
+ } + + local poly_first=`byfirst' + local poly_last=`byfirst'-1+`poly_nr' + + mata:`plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'+2|]=binsreg_grids("`kmat'",`polyregngrid') + + local poly_series "" + forval i=0/`polyreg' { + tempvar x_var_`i' + qui gen `x_var_`i''=`x_var'^`i' `conds' + local poly_series `poly_series' `x_var_`i'' + } + + capture probit `y_var' `poly_series' `w_var' `conds' `wt', nocon `vce' `probitopt' + * store results + tempname poly_b poly_V poly_adjw + if (_rc==0) { + matrix `poly_b'=e(b) + matrix `poly_V'=e(V) + } + else { + error _rc + exit _rc + } + + * Data for derivative + mata: `Xm'=J(`poly_nr',0,.); `Xm0'=J(`poly_nr',0,.) + forval i=`deriv'/`polyreg' { + mata: `Xm'=(`Xm', /// + `plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'|]:^(`i'-`deriv')* /// + factorial(`i')/factorial(`i'-`deriv')) + } + mata: `Xm'=(J(`poly_nr', `deriv',0), `Xm') + if (`nwvar'>0) { + if (`deriv'==0) mata: `Xm'=(`Xm', J(`poly_nr',1,1)#st_matrix("`wval'")) + else mata: `Xm'=(`Xm', J(`poly_nr',`nwvar',0)) + } + + if ("`transform'"=="T") { + if (`deriv'==0) { + mata:`plotmatby'[|1,`poly_start'+3 \ `poly_nr',`poly_start'+3|]=normal(`Xm'*st_matrix("`poly_b'")') + } + else if (`deriv'==1) { + forval i=0/`polyreg' { + mata: `Xm0'=(`Xm0', `plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'|]:^`i') + } + if (`nwvar'>0) mata: `Xm0'=(`Xm0', J(`poly_nr',1,1)#st_matrix("`wval'")) + mata:`plotmatby'[|1,`poly_start'+3 \ `poly_nr',`poly_start'+3|]=normalden(`Xm0'*st_matrix("`poly_b'")'):* /// + (`Xm'*st_matrix("`poly_b'")') + } + } + else { + mata:`plotmatby'[|1,`poly_start'+3 \ `poly_nr',`poly_start'+3|]=`Xm'*st_matrix("`poly_b'")' + } + + mata: mata drop `Xm' `Xm0' + + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' poly_x>=`min_xr' + if ("`max_xr'"!="") 
local plotcond `plotcond' &poly_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' poly_fit>=`min_yr' + else local plotcond `plotcond' &poly_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &poly_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (line poly_fit poly_x /// + `plotcond' in `poly_first'/`poly_last', /// + sort lcolor(`col') lpattern(`lty') `polyregplotopt') + + * add CI for global poly? + if (`polyregcingrid'!=0) { + local polyci_first=`byfirst' + local polyci_last=`byfirst'-1+`polyci_nr' + + mata: `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'+2|]=binsreg_grids("`kmat'", `polyregcingrid') + + mata: `Xm'=J(`polyci_nr',0,.); `Xm0'=J(`polyci_nr',0,.) + forval i=`deriv'/`polyreg' { + mata:`Xm'=(`Xm', /// + `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'|]:^(`i'-`deriv')* /// + factorial(`i')/factorial(`i'-`deriv')) + } + mata: `Xm'=(J(`polyci_nr', `deriv',0), `Xm') + if (`nwvar'>0) { + if (`deriv'==0) mata: `Xm'=(`Xm', J(`polyci_nr',1,1)#st_matrix("`wval'")) + else mata: `Xm'=(`Xm', J(`polyci_nr',`nwvar',0)) + } + + if ("`transform'"=="T") { + if (`deriv'==0) { + mata: `mata_fit'=normal(`Xm'*st_matrix("`poly_b'")') + mata: `mata_se'=normalden(`Xm'*st_matrix("`poly_b'")'):* /// + sqrt(rowsum((`Xm'*st_matrix("`poly_V'")):*`Xm')) + } + else if (`deriv'==1) { + forval i=0/`polyreg' { + mata: `Xm0'=(`Xm0', `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'|]:^`i') + } + if (`nwvar'>0) mata: `Xm0'=(`Xm0', J(`polyci_nr',1,1)#st_matrix("`wval'")) + mata:`mata_fit'=normalden(`Xm0'*st_matrix("`poly_b'")'):* /// + (`Xm'*st_matrix("`poly_b'")') + + tempname tempobj + mata: `tempobj'=`Xm0'*st_matrix("`poly_b'")'; /// + `tempobj'=(-`tempobj'):*normalden(`tempobj'):*(`Xm'*st_matrix("`poly_b'")'):*`Xm0' + /// + normalden(`tempobj'):*`Xm'; /// + `mata_se'=sqrt(rowsum((`tempobj'*st_matrix("`poly_V'")):*`tempobj')) + mata: mata drop `tempobj' + } + } + else { + mata: 
`mata_fit'=`Xm'*st_matrix("`poly_b'")'; /// + `mata_se'=sqrt(rowsum((`Xm'*st_matrix("`poly_V'")):*`Xm')) + } + + mata:`plotmatby'[|1,`polyci_start'+3 \ `polyci_nr',`polyci_start'+3|]=`mata_fit'-`mata_se'*invnormal(`alpha'); /// + `plotmatby'[|1,`polyci_start'+4 \ `polyci_nr',`polyci_start'+4|]=`mata_fit'+`mata_se'*invnormal(`alpha'); /// + `plotmatby'[selectindex(`plotmatby'[,`=`polyci_start'+1']:==1),(`=`polyci_start'+3',`=`polyci_start'+4')]=J(`=`nbins'-1',2,.) + + mata: mata drop `Xm' `Xm0' `mata_fit' `mata_se' + + * poly ci + local plotnum=`plotnum'+1 + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' polyCI_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &polyCI_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' polyCI_l>=`min_yr' + else local plotcond `plotcond' &polyCI_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &polyCI_r<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (rcap polyCI_l polyCI_r polyCI_x /// + `plotcond' in `polyci_first'/`polyci_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + } + } + + + ********************************** + ******* Confidence Interval ****** + ********************************** + if ("`ciON'"=="T") { + local ci_first=`byfirst' + local ci_last=`byfirst'-1+`ci_nr' + + * fitting + tempname ci_b ci_V + capture confirm matrix `line_b' `line_V' + if ("`ci_p'"=="`line_p'"& "`ci_s'"=="`line_s'" & _rc==0) { + matrix `ci_b'=`line_b' + matrix `ci_V'=`line_V' + } + else { + capture confirm matrix `dots_b' `dots_V' + if ("`ci_p'"=="`dots_p'"& "`ci_s'"=="`dots_s'" & _rc==0) { + matrix `ci_b'=`dots_b' + matrix `ci_V'=`dots_V' + } + } + + capture confirm matrix `ci_b' `ci_V' `xmean' + if (_rc!=0) { + binsprobit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`ci_p') s(`ci_s') type(ci) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`cingrid_mean') 
/// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' probitopt(`probitopt') + + mat `ci_b'=e(bmat) + mat `ci_V'=e(Vmat) + mat `xmean'=e(xmat) + } + + * prediction + if (`cingrid_mean'==0) { + mata: `plotmatby'[|1,`ci_start' \ `ci_nr',`ci_end'|] = /// + binsprobit_plotmat("`ci_b'", "`ci_V'", /// + `=invnormal(`alpha')', "`kmat'", /// + `nbins', `ci_p', `ci_s', `deriv', "ci", /// + `cingrid', "`wval'", `nwvar', "`transform'", "`asyvar'") + } + else { + mata: `plotmatby'[|1,`ci_start' \ `ci_nr',`ci_end'|] = /// + binsprobit_plotmat("`ci_b'", "`ci_V'", /// + `=invnormal(`alpha')', "`kmat'", /// + `nbins', `ci_p', `ci_s', `deriv', "ci", /// + `cingrid', "`wval'", `nwvar', /// + "`transform'", "`asyvar'", "`xmean'") + } + + * ci + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' CI_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &CI_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' CI_l>=`min_yr' + else local plotcond `plotcond' &CI_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &CI_r<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (rcap CI_l CI_r CI_x /// + `plotcond' in `ci_first'/`ci_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + + } + + ******************************* + ***** Confidence Band ********* + ******************************* + tempname cval + scalar `cval'=. + if ("`cbON'"=="T") { + if (`nsims'<2000|`simsgrid'<50) { + di as text "Note: A larger number random draws/evaluation points is recommended to obtain the final results." 
+ } + * Prepare grid for plotting + local cb_first=`byfirst' + local cb_last=`byfirst'-1+`cb_nr' + + * fitting + tempname cb_b cb_V + capture confirm matrix `ci_b' `ci_V' + if ("`cb_p'"=="`ci_p'"& "`cb_s'"=="`ci_s'" & _rc==0) { + matrix `cb_b'=`ci_b' + matrix `cb_V'=`ci_V' + } + else { + capture confirm matrix `line_b' `line_V' + if ("`cb_p'"=="`line_p'"& "`cb_s'"=="`line_s'" & _rc==0) { + matrix `cb_b'=`line_b' + matrix `cb_V'=`line_V' + } + else { + capture confirm matrix `dots_b' `dots_V' + if ("`cb_p'"=="`dots_p'"& "`cb_s'"=="`dots_s'" & _rc==0) { + matrix `cb_b'=`dots_b' + matrix `cb_V'=`dots_V' + } + else { + binsprobit_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`cb_p') s(`cb_s') type(cb) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' probitopt(`probitopt') + mat `cb_b'=e(bmat) + mat `cb_V'=e(Vmat) + } + } + } + + * Compute critical values + * Prepare grid for simulation + local uni_last=`simsngrid'*`nbins'+`nbins'-1 + local nseries=(`cb_p'-`cb_s'+1)*(`nbins'-1)+`cb_p'+1 + + tempname cb_basis + mata: `cb_basis'=binsreg_grids("`kmat'", `simsngrid'); /// + `cb_basis'=binsreg_spdes(`cb_basis'[,1], "`kmat'", `cb_basis'[,3], `cb_p', `deriv', `cb_s'); /// + `Xm'=binsreg_pred(`cb_basis', st_matrix("`cb_b'")[|1 \ `nseries'|]', /// + st_matrix("`cb_V'")[|1,1 \ `nseries',`nseries'|], "all"); /// + binsreg_pval(`cb_basis', `Xm'[,2], "`cb_V'", ".", `nsims', `nseries', "two", `=`level'/100', ".", "`cval'", "inf") + mata: mata drop `cb_basis' `Xm' + + * prediction + mata: `plotmatby'[|1,`cb_start' \ `cb_nr',`cb_end'|] = /// + binsprobit_plotmat("`cb_b'", "`cb_V'", /// + `=`cval'', "`kmat'", /// + `nbins', `cb_p', `cb_s', `deriv', /// + "cb", `cbngrid', "`wval'", `nwvar', /// + "`transform'", "`asyvar'") + + * cb + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + 
local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' CB_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &CB_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' CB_l>=`min_yr' + else local plotcond `plotcond' &CB_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &(CB_r<=`max_yr'|CB_r==.) + } + } + + local plotcmdby (rarea CB_l CB_r CB_x /// + `plotcond' in `cb_first'/`cb_last', sort cmissing(n) /// + lcolor(none%0) fcolor(`col'%50) fintensity(50) `cbplotopt') `plotcmdby' + } + mat `cvallist'=(nullmat(`cvallist') \ `cval') + + local plotcmd `plotcmd' `plotcmdby' + mata: `plotmat'=(`plotmat' \ `plotmatby') + + ********************************* + **** display ******************** + ********************************* + di "" + * Plotting + if ("`plot'"=="") { + if (`counter_by'==1) { + di in smcl in gr "Binscatter plot, probit model" + di in smcl in gr "Bin selection method: `binselectmethod'" + di in smcl in gr "Placement: `placement'" + di in smcl in gr "Derivative: `deriv'" + if (`"`savedata'"'!=`""') { + di in smcl in gr `"Output file: `savedata'.dta"' + } + } + di "" + if ("`by'"!="") { + di in smcl in gr "Group: `byvarname' = " in yellow "`byvalname'" + } + di in smcl in gr "{hline 30}{c TT}{hline 15}" + di in smcl in gr "{lalign 1:# of observations}" _col(30) " {c |} " _col(32) as result %7.0f `N' + di in smcl in gr "{lalign 1:# of distinct values}" _col(30) " {c |} " _col(32) as result %7.0f `Ndist' + di in smcl in gr "{lalign 1:# of clusters}" _col(30) " {c |} " _col(32) as result %7.0f `Nclust' + di in smcl in gr "{hline 30}{c +}{hline 15}" + di in smcl in gr "{lalign 1:Bin/Degree selection:}" _col(30) " {c |} " + if ("`selection'"=="P") { + di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `binsp' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `binss' + } + else { + di in 
smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `dots_p' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `dots_s' + } + di in smcl in gr "{ralign 29:# of bins}" _col(30) " {c |} " _col(32) as result %7.0f `nbins' + if ("`binselectmethod'"!="User-specified") { + if ("`binsmethod'"=="ROT") { + di in smcl in gr "{ralign 29:imse, bias^2}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_bsq_rot'[`counter_by',1]' + di in smcl in gr "{ralign 29:imse, var.}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_var_rot'[`counter_by',1]' + } + else if ("`binsmethod'"=="DPI") { + di in smcl in gr "{ralign 29:imse, bias^2}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_bsq_dpi'[`counter_by',1]' + di in smcl in gr "{ralign 29:imse, var.}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_var_dpi'[`counter_by',1]' + } + } + di in smcl in gr "{hline 30}{c BT}{hline 15}" + di "" + di in smcl in gr "{hline 9}{c TT}{hline 30}" + di in smcl _col(10) "{c |}" in gr _col(17) "p" _col(25) "s" _col(33) "df" + di in smcl in gr "{hline 9}{c +}{hline 30}" + if (`dotsntot'!=0) { + local dots_df=(`dots_p'-`dots_s'+1)*(`nbins'-1)+`dots_p'+1 + di in smcl in gr "{lalign 1: dots}" _col(10) "{c |}" in gr _col(17) "`dots_p'" _col(25) "`dots_s'" _col(33) "`dots_df'" + } + if ("`lineON'"=="T") { + local line_df=(`line_p'-`line_s'+1)*(`nbins'-1)+`line_p'+1 + di in smcl in gr "{lalign 1: line}" _col(10) "{c |}" in gr _col(17) "`line_p'" _col(25) "`line_s'" _col(33) "`line_df'" + } + if (`cintot'!=0) { + local ci_df=(`ci_p'-`ci_s'+1)*(`nbins'-1)+`ci_p'+1 + di in smcl in gr "{lalign 1: CI}" _col(10) "{c |}" in gr _col(17) "`ci_p'" _col(25) "`ci_s'" _col(33) "`ci_df'" + } + if ("`cbON'"=="T") { + local cb_df=(`cb_p'-`cb_s'+1)*(`nbins'-1)+`cb_p'+1 + di in smcl in gr "{lalign 1: CB}" _col(10) "{c |}" in gr _col(17) "`cb_p'" _col(25) "`cb_s'" _col(33) "`cb_df'" + } + if 
("`polyON'"=="T") { + local poly_df=`polyreg'+1 + di in smcl in gr "{lalign 1: polyreg}" _col(10) "{c |}" in gr _col(17) "`polyreg'" _col(25) "NA" _col(33) "`poly_df'" + } + di in smcl in gr "{hline 9}{c BT}{hline 30}" + } + + + mata: mata drop `plotmatby' + local ++counter_by + } + mata: mata drop `xsub' `ysub' `binedges' + if (`bynum'>1) mata: mata drop `byindex' + capture mata: mata drop `xcatsub' + ****************** END loop **************************************** + ******************************************************************** + + + + ******************************************* + *************** Plotting ****************** + ******************************************* + clear + if ("`plotcmd'"!="") { + * put data back to STATA + mata: st_local("nr", strofreal(rows(`plotmat'))) + qui set obs `nr' + + * MAKE SURE the orderings match + qui gen group=. in 1 + if (`dotsntot'!=0) { + qui gen dots_x=. in 1 + qui gen dots_isknot=. in 1 + qui gen dots_binid=. in 1 + qui gen dots_fit=. in 1 + } + if (`linengrid'!=0&"`fullfewobs'"=="") { + qui gen line_x=. in 1 + qui gen line_isknot=. in 1 + qui gen line_binid=. in 1 + qui gen line_fit=. in 1 + } + if (`polyregngrid'!=0) { + qui gen poly_x=. in 1 + qui gen poly_isknot=. in 1 + qui gen poly_binid=. in 1 + qui gen poly_fit=. in 1 + if (`polyregcingrid'!=0) { + qui gen polyCI_x=. in 1 + qui gen polyCI_isknot=. in 1 + qui gen polyCI_binid=. in 1 + qui gen polyCI_l=. in 1 + qui gen polyCI_r=. in 1 + } + } + if (`cintot'!=0) { + qui gen CI_x=. in 1 + qui gen CI_isknot=. in 1 + qui gen CI_binid=. in 1 + qui gen CI_l=. in 1 + qui gen CI_r=. in 1 + } + if (`cbngrid'!=0&"`fullfewobs'"=="") { + qui gen CB_x=. in 1 + qui gen CB_isknot=. in 1 + qui gen CB_binid=. in 1 + qui gen CB_l=. in 1 + qui gen CB_r=. 
in 1 + } + + mata: st_store(.,.,`plotmat') + + * Legend + local plot_legend legend(order( + if ("`by'"!=""&`dotsntot'!=0) { + forval i=1/`bynum' { + local byvalname: word `i' of `byvalnamelist' + local plot_legend `plot_legend' `: word `i' of `legendnum'' "`byvarname'=`byvalname'" + } + local plot_legend `plot_legend' )) + } + else { + local plot_legend legend(off) + } + + * Plot it + local graphcmd twoway `plotcmd', xtitle(`x_varname') ytitle(`y_varname') xscale(range(`xsc')) `plot_legend' `options' + `graphcmd' + } + mata: mata drop `plotmat' `xvec' `yvec' `byvec' `cluvec' + + + * Save graph data ? + * In the normal case + if (`"`savedata'"'!=`""'&`"`plotcmd'"'!=`""') { + * Add labels + if ("`by'"!="") { + if ("`bystring'"=="T") { + label val group `bylabel' + decode group, gen(`byvarname') + } + else { + qui gen `byvarname'=group + if ("`bylabel'"!="") label val `byvarname' `bylabel' + } + label var `byvarname' "Group" + qui drop group + order `byvarname' + } + else qui drop group + + capture confirm variable dots_x dots_binid dots_isknot dots_fit + if (_rc==0) { + label var dots_x "Dots: grid" + label var dots_binid "Dots: indicator of bins" + label var dots_isknot "Dots: indicator of inner knot" + label var dots_fit "Dots: fitted values" + } + capture confirm variable line_x line_binid line_isknot line_fit + if (_rc==0) { + label var line_x "Line: grid" + label var line_binid "Line: indicator of bins" + label var line_isknot "Line: indicator of inner knot" + label var line_fit "Line: fitted values" + } + capture confirm variable poly_x poly_binid poly_isknot poly_fit + if (_rc==0) { + label var poly_x "Poly: grid" + label var poly_binid "Poly: indicator of bins" + label var poly_isknot "Poly: indicator of inner knot" + label var poly_fit "Poly: fitted values" + } + capture confirm variable polyCI_x polyCI_binid polyCI_isknot polyCI_l polyCI_r + if (_rc==0) { + label var polyCI_x "Poly confidence interval: grid" + label var polyCI_binid "Poly confidence 
interval: indicator of bins" + label var polyCI_isknot "Poly confidence interval: indicator of inner knot" + label var polyCI_l "Poly confidence interval: left boundary" + label var polyCI_r "Poly confidence interval: right boundary" + } + capture confirm variable CI_x CI_binid CI_isknot CI_l CI_r + if (_rc==0) { + label var CI_x "Confidence interval: grid" + label var CI_binid "Confidence interval: indicator of bins" + label var CI_isknot "Confidence interval: indicator of inner knot" + label var CI_l "Confidence interval: left boundary" + label var CI_r "Confidence interval: right boundary" + } + capture confirm variable CB_x CB_binid CB_isknot CB_l CB_r + if (_rc==0) { + label var CB_x "Confidence band: grid" + label var CB_binid "Confidence band: indicator of bins" + label var CB_isknot "Confidence band: indicator of inner knot" + label var CB_l "Confidence band: left boundary" + label var CB_r "Confidence band: right boundary" + } + qui save `"`savedata'"', `replace' + } + *************************************************************************** + + ********************************* + ********** Return *************** + ********************************* + ereturn clear + * # of observations + ereturn scalar N=`Ntotal' + * Options + ereturn scalar level=`level' + ereturn scalar dots_p=`dots_p' + ereturn scalar dots_s=`dots_s' + ereturn scalar line_p=`line_p' + ereturn scalar line_s=`line_s' + ereturn scalar ci_p=`ci_p' + ereturn scalar ci_s=`ci_s' + ereturn scalar cb_p=`cb_p' + ereturn scalar cb_s=`cb_s' + * by group: + *ereturn matrix knot=`kmat' + ereturn matrix cval_by=`cvallist' + ereturn matrix nbins_by=`nbinslist' + ereturn matrix Nclust_by=`Nclustlist' + ereturn matrix Ndist_by=`Ndistlist' + ereturn matrix N_by=`Nlist' + + ereturn matrix imse_var_rot=`mat_imse_var_rot' + ereturn matrix imse_bsq_rot=`mat_imse_bsq_rot' + ereturn matrix imse_var_dpi=`mat_imse_var_dpi' + ereturn matrix imse_bsq_dpi=`mat_imse_bsq_dpi' +end + +* Helper commands +* Estimation 
+program define binsprobit_fit, eclass + version 13 + syntax varlist(min=2 numeric ts fv) [if] [in] [fw aw pw] [, deriv(integer 0) /// + p(integer 0) s(integer 0) type(string) vce(passthru) /// + xcat(varname numeric) kmat(name) dotsmean(integer 0) /// /* xmean: report x-mean? */ + xname(name) yname(name) catname(name) edge(name) /// + usereg sorted usegtools probitopt(string asis)] /* usereg: force the command to use reg; sored: sorted data? */ + + preserve + marksample touse + qui keep if `touse' + + if ("`weight'"!="") local wt [`weight'`exp'] + + tokenize `varlist' + local y_var `1' + local x_var `2' + macro shift 2 + local w_var "`*'" + local nbins=rowsof(`kmat')-1 + + tempname matxmean temp_b temp_V + mat `matxmean'=. + mat `temp_b'=. + mat `temp_V'=. + + if (`dotsmean'!=0) { + if ("`sorted'"==""|"`weight'"!=""|"`usegtools'"!="") { + if ("`usegtools'"=="") { + tempfile tmpfile + qui save `tmpfile', replace + + collapse (mean) `x_var' `wt', by(`xcat') fast + mkmat `xcat' `x_var', matrix(`matxmean') + + use `tmpfile', clear + } + else { + tempname obj + qui gstats tabstat `x_var' `wt', stats(mean) by(`xcat') matasave("`obj'") + mata: st_matrix("`matxmean'", (`obj'.getnum(.,1), `obj'.getOutputVar("`x_var'"))) + mata: mata drop `obj' + } + } + else { + tempname output + mata: `output'=binsreg_stat(`xname', `catname', `nbins', `edge', "mean", -1); /// + st_matrix("`matxmean'", `output') + mata: mata drop `output' + } + } + + * Regression? + if (`p'==0) { + capture probit `y_var' ibn.`xcat' `w_var' `wt', nocon `vce' `probitopt' + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + } + else { + error _rc + exit _rc + } + } + else { + local nseries=(`p'-`s'+1)*(`nbins'-1)+`p'+1 + local series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local series `series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + mata: binsreg_st_spdes(`xname', "`series'", "`kmat'", `catname', `p', 0, `s') + + capture probit `y_var' `series' `w_var' `wt', nocon `vce' `probitopt' + * store results + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + mata: binsreg_checkdrop("`temp_b'", "`temp_V'", `nseries') + } + else { + error _rc + exit _rc + } + } + + + ereturn clear + ereturn matrix bmat=`temp_b' + ereturn matrix Vmat=`temp_V' + ereturn matrix xmat=`matxmean' /* xcat, xbar */ +end + +mata: + + // Prediction for plotting + real matrix binsprobit_plotmat(string scalar eb, string scalar eV, real scalar cval, /// + string scalar knotname, real scalar J, /// + real scalar p, real scalar s, real scalar deriv, /// + string scalar type, real scalar ngrid, string scalar muwmat, /// + real scalar nw, string scalar transform, string scalar avar, | string scalar muxmat) + { + real matrix coef, bmat, rmat, vmat, knot, xmean, wval, eval, out, fit, fit0, se, semat, Xm, Xm0, result + real scalar nseries + + nseries=(p-s+1)*(J-1)+p+1 + coef=st_matrix(eb)' + bmat=coef[|1\nseries|] + if (nw>0) rmat=coef[|(nseries+1)\rows(coef)|] + + if (type=="ci"|type=="cb") { + vfull=st_matrix(eV) + vmat=vfull[|1,1\nseries,nseries|] + } + + // Prepare evaluation points + eval=J(0,3,.) + if (args()==15) { + xmean=st_matrix(muxmat) + eval=(eval \ (xmean[,2], J(J, 1, 0), xmean[,1])) + } + if (ngrid!=0) eval=(eval \ binsreg_grids(knotname, ngrid)) + + // adjust w variables + if (nw>0) { + wvec=st_matrix(muwmat) + wval=wvec*rmat + } + else wval=0 + + fit=J(0,1,.) + se=J(0,1,.) 
+ if (p==0) { + if (args()==15) fit=(fit \ bmat) + if (ngrid!=0) { + fit=(fit \ (bmat#(J(ngrid,1,1)\.))) + fit=fit[|1 \ (rows(fit)-1)|] + } + if (type=="ci"|type=="cb") { + if (avar=="on") semat=sqrt(diagonal(vmat)) + else { + if (nw>0) { + Xm=(I(nseries), J(nseries,1,1)#wvec) + semat=sqrt(rowsum((Xm*vfull):*Xm)) + } + else semat=sqrt(diagonal(vmat)) + } + if (args()==15) se=(se \ semat) + if (ngrid!=0) { + se=(se \ (semat#(J(ngrid,1,1)\.))) + se=se[|1 \ (rows(se)-1)|] + } + } + if (type=="dots"|type=="line") { + if (transform=="T") out=(eval, normal(fit:+wval)) + else out=(eval, fit:+wval) + } + else { + if (transform=="T") out=(eval, normal(fit:+wval)-(normalden(fit:+wval):*se)*cval, /// + normal(fit:+wval)+(normalden(fit:+wval):*se)*cval) + else out=(eval, (fit:+wval)-se*cval, (fit:+wval)+se*cval) + } + } + else { + Xm=binsreg_spdes(eval[,1], knotname, eval[,3], p, deriv, s) + if (type=="dots"|type=="line") { + if (transform=="T") { + fit=binsreg_pred(Xm, bmat, ., "xb")[,1] + if (deriv==0) { + fit=normal(fit:+wval) + } + if (deriv==1) { + Xm0=binsreg_spdes(eval[,1], knotname, eval[,3], p, 0, s) + fit0=binsreg_pred(Xm0, bmat, ., "xb")[,1] + fit=normalden(fit0:+wval):*fit + } + + out=(eval, fit) + } + else { + fit=binsreg_pred(Xm, bmat, ., "xb")[,1] + if (deriv==0) out=(eval, fit:+wval) + else out=(eval, fit) + } + } + else { + if (avar=="on") { + result=binsreg_pred(Xm, bmat, vmat, "all") + if (transform=="T") { + Xm0=binsreg_spdes(eval[,1], knotname, eval[,3], p, 0, s) + fit0=binsreg_pred(Xm0, bmat, ., "xb")[,1] + result[,2]=normalden(fit0:+wval):*result[,2] + + if (deriv==0) { + result[,1]=normal(result[,1]:+wval) + } + if (deriv==1) { + result[,1]=normalden(fit0:+wval):*result[,1] + } + + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + else { + if (deriv==0) out=(eval, (result[,1]:+wval)-cval*result[,2], (result[,1]:+wval)+cval*result[,2]) + else out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + } + else { + 
result=binsreg_pred(Xm, bmat, vmat, "all") + if (transform=="T") { + if (deriv==0) { + if (nw>0) Xm=(Xm, J(rows(Xm),1,1)#wvec) + result[,2]=normalden(result[,1]:+wval):*sqrt(rowsum((Xm*vfull):*Xm)) + result[,1]=normal(result[,1]:+wval) + } + if (deriv==1) { + Xm0=binsreg_spdes(eval[,1], knotname, eval[,3], p, 0, s) + if (nw>0) { + Xm0=(Xm0, J(rows(Xm0),1,1)#wvec) + Xm=(Xm, J(rows(Xm),nw,0)) + } + fit0=binsreg_pred(Xm0, coef, ., "xb")[,1] + Xm=(-fit0):*normalden(fit0):*result[,1]:*Xm0 + /// + normalden(fit0):*Xm + result[,2]=sqrt(rowsum((Xm*vfull):*Xm)) + result[,1]=normalden(fit0):*result[,1] + } + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + else { + if (nw>0) { + if (deriv==0) Xm=(Xm, J(rows(Xm),1,1)#wvec) + else Xm=(Xm, J(rows(Xm),nw,0)) + } + result=binsreg_pred(Xm, coef, vfull, "all") + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + } + } + } + + if (type=="dots"|(type=="line"&(s==0|s-deriv<=0))) { + out[selectindex(out[,2]:==1),4]=J(sum(out[,2]),1,.) + } + if (type=="ci"|(type=="cb"&(s==0|s-deriv<=0))) { + out[selectindex(out[,2]:==1),4..5]=J(sum(out[,2]),2,.) + } + + return(out) + } + + +end + diff --git a/110/replication_package/replication/ado/plus/b/binsprobit.sthlp b/110/replication_package/replication/ado/plus/b/binsprobit.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..5cdd22b0893dfb5b75abb9c2d6c731421ee0b09b --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binsprobit.sthlp @@ -0,0 +1,428 @@ +{smcl} +{* *! 
version 1.2 09-OCT-2022}{...} +{viewerjumpto "Syntax" "binsprobit##syntax"}{...} +{viewerjumpto "Description" "binsprobit##description"}{...} +{viewerjumpto "Options" "binsprobit##options"}{...} +{viewerjumpto "Examples" "binsprobit##examples"}{...} +{viewerjumpto "Stored results" "binsprobit##stored_results"}{...} +{viewerjumpto "References" "binsprobit##references"}{...} +{viewerjumpto "Authors" "binsprobit##authors"}{...} +{cmd:help binsprobit} +{hline} + +{title:Title} + +{p 4 8}{hi:binsprobit} {hline 2} Data-Driven Binscatter Probit Estimation with Robust Inference Procedures and Plots.{p_end} + + +{marker syntax}{...} +{title:Syntax} + +{p 4 15} {cmdab:binsprobit} {depvar} {it:indvar} [{it:othercovs}] {ifin} {weight} [ {cmd:,} {opt deriv(v)} {opt at(position)} {opt nolink}{p_end} +{p 15 15} {opt dots(dotsopt)} {opt dotsgrid(dotsgridoption)} {opt dotsplotopt(dotsoption)}{p_end} +{p 15 15} {opt line(lineopt)} {opt linegrid(#)} {opt lineplotopt(lineoption)}{p_end} +{p 15 15} {opt ci(ciopt)} {opt cigrid(cigridoption)} {opt ciplotopt(rcapoption)}{p_end} +{p 15 15} {opt cb(cbopt)} {opt cbgrid(#)} {opt cbplotopt(rareaoption)}{p_end} +{p 15 15} {opt polyreg(p)} {opt polyreggrid(#)} {opt polyregcigrid(#)} {opt polyregplotopt(lineoption)}{p_end} +{p 15 15} {opth by(varname)} {cmd:bycolors(}{it:{help colorstyle}list}{cmd:)} {cmd:bysymbols(}{it:{help symbolstyle}list}{cmd:)} {cmd:bylpatterns(}{it:{help linepatternstyle}list}{cmd:)}{p_end} +{p 15 15} {opt nbins(nbinsopt)} {opt binspos(position)} {opt binsmethod(method)} {opt nbinsrot(#)} {opt samebinsby} {opt randcut(#)}{p_end} +{p 15 15} {cmd:pselect(}{it:{help numlist}}{cmd:)} {cmd:sselect(}{it:{help numlist}}{cmd:)}{p_end} +{p 15 15} {opt nsims(#)} {opt simsgrid(#)} {opt simsseed(seed)}{p_end} +{p 15 15} {opt dfcheck(n1 n2)} {opt masspoints(masspointsoption)}{p_end} +{p 15 15} {cmd:vce(}{it:{help vcetype}}{cmd:)} {opt asyvar(on/off)}{p_end} +{p 15 15} {opt level(level)} {opt probitopt(probit_option)} {opt 
usegtools(on/off)} {opt noplot} {opt savedata(filename)} {opt replace}{p_end} +{p 15 15} {opt plotxrange(min max)} {opt plotyrange(min max)} {it:{help twoway_options}} ]{p_end} + +{p 4 8} where {depvar} is the dependent variable, {it:indvar} is the independent variable for binning, and {it:othercovs} are other covariates to be controlled for.{p_end} + +{p 4 8} The degree of the piecewise polynomial p, the number of smoothness constraints s, and the derivative order v are integers +satisfying 0 <= s,v <= p, which can take different values in each case.{p_end} + +{p 4 8} {opt fweight}s and {opt pweight}s are allowed; see {help weight}.{p_end} + +{marker description}{...} +{title:Description} + +{p 4 8} {cmd:binsprobit} implements binscatter probit estimation with robust inference procedures and plots, following the results in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":Cattaneo, Crump, Farrell and Feng (2022a)}. +Binscatter provides a flexible way to describe the mean relationship between two variables, after possibly adjusting for other covariates, +based on partitioning/binning of the independent variable of interest. +The main purpose of this command is to generate binned scatter plots with curve estimation with robust pointwise confidence intervals and uniform confidence band. +If the binning scheme is not set by the user, the companion command {help binsregselect:binsregselect} is used to implement binscatter in a data-driven way. +Hypothesis testing for parametric specifications of and shape restrictions on the regression function can be conducted via the +companion command {help binstest:binstest}. Hypothesis testing for pairwise group comparisons can be conducted via the +companion command {help binspwc: binspwc}. Binscatter estimation based on the least squares method can be conducted via the command {help binsreg: binsreg}. 
+{p_end} + +{p 4 8} A detailed introduction to this command is given in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Cattaneo, Crump, Farrell and Feng (2022b)}. +Companion R and Python packages with the same capabilities are available (see website below). +{p_end} + +{p 4 8} Companion commands: {help binstest:binstest} for hypothesis testing of parametric specifications and shape restrictions, +{help binspwc:binspwc} for hypothesis testing for pairwise group comparisons, and +{help binsregselect:binsregselect} for data-driven binning selection.{p_end} + +{p 4 8} Related Stata, R and Python packages are available in the following website:{p_end} + +{p 8 8} {browse "https://nppackages.github.io/":https://nppackages.github.io/}{p_end} + + +{marker options}{...} +{title:Options} + +{dlgtab:Estimand} + +{p 4 8} {opt deriv(v)} specifies the derivative order of the regression function for estimation and plotting. +The default is {cmd:deriv(0)}, which corresponds to the function itself. +{p_end} + +{p 4 8} {opt at(position)} specifies the values of {it:othercovs} at which the estimated function is evaluated for plotting. +The default is {cmd:at(mean)}, which corresponds to the mean of {it:othercovs}. Other options are: {cmd:at(median)} for the median of {it:othercovs}, +{cmd:at(0)} for zeros, and {cmd:at(filename)} for particular values of {it:othercovs} saved in another file. +{p_end} + +{p 4 8} Note: When {cmd:at(mean)} or {cmd:at(median)} is specified, all factor variables in {it:othercovs} (if specified) are excluded from the evaluation (set as zero). +{p_end} + +{p 4 8}{opt nolink} specifies that the function within the inverse link (normal cdf) function be reported instead of the conditional probability function. +{p_end} + +{dlgtab:Dots} + +{p 4 8} {opt dots(dotsopt)} sets the degree of polynomial and the number of smoothness constraints for point estimation and plotting as "dots".
+If {cmd:dots(p s)} is specified, a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints is used. +The default is {cmd:dots(0 0)}, which corresponds to piecewise constant (canonical binscatter). +If {cmd:dots(T)} is specified, the default {cmd:dots(0 0)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:dots(F)} is specified, the dots are not included in the plot. +{p_end} + +{p 4 8} {opt dotsgrid(dotsgridoption)} specifies the number and location of dots within each bin to be plotted. +Two options are available: {it:mean} and a {it:numeric} non-negative integer. +The option {opt dotsgrid(mean)} adds the sample average of {it:indvar} within each bin to the grid of evaluation points. +The option {opt dotsgrid(#)} adds {it:#} number of evenly-spaced points to the grid of evaluation points for each bin. +Both options can be used simultaneously: for example, {opt dotsgrid(mean 5)} generates six evaluation points within each bin +containing the sample mean of {it:indvar} within each bin and five evenly-spaced points. +Given this choice, the dots are point estimates evaluated over the selected grid within each bin. +The default is {opt dotsgrid(mean)}, which corresponds to one dot per bin evaluated at +the sample average of {it:indvar} within each bin (canonical binscatter). +{p_end} + +{p 4 8} {opt dotsplotopt(dotsoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the plotted dots. +{p_end} + +{dlgtab:Line} + +{p 4 8} {opt line(lineopt)} sets the degree of polynomial and the number of smoothness constraints +for plotting as a "line". If {cmd:line(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. 
+If {cmd:line(T)} is specified, {cmd:line(0 0)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:line(F)} or {cmd:line()} is specified, the line is not included in the plot. +The default is {cmd:line()}. +{p_end} + +{p 4 8} {opt linegrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used for evaluation of the point estimate set by the {cmd:line(p s)} option. +The default is {cmd:linegrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for fitting/plotting the line. +{p_end} + +{p 4 8} {opt lineplotopt(lineoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the plotted line. +{p_end} + +{dlgtab:Confidence Intervals} + +{p 4 8} {opt ci(ciopt)} specifies the degree of polynomial and the number of smoothness constraints +for constructing confidence intervals. If {cmd:ci(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If {cmd:ci(T)} is specified, {cmd:ci(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:ci(F)} or {cmd:ci()} is specified, the confidence intervals are not included in the plot. +The default is {cmd:ci()}. +{p_end} + +{p 4 8} {opt cigrid(cigridoption)} specifies the number and location of evaluation points in the grid +used to construct the confidence intervals set by the {opt ci(p s)} option. +Two options are available: {it:mean} and a {it:numeric} non-negative integer. +The option {opt cigrid(mean)} adds the sample average of {it:indvar} within each bin to the grid of evaluation points. +The option {opt cigrid(#)} adds {it:#} number of evenly-spaced points to the grid of evaluation points for each bin. 
+Both options can be used simultaneously: for example, {opt cigrid(mean 5)} generates six evaluation points within each bin +containing the sample mean of {it:indvar} within each bin and five evenly-spaced points. +The default is {opt cigrid(mean)}, which corresponds to one evaluation point set at +the sample average of {it:indvar} within each bin for confidence interval construction. +{p_end} + +{p 4 8} {opt ciplotopt(rcapoption)} standard graphs options to be passed on to the +{help twoway:twoway} command to modify the appearance of the confidence intervals. +{p_end} + +{dlgtab:Confidence Band} + +{p 4 8} {opt cb(cbopt)} specifies the degree of polynomial and the number of smoothness constraints +for constructing the confidence band. If {cmd:cb(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If the option {cmd:cb(T)} is specified, {cmd:cb(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:cb(F)} or {cmd:cb()} is specified, the confidence band is not included in the plot. +The default is {cmd:cb()}. +{p_end} + +{p 4 8} {opt cbgrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used for evaluation of the point estimate set by the {cmd:cb(p s)} option. +The default is {cmd:cbgrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for confidence band construction. +{p_end} + +{p 4 8} {opt cbplotopt(rareaoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the confidence band. +{p_end} + +{dlgtab:Global Polynomial Regression} + +{p 4 8} {opt polyreg(p)} sets the degree {it:p} of a global polynomial regression model for plotting. +By default, this fit is not included in the plot unless explicitly specified. 
+Recommended specification is {cmd:polyreg(3)}, which adds a cubic polynomial fit of the regression function of interest to the binned scatter plot. +{p_end} + +{p 4 8} {opt polyreggrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin +used for evaluation of the point estimate set by the {cmd:polyreg(p)} option. +The default is {cmd:polyreggrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for plotting the global polynomial fit. +{p_end} + +{p 4 8} {opt polyregcigrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used for +constructing confidence intervals based on polynomial regression set by the {cmd:polyreg(p)} option. +The default is {cmd:polyregcigrid(0)}, which corresponds to not plotting confidence intervals for the global polynomial regression approximation. +{p_end} + +{p 4 8} {opt polyregplotopt(lineoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the global polynomial regression fit. +{p_end} + +{dlgtab:Subgroup Analysis} + +{p 4 8} {opt by(varname)} specifies the variable containing the group indicator to perform subgroup analysis; +both numeric and string variables are supported. +When {opt by(varname)} is specified, {cmdab:binsprobit} implements estimation and inference for each subgroup separately, +but produces a common binned scatter plot. +By default, the binning structure is selected for each subgroup separately, +but see the option {cmd:samebinsby} below for imposing a common binning structure across subgroups. +{p_end} + +{p 4 8} {cmd:bycolors(}{it:{help colorstyle}list}{cmd:)} specifies an ordered list of colors for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{p 4 8} {cmd:bysymbols(}{it:{help symbolstyle}list}{cmd:)} specifies an ordered list of symbols for plotting each subgroup series defined by the option {opt by()}.
+{p_end} + +{p 4 8} {cmd:bylpatterns(}{it:{help linepatternstyle}list}{cmd:)} specifies an ordered list of line patterns for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{dlgtab:Binning/Degree/Smoothness Selection} + +{p 4 8} {opt nbins(nbinsopt)} sets the number of bins for partitioning/binning of {it:indvar}. +If {cmd:nbins(T)} or {cmd:nbins()} (default) is specified, the number of bins is selected via the companion command {help binsregselect:binsregselect} +in a data-driven, optimal way whenever possible. If a {help numlist:numlist} with more than one number is specified, +the number of bins is selected within this list via the companion command {help binsregselect:binsregselect}. +{p_end} + +{p 4 8} {opt binspos(position)} specifies the position of binning knots. +The default is {cmd:binspos(qs)}, which corresponds to quantile-spaced binning (canonical binscatter). +Other options are: {cmd:es} for evenly-spaced binning, or a {help numlist} for manual specification of +the positions of inner knots (which must be within the range of {it:indvar}). +{p_end} + +{p 4 8} {opt binsmethod(method)} specifies the method for data-driven selection of the number of bins via +the companion command {help binsregselect:binsregselect}. +The default is {cmd:binsmethod(dpi)}, which corresponds to the IMSE-optimal direct plug-in rule. +The other option is: {cmd:rot} for rule of thumb implementation. +{p_end} + +{p 4 8} {opt nbinsrot(#)} specifies an initial number of bins value used to construct the DPI number of bins selector. +If not specified, the data-driven ROT selector is used instead. +{p_end} + +{p 4 8} {opt samebinsby} forces a common partitioning/binning structure across all subgroups specified by the option {cmd:by()}. +The knots positions are selected according to the option {cmd:binspos()} and using the full sample. 
+If {cmd:nbins()} is not specified, then the number of bins is selected via the companion command +{help binsregselect:binsregselect} and using the full sample. +{p_end} + +{p 4 8} {opt randcut(#)} specifies the upper bound on a uniformly distributed variable used to draw a subsample +for bins/degree/smoothness selection. +Observations for which {cmd:runiform()<=#} are used. # must be between 0 and 1. +By default, max(5,000, 0.01n) observations are used if the sample size n>5,000. +{p_end} + +{p 4 8} {opt pselect(numlist)} specifies a list of numbers within which the degree of polynomial {it:p} for +point estimation is selected. Piecewise polynomials of the selected optimal degree {it:p} +are used to construct dots or line if {cmd:dots(T)} or {cmd:line(T)} is specified, +whereas piecewise polynomials of degree {it:p+1} are used to construct confidence intervals +or confidence band if {cmd:ci(T)} or {cmd:cb(T)} is specified. +{p_end} + +{p 4 8} {opt sselect(numlist)} specifies a list of numbers within which +the number of smoothness constraints {it:s} +for point estimation is selected. Piecewise polynomials with the selected optimal +{it:s} smoothness constraints are used to construct dots or line +if {cmd:dots(T)} or {cmd:line(T)} is specified, +whereas piecewise polynomials with {it:s+1} constraints are used to construct +confidence intervals or confidence band if {cmd:ci(T)} or {cmd:cb(T)} is specified. +If not specified, for each value {it:p} supplied in the +option {cmd:pselect()}, only the piecewise polynomial with the maximum smoothness is considered, i.e., {it:s=p}. +{p_end} + +{p 4 8} Note: To implement the degree or smoothness selection, in addition to {cmd:pselect()} +or {cmd:sselect()}, {cmd:nbins(#)} must be specified. +{p_end} + +{dlgtab:Simulation} + +{p 4 8} {opt nsims(#)} specifies the number of random draws for constructing confidence bands.
+The default is {cmd:nsims(500)}, which corresponds to 500 draws from a standard Gaussian random vector of size [(p+1)*J - (J-1)*s]. +A large number of random draws is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsgrid(#)} specifies the number of evaluation points of an evenly-spaced grid +within each bin used for evaluation of the supremum operation needed to construct confidence bands. +The default is {cmd:simsgrid(20)}, which corresponds to 20 evenly-spaced evaluation points +within each bin for approximating the supremum operator. +A large number of evaluation points is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsseed(#)} sets the seed for simulations. +{p_end} + +{dlgtab:Mass Points and Degrees of Freedom} + +{p 4 8} {opt dfcheck(n1 n2)} sets cutoff values for minimum effective sample size checks, +which take into account the number of unique values of {it:indvar} (i.e., adjusting for the number of mass points), +number of clusters, and degrees of freedom of the different statistical models considered. +The default is {cmd:dfcheck(20 30)}. See Cattaneo, Crump, Farrell and Feng (2022b) for more details. +{p_end} + +{p 4 8} {opt masspoints(masspointsoption)} specifies how mass points in {it:indvar} are handled. +By default, all mass point and degrees of freedom checks are implemented. +Available options: +{p_end} +{p 8 8} {opt masspoints(noadjust)} omits mass point checks and the corresponding effective sample size adjustments.{p_end} +{p 8 8} {opt masspoints(nolocalcheck)} omits within-bin mass point and degrees of freedom checks.{p_end} +{p 8 8} {opt masspoints(off)} sets {opt masspoints(noadjust)} and {opt masspoints(nolocalcheck)} simultaneously.{p_end} +{p 8 8} {opt masspoints(veryfew)} forces the command to proceed as if {it:indvar} has only a few number of mass points (i.e., distinct values). 
+In other words, forces the command to proceed as if the mass point and degrees of freedom checks were failed.{p_end} + +{dlgtab:Standard Error} + +{p 4 8} {cmd:vce(}{it:{help vcetype}}{cmd:)} specifies the {it:vcetype} for variance estimation used by the command {help probit##options:probit}. +The default is {cmd:vce(robust)}. +{p_end} + +{p 4 8} {opt asyvar(on/off)} specifies the method used to compute standard errors. +If {cmd:asyvar(on)} is specified, the standard error of the nonparametric component is used +and the uncertainty related to other control variables {it:othercovs} is omitted. +Default is {cmd:asyvar(off)}, that is, the uncertainty related to {it:othercovs} is taken into account. +{p_end} + +{dlgtab:Other Options} + +{p 4 8} {opt level(#)} sets the nominal confidence level for confidence interval and confidence band estimation. +Default is {cmd:level(95)}. +{p_end} + +{p 4 8} {opt probitopt(probit_option)} options to be passed on to the command {help probit##options:probit}. +For example, options that control for the optimization process can be added here. +{p_end} + +{p 4 8}{opt usegtools(on/off)} forces the use of several commands in the community-distributed Stata package {cmd:gtools} to speed the computation up, if {it:on} is specified. +Default is {cmd:usegtools(off)}. +{p_end} + +{p 4 8} For more information about the package {cmd:gtools}, please see {browse "https://gtools.readthedocs.io/en/latest/index.html":https://gtools.readthedocs.io/en/latest/index.html}. +{p_end} + +{p 4 8} {opt noplot} omits binscatter plotting. +{p_end} + +{p 4 8} {opt savedata(filename)} specifies a filename for saving all data underlying the binscatter plot (and more). +{p_end} + +{p 4 8} {opt replace} overwrites the existing file when saving the graph data. +{p_end} + +{p 4 8} {opt plotxrange(min max)} specifies the range of the x-axis for plotting. Observations outside the range are dropped in the plot. 
+{p_end} + +{p 4 8} {opt plotyrange(min max)} specifies the range of the y-axis for plotting. Observations outside the range are dropped in the plot. +{p_end} + +{p 4 8} {it:{help twoway_options}} any unrecognized options are appended to the end of the twoway command generating the binned scatter plot. +{p_end} + + +{marker examples}{...} +{title:Examples} + +{p 4 8} Setup{p_end} +{p 8 8} . {stata sysuse auto}{p_end} + +{p 4 8} Run a binscatter probit regression and report the plot{p_end} +{p 8 8} . {stata binsprobit foreign weight mpg}{p_end} + +{p 4 8} Add confidence intervals and confidence band{p_end} +{p 8 8} . {stata binsprobit foreign weight mpg, ci(1 1) nbins(5)}{p_end} + + +{marker stored_results}{...} +{title:Stored results} + +{synoptset 17 tabbed}{...} +{p2col 5 17 21 2: Scalars}{p_end} +{synopt:{cmd:e(N)}}number of observations{p_end} +{synopt:{cmd:e(level)}}confidence level{p_end} +{synopt:{cmd:e(dots_p)}}degree of polynomial for dots{p_end} +{synopt:{cmd:e(dots_s)}}smoothness of polynomial for dots{p_end} +{synopt:{cmd:e(line_p)}}degree of polynomial for line{p_end} +{synopt:{cmd:e(line_s)}}smoothness of polynomial for line{p_end} +{synopt:{cmd:e(ci_p)}}degree of polynomial for confidence interval{p_end} +{synopt:{cmd:e(ci_s)}}smoothness of polynomial for confidence interval{p_end} +{synopt:{cmd:e(cb_p)}}degree of polynomial for confidence band{p_end} +{synopt:{cmd:e(cb_s)}}smoothness of polynomial for confidence band{p_end} +{p2col 5 17 21 2: Matrices}{p_end} +{synopt:{cmd:e(N_by)}}number of observations for each group{p_end} +{synopt:{cmd:e(Ndist_by)}}number of distinct values for each group{p_end} +{synopt:{cmd:e(Nclust_by)}}number of clusters for each group{p_end} +{synopt:{cmd:e(nbins_by)}}number of bins for each group{p_end} +{synopt:{cmd:e(cval_by)}}critical value for each group, used for confidence bands{p_end} +{synopt:{cmd:e(imse_var_rot)}}variance constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_bsq_rot)}}bias constant in IMSE, 
ROT selection{p_end}
+{synopt:{cmd:e(imse_var_dpi)}}variance constant in IMSE, DPI selection{p_end}
+{synopt:{cmd:e(imse_bsq_dpi)}}bias constant in IMSE, DPI selection{p_end}
+
+{marker references}{...}
+{title:References}
+
+{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022a.
+{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":On Binscatter}.
+{it:arXiv:1902.09608}.
+{p_end}
+
+{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022b.
+{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Binscatter Regressions}.
+{it:arXiv:1902.09615}.
+{p_end}
+
+
+{marker authors}{...}
+{title:Authors}
+
+{p 4 8} Matias D. Cattaneo, Princeton University, Princeton, NJ.
+{browse "mailto:cattaneo@princeton.edu":cattaneo@princeton.edu}.
+{p_end}
+
+{p 4 8} Richard K. Crump, Federal Reserve Bank of New York, New York, NY.
+{browse "mailto:richard.crump@ny.frb.org":richard.crump@ny.frb.org}.
+{p_end}
+
+{p 4 8} Max H. Farrell, University of Chicago, Chicago, IL.
+{browse "mailto:max.farrell@chicagobooth.edu":max.farrell@chicagobooth.edu}.
+{p_end}
+
+{p 4 8} Yingjie Feng, Tsinghua University, Beijing, China.
+{browse "mailto:fengyingjiepku@gmail.com":fengyingjiepku@gmail.com}.
+{p_end}
+
diff --git a/110/replication_package/replication/ado/plus/b/binspwc.ado b/110/replication_package/replication/ado/plus/b/binspwc.ado
new file mode 100644
index 0000000000000000000000000000000000000000..85d0e10172b18c8279f918481c8b541a564b3c9c
--- /dev/null
+++ b/110/replication_package/replication/ado/plus/b/binspwc.ado
@@ -0,0 +1,1296 @@
+*!
version 1.2 09-Oct-2022 + +capture program drop binspwc +program define binspwc, eclass + version 13 + + syntax varlist(min=2 numeric fv ts) [if] [in] [fw aw pw] , by(varname) [deriv(integer 0) at(string asis) nolink /// + estmethod(string) estmethodopt(string asis) absorb(string asis) reghdfeopt(string asis) /// + pwc(string) testtype(string) lp(string) /// + bins(numlist integer max=2 >=0) bynbins(string) binspos(string) /// + pselect(numlist integer >=0) sselect(numlist integer >=0) /// + binsmethod(string) nbinsrot(string) samebinsby randcut(numlist max=1 >=0 <=1) /// + nsims(integer 500) simsgrid(integer 20) simsseed(numlist integer max=1 >=0) /// + dfcheck(numlist integer max=2 >=0) masspoints(string) usegtools(string) /// + vce(passthru) asyvar(string) /// + numdist(string) numclust(string)] + /* last line only for internal use */ + + * Regularization constant (for checking only) + local qrot=2 + + ************************************** + * Create weight local + if ("`weight'"!="") { + local wt [`weight'`exp'] + local wtype=substr("`weight'",1,1) + } + + if ("`testtype'"=="") { + local testtype "2" + } + + * which model? + if ("`absorb'"!="") { + if ("`estmethod'"!="") { + if ("`estmethod'"!="reghdfe") { + di as error "absorb() can only be combined with estmethod(reghdfe)." + exit + } + } + else local estmethod "reghdfe" + } + if ("`estmethod'"=="") local estmethod "reg" + tokenize `estmethod' + local estmethod `1' + if ("`estmethod'"=="reg") { + local estcmd "reg" + } + else if ("`estmethod'"=="qreg") { + local estcmd "qreg" + local quantile `2' + if ("`quantile'"=="") local quantile=0.5 + } + else if ("`estmethod'"=="logit") { + local estcmd "logit" + } + else if ("`estmethod'"=="probit") { + local estcmd "probit" + } + else if ("`estmethod'"=="reghdfe") { + local estcmd "reghdfe" + } + + * report the results for the cond. mean model? 
+ if ("`link'"!="") local transform "F" + else local transform "T" + + * Extract options + * default vce + if ("`vce'"=="") local vce "vce(robust)" + local vcetemp: subinstr local vce "vce(" "", all + local vcetemp: subinstr local vcetemp ")" "", all + tokenize "`vcetemp'", parse(", ") + if ("`1'"=="cl"|"`1'"=="clu"|"`1'"=="clus"|"`1'"=="clust"| /// + "`1'"=="cluste"|"`1'"=="cluster") { + if ("`3'"==""|"`3'"==",") local clusterON "T" /* Mark cluster is specified */ + local clustervar `2' + if ("`estmethod'"=="qreg") { + local vce "vce(robust)" + di as text in gr "Warning: vce(cluster) not allowed. vce(robust) used instead." + } + } + + * use bootstrap cmd? + if ("`1'"=="boot" | "`1'"=="bootstrap") { + local boot "on" + local repstemp `3' + if ("`repstemp'"=="") local repstemp reps(20) + local repstemp: subinstr local repstemp "reps(" "", all + local reps: subinstr local repstemp ")" "", all + if ("`estmethod'"=="qreg") { + local estcmd "bsqreg" + if ("`weight'"!="") { + di as error "Weights not allowed for bootstrapping." 
+ exit + } + } + } + else { + local boot "off" + } + + if ("`asyvar'"=="") local asyvar "off" + + + * vce for bin selection + if ("`estmethod'"=="qreg") { + if ("`vce'"=="vce(iid)") local vce_select "vce(ols)" + else local vce_select "vce(robust)" + } + else if ("`estmethod'"=="logit"|"`estmethod'"=="probit") { + if ("`vce'"=="oim"|"`vce'"=="opg") local vce_select "vce(ols)" + else local vce_select "`vce'" + } + else if ("`estmethod'"=="reg"|"`estmethod'"=="reghdfe") { + local vce_select "`vce'" + } + + if ("`binspos'"=="es") local binspos "ES" + if ("`binspos'"=="qs") local binspos "QS" + if ("`binspos'"=="") local binspos "QS" + if ("`binsmethod'"=="rot") local binsmethod "ROT" + if ("`binsmethod'"=="dpi") local binsmethod "DPI" + if ("`binsmethod'"=="") local binsmethod "DPI" + + * degree, smoothness and binning + if ("`pwc'"!="T"&"`pwc'"!="F"&"`pwc'"!="") { + numlist "`pwc'", integer max(2) range(>=0) + local pwc=r(numlist) + } + + if ("`pwc'"=="F") local pwc "" + if ("`pwc'"=="T"&"`binspos'"!="ES"&"`binspos'"!="QS") local pwc "" + + local selection "" + + * analyze nbins + local lenbynbins=0 + if ("`bynbins'"!="T"&"`bynbins'"!="") { + numlist "`bynbins'", integer range(>=0) + local bynbins=r(numlist) + local lenbynbins: word count `bynbins' + if (`lenbynbins'==1) { + local nbins_all=`bynbins' + } + } + + * analyze numlist in pselect and sselect + local len_p=0 + local len_s=0 + + if ("`pselect'"!="") { + numlist "`pselect'", integer range(>=`deriv') sort + local plist=r(numlist) + } + + if ("`sselect'"!="") { + numlist "`sselect'", integer range(>=0) sort + local slist=r(numlist) + } + + local len_p: word count `plist' + local len_s: word count `slist' + + if (`len_p'==1&`len_s'==0) { + local slist `plist' + local len_s=1 + } + if (`len_p'==0&`len_s'==1) { + local plist `slist' + local len_p=1 + } + + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + if ("`bynbins'"!=""|"`pselect'"!=""|"`sselect'"!="") { + di as error "bynbins(), pselect() or sselect() incorrectly 
specified." + exit + } + } + + tokenize `bins' + local binsp "`1'" + local binss "`2'" + if ("`binsp'"=="") local binsp=. + if ("`binss'"=="") local binss=`binsp' + if ("`bins'"!="") { + if ("`bynbins'"!=""&"`bynbins'"!="T"&"`bynbins'"!="0") { + di as error "bynbins() or bins() is incorrectly specified." + exit + } + } + + * 1st case: select J + if (("`bins'"!=""|"`bynbins'"=="0"|"`bynbins'"=="T"|"`bynbins'"=="")&("`binspos'"=="ES"|"`binspos'"=="QS")) local selection "J" + + if ("`selection'"=="J") { + if (`len_p'>1|`len_s'>1) { + di as error "Only one p and one s are allowed to select # of bins." + exit + } + if ("`plist'"=="") local plist=`deriv' + if ("`slist'"=="") local slist=`plist' + if ("`bins'"=="") { + local binsp `plist' + local binss `slist' + } + local len_p=1 + local len_s=1 + if ("`pwc'"=="T"|"`pwc'"=="") local pwc `=`binsp'+1' `=`binss'+1' + } + + * 2nd case: select P (the special case with nbins() pselect() will be modified in the next step) + if ("`selection'"!="J" & ("`pwc'"=="T"|"`pwc'"=="")) local pselectOK "T" + + if (("`pselectOK'"=="T") & ("`bynbins'"!=""&"`bynbins'"!="T") & (`len_p'>1|`len_s'>1)) { + local selection "P" + } + + * 3rd case: user-specified J and p + if ((`len_p'<=1&`len_s'<=1) & "`selection'"!="J") { + local selection "NA" + if ("`pwc'"=="") { + if ("`bins'"!="") local pwc `=`binsp'+1' `=`binss'+1' + else { + if (`len_p'==1&`len_s'==1) local pwc `=`plist'+1' `=`slist'+1' + else local pwc `=`deriv'+1' `=`deriv'+1' + } + } + } + + * exclude all other cases + if ("`selection'"=="") { + di as error "Degree, smoothness, or # of bins are not correctly specified." 
+ exit + } + + if ("`selection'"=="NA") local binselectmethod "User-specified" + else { + if ("`binsmethod'"=="DPI") local binselectmethod "IMSE-optimal plug-in choice" + if ("`binsmethod'"=="ROT") local binselectmethod "IMSE-optimal rule-of-thumb choice" + if ("`selection'"=="J") local binselectmethod "`binselectmethod' (select # of bins)" + if ("`selection'"=="P") local binselectmethod "`binselectmethod' (select degree and smoothness)" + } + + * option for comparison + tokenize `pwc' + local tsha_p "`1'" + local tsha_s "`2'" + if ("`tsha_p'"==""|"`tsha_p'"=="T") local tsha_p=. + if ("`tsha_s'"=="") local tsha_s `tsha_p' + + + * Add warnings about degrees for estimation and inference + if ("`selection'"=="J") { + if ("`tsha_p'"!=".") { + if (`tsha_p'<=`binsp') { + local tsha_p=`binsp'+1 + local tsha_s=`tsha_p' + di as text "Warning: Degree for pwc() has been changed. It must be greater than the degree for bin selection." + } + } + } + if ("`selection'"=="NA") { + di as text "Warning: Testing procedures are valid when nbins() is much larger than the IMSE-optimal choice." + } + + * mass check? + if ("`masspoints'"=="") { + local massadj "T" + local localcheck "T" + } + else if ("`masspoints'"=="off") { + local massadj "F" + local localcheck "F" + } + else if ("`masspoints'"=="noadjust") { + local massadj "F" + local localcheck "T" + } + else if ("`masspoints'"=="nolocalcheck") { + local massadj "T" + local localcheck "F" + } + else if ("`masspoints'"=="veryfew") { + di as error "veryfew() not allowed for testing." + exit + } + + * extract dfcheck + if ("`dfcheck'"=="") local dfcheck 20 30 + tokenize `dfcheck' + local dfcheck_n1 "`1'" + local dfcheck_n2 "`2'" + + * evaluate at w from another dataset? + if (`"`at'"'!=`""'&`"`at'"'!=`"mean"'&`"`at'"'!=`"median"'&`"`at'"'!=`"0"') local atwout "user" + + * default for lp metric + if ("`lp'"=="") local lp "inf" + + * use gtools commands instead? 
+ if ("`usegtools'"=="off") local usegtools "" + if ("`usegtools'"=="on") local usegtools usegtools + if ("`usegtools'"!="") { + capture which gtools + if (_rc) { + di as error "Gtools package not installed." + exit + } + local localcheck "F" + local sel_gtools "on" + } + else local sel_gtools "off" + + * use reghdfe? + if ("`absorb'"!="") { + capture which reghdfe + if (_rc) { + di as error "reghdfe not installed." + exit + } + } + + * error check + if (`tsha_p'<`tsha_s'|`binsp'<`binss') { + di as error "p cannot be smaller than s." + exit + } + if ("`tsha_p'"!="."&"`binsp'"!=".") { + if (`tsha_p'<=`binsp') { + di as text in gr "Warning: p for testing <= p for bins() not suggested." + } + } + if (`tsha_p'<`deriv') { + di as error "p for test cannot be smaller than deriv." + exit + } + if (`nsims'<2000|`simsgrid'<50) { + di as text "Note: A larger number random draws/evaluation points is recommended to obtain the final results." + } + + + * Mark sample + preserve + + * Parse varlist into y_var, x_var and w_var + tokenize `varlist' + fvrevar `1', tsonly + local y_var "`r(varlist)'" + fvrevar `2', tsonly + local x_var "`r(varlist)'" + + macro shift 2 + local w_var "`*'" + + * read eval point for w from another file + if ("`atwout'"=="user") { + append using `at' + } + + fvrevar `w_var', tsonly + local w_var "`r(varlist)'" + local nwvar: word count `w_var' + + * Save the last obs in a vector and then drop it + tempname wuser /* a vector used to keep eval for w */ + if ("`atwout'"=="user") { + mata: st_matrix("`wuser'", st_data(`=_N', "`w_var'")) + qui drop in `=_N' + } + + * Get positions of factor vars + local indexlist "" + local i = 1 + foreach v in `w_var' { + if strpos("`v'", ".") == 0 { + local indexlist `indexlist' `i' + } + local ++i + } + + * add a default for at + if (`"`at'"'==""&`nwvar'>0) { + local at "mean" + } + + marksample touse /* now renew the mark to account for missing values */ + markout `touse' `by', strok + qui keep if `touse' + local eN=_N + 
*local nsize=_N /* # of rows in the original dataset */ + + if ("`usegtools'"==""&("`masspoints'"!="off"|"`binspos'"=="QS")) { + if ("`:sortedby'"!="`x_var'") sort `x_var', stable + } + + ************************************************************* + * Check number of unique byvals & create local storing byvals + local byvarname `by' + capture confirm numeric variable `by' + if _rc { + local bystring "T" + * generate a numeric version + tempvar by + tempname bylabel + qui egen `by'=group(`byvarname'), lname(`bylabel') + } + + local bylabel `:value label `by'' /* catch value labels for numeric by-vars too */ + + tempname byvalmatrix + qui tab `by', nofreq matrow(`byvalmatrix') + + * save by-value in a local and calculate group mins and maxs + local bynum=r(r) + if (`bynum'==1) { + di as error "More than one group is required." + exit + } + tempname xminmat xmaxmat Nmat + matrix `xminmat'=J(`bynum', 1, .) + matrix `xmaxmat'=`xminmat' + matrix `Nmat'=`xminmat' + local Ntotal=0 + forvalues i=1/`bynum' { + local byv `=`byvalmatrix'[`i',1]' + local byvals `byvals' `byv' + if ("`wtype'"=="f") sum `x_var' if `by'==`byv' `wt', meanonly + else sum `x_var' if `by'==`byv', meanonly + mat `xminmat'[`i',1]=r(min) + mat `xmaxmat'[`i',1]=r(max) + mat `Nmat'[`i',1]=r(N) /* sample size, with wt */ + local Ntotal=`Ntotal'+r(N) + } + mata: st_local("Ntotal", strofreal(sum(st_matrix("`Nmat'")))) + + * define a common support for eval points + mata: st_local("max_xmin", strofreal(max(st_matrix("`xminmat'")))); /// + st_local("min_xmax", strofreal(min(st_matrix("`xmaxmat'")))); /// + st_local("xmin", strofreal(min(st_matrix("`xminmat'")))); /// + st_local("xmax", strofreal(max(st_matrix("`xmaxmat'")))) + + + * Temp name in MATA + tempname xvec yvec byvec cluvec binedges + mata: `xvec'=st_data(., "`x_var'"); `yvec'=st_data(.,"`y_var'"); `byvec'=.; `cluvec'=. 
+ + ******************************************************* + *** Mass point counting ******************************* + tempname Ndistlist Nclustlist mat_imse_var_rot mat_imse_bsq_rot mat_imse_var_dpi mat_imse_bsq_dpi + mat `Ndistlist'=J(`bynum',1,.) + mat `Nclustlist'=J(`bynum',1,.) + * Matrices saving imse + mat `mat_imse_var_rot'=J(`bynum',1,.) + mat `mat_imse_bsq_rot'=J(`bynum',1,.) + mat `mat_imse_var_dpi'=J(`bynum',1,.) + mat `mat_imse_bsq_dpi'=J(`bynum',1,.) + + if (`bynum'>1) mata: `byvec'=st_data(.,"`by'") + if ("`clusterON'"=="T") mata: `cluvec'=st_data(.,"`clustervar'") + + ******************************************************** + ********** Bins, based on FULL sample ****************** + ******************************************************** + * knotlist: inner knot seq; knotlistON: local, knot available before loop + + tempname fullkmat /* matrix name for saving knots based on the full sample */ + + * Extract user-specified knot list + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + capture numlist "`binspos'", ascending + if (_rc==0) { + local knotlistON "T" + local knotlist `binspos' + local nbins_all: word count `knotlist' + local first: word 1 of `knotlist' + local last: word `nbins_all' of `knotlist' + if (`first'<=`max_xmin'|`last'>=`min_xmax') { + di as error "Inner knots specified out of allowed range." + exit + } + else { + local nbins_all=`nbins_all'+1 + local pos "user" + + foreach el of local knotlist { + mat `fullkmat'=(nullmat(`fullkmat') \ `el') + } + mat `fullkmat'=(`xmin' \ `fullkmat' \ `xmax') + } + } + else { + di as error "Numeric list incorrectly specified in binspos()." + exit + } + } + + * Bin selection using the whole sample if + if ("`selection'"!="NA" & "`samebinsby'"!="") { + local selectfullON "T" + } + + if ("`selectfullON'"=="T") { + local Ndist=. 
+ if ("`massadj'"=="T") { + if ("`numdist'"!=""&"`numdist'"!=".") { + local Ndist=`numdist' + } + else { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xvec', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' + local Ndist=r(unique) + } + } + local eN=min(`eN', `Ndist') + } + * # of clusters + local Nclust=. + if ("`clusterON'"=="T") { + if ("`numclust'"!=""&"`numclust'"!=".") { + local Nclust=`numclust' + } + else { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + } + local eN=min(`eN', `Nclust') /* effective sample size */ + } + + * Check effective sample size + if ("`binsp'"==".") local binspcheck=6 + else local binspcheck=`binsp' + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`binspcheck'+1+`qrot')) { + * ROT inavailable, exit + di as error "Too few observations for bin selection." + exit + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `Ntotal'>5000) { + local randcut1k=max(5000/`Ntotal', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5000, 0.01n) observations if n>5000. To use the full sample, set randcut(1)." + } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') bins(`binsp' `binss') nbins() /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce_select' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "Bin selection fails." 
+ exit + } + if ("`binsmethod'"=="ROT") { + local nbins_all=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local nbins_all=e(nbinsdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`nbins_all'==.) { + local nbins_all=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_all') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce_select' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." 
+ } + } + local tsha_p=`binsp'+1 + local tsha_s=`binss'+1 + } + } + } + + if ("`selectfullON'"=="T"|("`selection'"=="NA"&"`samebinsby'"!="")) { + * Save in a knot list + local knotlistON "T" + if ("`binspos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins_all' + forvalues i=1/`=`nbins_all'+1' { + mat `fullkmat'=(nullmat(`fullkmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else if ("`binspos'"=="QS") { + if (`nbins_all'==1) mat `fullkmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `wt', nq(`nbins_all') `usegtools' + mat `fullkmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + + *** Placement name, for display ************ + if ("`pos'"=="user") { + local binselectmethod "User-specified" + local placement "User-specified" + } + else if ("`binspos'"=="ES") { + local placement "Evenly-spaced" + } + else if ("`binspos'"=="QS") { + local placement "Quantile-spaced" + } + + + ******************************************************** + * Set seed + if ("`simsseed'"!="") set seed `simsseed' + + * generate eval points + tempname Xm uni_grid uni_grid_bin uni_basis num denom nummat tstat pmat xsub ysub byindex xcatsub /* objects in MATA */ + mata: `tstat'=J(`=`bynum'*(`bynum'-1)/2',3,.); `pmat'=J(`=`bynum'*(`bynum'-1)/2',1,.) + + tempname Xm0 fit fit0 se vcov + mata: `Xm0'=.; `fit'=.; `fit0'=0; `se'=.; `vcov'=. + + tempvar xcat bycond + qui gen `xcat'=. in 1 + qui gen `bycond'=. in 1 + + * matrix names, for returns + tempname nbinslist teststat pvalue pwc_plist pwc_slist + + * prepare grid + mata: `uni_grid'=rangen(`max_xmin', `min_xmax', `simsgrid'+2); /// + `uni_grid'=`uni_grid'[|2 \ `=`simsgrid'+1'|] /* only keep inner points, simsgrid>=1 */ + + * adjust w vars + tempname wval + if (`nwvar'>0) { + if (`"`at'"'==`"mean"'|`"`at'"'==`"median"') { + matrix `wval'=J(1, `nwvar', 0) + tempname wvaltemp mataobj + mata: `mataobj'=. 
+ foreach wpos in `indexlist' { + local wname: word `wpos' of `w_var' + if ("`usegtools'"=="") { + if ("`wtype'"!="") qui tabstat `wname' [aw`exp'], stat(`at') save + else qui tabstat `wname', stat(`at') save + mat `wvaltemp'=r(StatTotal) + } + else { + qui gstats tabstat `wname' `wt', stat(`at') matasave("`mataobj'") + mata: st_matrix("`wvaltemp'", `mataobj'.getOutputCol(1)) + } + mat `wval'[1,`wpos']=`wvaltemp'[1,1] + } + mata: mata drop `mataobj' + } + else if (`"`at'"'==`"0"') { + matrix `wval'=J(1,`nwvar',0) + } + else if ("`atwout'"=="user") { + matrix `wval'=`wuser' + } + } + + * define a w vector (possibly a constant) in MATA + tempname wvec wvec0 + mata: `wvec'=J(1,0,.); `wvec0'=J(1,0,.) + if (`nwvar'>0) { + mata: `wvec0'=st_matrix("`wval'") + if (`deriv'==0&"`asyvar'"=="off") mata: `wvec'=(`wvec', `wvec0') + else mata: `wvec'=(`wvec', J(1,`nwvar',0)) + } + if ("`estmethod'"=="qreg"|"`estmethod'"=="reghdfe") { + mata: `wvec0'=(`wvec0', 1) + if (`deriv'==0) mata: `wvec'=(`wvec', 1) + else mata: `wvec'=(`wvec', 0) + } + + + local byvalnamelist "" /* save group name (value) */ + local counter=1 + local counter2=1 + *************************************************************************** + ******************* Now, enter the loop *********************************** + *************************************************************************** + foreach byval in `byvals' { + local conds "if `by'==`byval'" /* with "if" */ + qui replace `bycond'=(`by'==`byval') + + if ("`bylabel'"=="") local byvalname=`byval' + else { + local byvalname `: label `bylabel' `byval'' + } + local byvalnamelist `" `byvalnamelist' `"`byvalname'"' "' + + mata: `byindex'=`byvec':==`byval' + mata: `xsub'=select(`xvec',`byindex'); `ysub'=select(`yvec', `byindex') + + ************************************ + * Calculate various sample sizes + * Subsample size + if ("`wtype'"=="f") sum `x_var' `conds' `wt', meanonly + else sum `x_var' `conds', meanonly + + local xmin=r(min) + local xmax=r(max) 
+ local N=r(N) + + * Effective sample size + if ("`wtype'"!="f") local eN=r(N) + else { + qui count `conds' + local eN=r(N) + } + + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xsub', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' `conds' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + mat `Ndistlist'[`counter',1]=`Ndist' + } + + * # of clusters + local Nclust=. + if ("`clusterON'"=="T") { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(select(`cluvec', `byindex'))))) + } + else { + qui gunique `clustervar' `conds' + local Nclust=r(unique) + } + local eN=min(`eN', `Nclust') /* effective SUBsample size */ + mat `Nclustlist'[`counter',1]=`Nclust' + } + + ********************************************************* + ************** Prepare bins, within loop **************** + ********************************************************* + if ("`pos'"!="user") local pos `binspos' /* initialize pos */ + + * Selection? + local nbins "" + if (`lenbynbins'>1) local nbins: word `counter' of `bynbins' + if ("`nbins_all'"!="") local nbins=`nbins_all' /* add the universal nbins */ + + if ("`selection'"!="NA"&"`knotlistON'"!="T") { + * Check effective sample size + if ("`binsp'"==".") local binspcheck=6 + else local binspcheck=`binsp' + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`binspcheck'+1+`qrot')) { + di as error "Too few observations for bin selection." + exit + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `N'>5000) { + local randcut1k=max(5000/`N', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5000, 0.01n) observations if n>5000. To use the full sample, set randcut(1)." 
+ } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + bins(`binsp' `binss') nbins() /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`pos') nbinsrot(`nbinsrot') /// + `vce_select' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_bsq_dpi'[`counter',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter',1]=e(imse_var_dpi) + if (`nbins'==.) { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce_select' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) { + di as error "Bin selection fails." 
+ exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_bsq_dpi'[`counter',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter',1]=e(imse_var_dpi) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + local tsha_p=`binsp'+1 + local tsha_s=`binss'+1 + } + } + } + + ******************************************************* + * Check if eff. sample size is large enough for testing + if ((`nbins'-1)*(`tsha_p'-`tsha_s'+1)+`tsha_p'+1+`dfcheck_n2'>=`eN') { + di as text in gr "Warning: Too small effective sample size for testing shape." + } + + * Generate category variable for data and save knot in matrix + tempname kmat + if ("`knotlistON'"=="T") { + mat `kmat'=`fullkmat' + } + else { + if ("`pos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `kmat'=(nullmat(`kmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + + * Renew knot list + mata: st_matrix("`kmat'", (`xmin' \ uniqrows(st_matrix("`kmat'")[|2 \ `=`nbins'+1'|]))) + if (`nbins'!=rowsof(`kmat')-1) { + di as text in gr "Warning: Repeated knots. Some bins dropped." 
+ local nbins=rowsof(`kmat')-1 + } + binsreg_irecode `x_var' `conds', knotmat(`kmat') bin(`xcat') /// + `usegtools' nbins(`nbins') pos(`pos') knotliston(`knotlistON') + + mata: `xcatsub'=st_data(., "`xcat'") + mata: `xcatsub'=select(`xcatsub', `byindex') + + * Now, save nbins, p and s in a matrix !!! + mat `nbinslist'=(nullmat(`nbinslist') \ `nbins') + mat `pwc_plist'=(nullmat(`pwc_plist') \ `tsha_p') + mat `pwc_slist'=(nullmat(`pwc_slist') \ `tsha_s') + + * Check for empty bins + if ("`localcheck'"=="T") { + mata: st_local("Ncat", strofreal(rows(uniqrows(`xcatsub')))) + if (`nbins'==`Ncat') { + mata: `binedges'=binsreg_uniq(`xsub', `xcatsub', `nbins', "uniqmin") + mata: mata drop `binedges' + } + else { + local uniqmin=0 + di as text in gr "Warning: There are empty bins. Specify a smaller number in nbins()." + } + + if (`uniqmin'<`tsha_p'+1) { + di as text in gr "Warning: Some bins have too few distinct x-values for testing." + } + } + + ************************************************************ + ************************************************************ + * Regression + local nseries=(`tsha_p'-`tsha_s'+1)*(`nbins'-1)+`tsha_p'+1 + local tsha_series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local tsha_series `tsha_series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + tempname tsha_b tsha_V + mata: binsreg_st_spdes(`xsub', "`tsha_series'", "`kmat'", `xcatsub', `tsha_p', 0, `tsha_s', "`bycond'") + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") { + capture `estcmd' `y_var' `tsha_series' `w_var' `wt', nocon `vce' `estmethodopt' + } + else if ("`estmethod'"=="qreg") { + if ("`boot'"=="on") capture bsqreg `y_var' `tsha_series' `w_var', quantile(`quantile') reps(`reps') + else capture qreg `y_var' `tsha_series' `w_var' `wt', quantile(`quantile') `vce' `estmethodopt' + } + else { + capture `estcmd' `y_var' `tsha_series' `w_var' `wt', absorb(`absorb') `reghdfeopt' `vce' + } + + * store results + if (_rc==0) { + matrix `tsha_b'=e(b) + matrix `tsha_V'=e(V) + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") mata: binsreg_checkdrop("`tsha_b'", "`tsha_V'", `nseries') + else mata: binsreg_checkdrop("`tsha_b'", "`tsha_V'", `nseries', "T") + matrix `tsha_b'=`tsha_b'' + } + else { + error _rc + exit _rc + } + + * Predict + mata: `uni_grid_bin'`counter'=binspwc_locate(`uni_grid', st_matrix("`kmat'")) + + * fitted values + mata: `uni_basis'=binsreg_spdes(`uni_grid', "`kmat'", `uni_grid_bin'`counter', `tsha_p', `deriv', `tsha_s') + if (("`estmethod'"=="logit"|"`estmethod'"=="probit")&"`transform'"=="T") { + if (`deriv'==0) { + mata: `fit0'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec0')*st_matrix("`tsha_b'") + if ("`estmethod'"=="logit") { + mata: `fit'=logistic(`fit0'); /// + `se'=logisticden(`fit0'):* /// + binsreg_pred((`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'),.,st_matrix("`tsha_V'"),"se")[,2] + } + else { + mata: `fit'=normal(`fit0'); /// + `se'=normalden(`fit0'):* /// + binsreg_pred((`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'),.,st_matrix("`tsha_V'"),"se")[,2] + } + } + if (`deriv'==1) { + mata: `Xm0'=binsreg_spdes(`uni_grid', "`kmat'", `uni_grid_bin'`counter', `tsha_p', 0, `tsha_s'); /// + `Xm0'=(`Xm0', J(rows(`Xm0'),1,1)#`wvec0'); /// + `fit0'=`Xm0'*st_matrix("`tsha_b'"); /// + `Xm'=(`uni_basis', 
J(rows(`uni_basis'),1,1)#`wvec') + if ("`estmethod'"=="logit") { + mata: `fit'=binsreg_pred(`Xm',st_matrix("`tsha_b'"),.,"xb")[,1] + if ("`asyvar'"=="off") { + mata: `Xm'=logisticden(`fit0'):*(1:-2*logistic(`fit0')):*`fit':*`Xm0' + /// + logisticden(`fit0'):*`Xm'; /// + `se'=sqrt(rowsum((`Xm'*st_matrix("`tsha_V'")):*`Xm')) + } + else { + mata: `se'=logisticden(`fit0'):*(binsreg_pred(`Xm',.,st_matrix("`tsha_V'"),"se")[,2]) + } + mata: `fit'=logisticden(`fit0'):*`fit' + } + else { + mata: `fit'=binsreg_pred(`Xm',st_matrix("`tsha_b'"),.,"xb")[,1] + if ("`asyvar'"=="off") { + mata:`Xm'=(-`fit0'):*normalden(`fit0'):*`fit':*`Xm0' + /// + normalden(`fit0'):*`Xm'; /// + `se'=sqrt(rowsum((`Xm'*st_matrix("`tsha_V'")):*`Xm')) + } + else { + mata: `se'=normalden(`fit0'):*(binsreg_pred(`Xm',.,st_matrix("`tsha_V'"),"se")[,2]) + } + mata: `fit'=normalden(`fit0'):*`fit' + } + } + mata: `Xm'=(`fit', `se') + } + else { + mata: `Xm'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'); /// + `Xm'=binsreg_pred(`Xm', st_matrix("`tsha_b'"), st_matrix("`tsha_V'"), "all") + } + + + * num: fitted value; denom: standard error + mata: `num'`counter'=`Xm'[,1]; /// + `denom'`counter'=`Xm'[,2] + + * For p value + if ("`estmethod'"=="qreg"|"`estmethod'"=="reghdfe") { + if (`deriv'==0) mata: `uni_basis'=(`uni_basis', J(rows(`uni_basis'),1,1)) + else mata: `uni_basis'=(`uni_basis', J(rows(`uni_basis'),1,0)) + mata: `vcov'=st_matrix("`tsha_V'"); /// + `vcov'= (`vcov'[|1,1 \ `nseries', `nseries'|], `vcov'[|1,cols(`vcov') \ `nseries', cols(`vcov')|] \ /// + `vcov'[|cols(`vcov'), 1 \ cols(`vcov'), `nseries'|], `vcov'[cols(`vcov'), cols(`vcov')]); /// + st_matrix("`vcov'", `vcov') + } + + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") { + mata: `se'`counter'=binsreg_pred(`uni_basis', ., st_matrix("`tsha_V'")[|1,1 \ `nseries',`nseries'|], "se")[,2]; /// + `nummat'`counter'=binspwc_nummat(`uni_basis', "`tsha_V'", `nseries') + } + else { + mata: `se'`counter'=binsreg_pred(`uni_basis', ., `vcov', "se")[,2]; 
/// + `nummat'`counter'=binspwc_nummat(`uni_basis', "`vcov'", `=`nseries'+1') + } + + + * pairwise comparison + if (`counter'>1) { + forval gr=1/`=`counter'-1' { + * calculate test stat + if ("`testtype'"=="l") { + mata: `tstat'[`counter2',.]=(max((`num'`counter'-`num'`gr'):/ /// + sqrt((`denom'`counter':^2)+(`denom'`gr':^2))), `counter', `gr') + } + else if ("`testtype'"=="r") { + mata: `tstat'[`counter2',.]=(min((`num'`counter'-`num'`gr'):/ /// + sqrt((`denom'`counter':^2)+(`denom'`gr':^2))), `counter', `gr') + } + else { + if ("`lp'"=="inf") { + mata: `tstat'[`counter2',.]=(max(abs((`num'`counter'-`num'`gr'):/ /// + sqrt((`denom'`counter':^2)+(`denom'`gr':^2)))), `counter', `gr') + } + else { + mata: `tstat'[`counter2',.]=(mean(((`num'`counter'-`num'`gr'):/ /// + sqrt((`denom'`counter':^2)+(`denom'`gr':^2))):^`lp')^(1/`lp'), `counter', `gr') + } + } + + * calculate p val + mata: `pmat'[`counter2',1]=binspwc_pval(`nummat'`counter', `nummat'`gr', `se'`counter', `se'`gr', /// + `tstat'[`counter2',1], `nsims', "`testtype'", "`lp'") + + local ++counter2 + } + } + + drop `tsha_series' + + local ++counter + + } + + mata: st_matrix("`teststat'", `tstat'); st_matrix("`pvalue'", `pmat') + + * drop objects in MATA + mata: mata drop `Xm' `uni_grid' `uni_basis' `tstat' `pmat' `xsub' `ysub' `byindex' `xcatsub' /// + `xvec' `yvec' `byvec' `cluvec' `Xm0' `fit' `fit0' `se' `vcov' `wvec' `wvec0' + + mata: mata drop `uni_grid_bin'* `num'* `denom'* `nummat'* `se'* + + + ****************************** + ******* Display ************** + ****************************** + di "" + di in smcl in gr "Pairwise group comparison based on binscatter estimates" + di in smcl in gr "Estimation method: `estmethod'" + di in smcl in gr "Derivative: `deriv'" + di in smcl in gr "Group variable: `byvarname'" + di in smcl in gr "Bin/Degree selection method: `binselectmethod'" + di in smcl in gr "Placement: `placement'" + + *di in smcl in gr "{hline 30}{c TT}{hline 15}" + *di in smcl in gr "{lalign 
1:Bin/Degree selection:}" _col(30) " {c |} " + *di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `binsp' + *di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `binss' + *di in smcl in gr "{hline 30}{c +}{hline 15}" + *di in smcl in gr "{lalign 1:Hypothesis test:}" _col(30) " {c |} " + *di in smcl in gr "Pairwise Group Comparison, by `byvarname':" + *di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `tsha_p' + *di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `tsha_s' + *di in smcl in gr "{hline 30}{c BT}{hline 15}" + + + forval i=1/`=`counter2'-1' { + local g1=`=`teststat'[`i',2]' + local g2=`=`teststat'[`i',3]' + local group1: word `g1' of `byvalnamelist' + local group2: word `g2' of `byvalnamelist' + + di "" + di in smcl in gr "Group `group1' vs. Group `group2'" + di in smcl in gr "{hline 30}{c TT}{hline 10}{c TT}{hline 10}" + di in smcl in gr "{lalign 1:Group `byvarname'=}" _col(30) " {c |}" _col(38) "`group1'" _col(42) "{c |}" _col(48) "`group2'" + di in smcl in gr "{hline 30}{c +}{hline 10}{c +}{hline 10}" + di in smcl in gr "{lalign 1:# of observations}" _col(30) " {c |} " _col(32) as result %7.0f `=`Nmat'[`g1',1]' _col(42) in gr "{c |}" _col(44) as result %7.0f `=`Nmat'[`g2',1]' + di in smcl in gr "{lalign 1:# of distinct values}" _col(30) " {c |} " _col(32) as result %7.0f `=`Ndistlist'[`g1',1]' _col(42) in gr "{c |}" _col(44) as result %7.0f `=`Ndistlist'[`g2',1]' + di in smcl in gr "{lalign 1:# of clusters}" _col(30) " {c |} " _col(32) as result %7.0f `=`Nclustlist'[`g1',1]' _col(42) in gr "{c |}" _col(44) as result %7.0f `=`Nclustlist'[`g2',1]' + di in smcl in gr "{lalign 1:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `=`pwc_plist'[`g1',1]' _col(42) in gr "{c |}" _col(44) as result %7.0f `=`pwc_plist'[`g2',1]' + di in smcl in gr "{lalign 
1:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `=`pwc_slist'[`g1',1]' _col(42) in gr "{c |}" _col(44) as result %7.0f `=`pwc_slist'[`g2',1]' + di in smcl in gr "{lalign 1:# of bins}" _col(30) " {c |} " _col(32) as result %7.0f `=`nbinslist'[`g1',1]' _col(42) in gr "{c |}" _col(44) as result %7.0f `=`nbinslist'[`g2',1]' + di in smcl in gr "{hline 30}{c BT}{hline 21}" + + di "" + di in smcl in gr "diff = group `group1' - group `group2'" + if ("`testtype'"=="l") { + di in smcl in gr "{hline 19}{c TT}{hline 30}" + di in smcl in gr "H0:" _col(20) in gr /// + "{c |}" _col(22) "sup T" _col(40) "p value" + di in smcl in gr "{hline 19}{c +}{hline 30}" + local stat=`teststat'[`i',1] + local pval=`pvalue'[`i',1] + di in smcl in gr "diff<=0" _col(20) in gr "{c |}" /// + _col(22) as result %7.3f `stat' /// + _col(40) as result %7.3f `pval' + + di in smcl in gr "{hline 19}{c BT}{hline 30}" + } + else if ("`testtype'"=="r") { + di in smcl in gr "{hline 19}{c TT}{hline 30}" + di in smcl in gr "H0:" _col(20) in gr /// + "{c |}" _col(22) "inf T" _col(40) "p value" + di in smcl in gr "{hline 19}{c +}{hline 30}" + local stat=`teststat'[`i',1] + local pval=`pvalue'[`i',1] + di in smcl in gr "diff>=0" _col(20) in gr "{c |}" /// + _col(22) as result %7.3f `stat' /// + _col(40) as result %7.3f `pval' + di in smcl in gr "{hline 19}{c BT}{hline 30}" + } + else { + di in smcl in gr "{hline 19}{c TT}{hline 30}" + if ("`lp'"=="inf") { + di in smcl in gr "H0:" _col(20) in gr /// + "{c |}" _col(22) "sup |T|" _col(40) "p value" + } + else { + di in smcl in gr "H0:" _col(20) in gr /// + "{c |}" _col(22) "L`lp' of T" _col(40) "p value" + } + di in smcl in gr "{hline 19}{c +}{hline 30}" + local stat=`teststat'[`i',1] + local pval=`pvalue'[`i',1] + di in smcl in gr "diff=0" _col(20) in gr "{c |}" /// + _col(22) as result %7.3f `stat' /// + _col(40) as result %7.3f `pval' + di in smcl in gr "{hline 19}{c BT}{hline 30}" + } + di "" + } + + 
******************************************************* + + ********** Return *************** + ********************************* + ereturn clear + * # of observations + ereturn scalar N=`Ntotal' + ereturn scalar p=`binsp' + ereturn scalar s=`binss' + + * by pair + ereturn matrix pval=`pvalue' + ereturn matrix stat=`teststat' + + * by group: + ereturn matrix nbins_by=`nbinslist' + ereturn matrix pwc_plist=`pwc_plist' + ereturn matrix pwc_slist=`pwc_slist' + ereturn matrix Nclust_by=`Nclustlist' + ereturn matrix Ndist_by=`Ndistlist' + ereturn matrix N_by=`Nmat' + + ereturn matrix imse_var_rot=`mat_imse_var_rot' + ereturn matrix imse_bsq_rot=`mat_imse_bsq_rot' + ereturn matrix imse_var_dpi=`mat_imse_var_dpi' + ereturn matrix imse_bsq_dpi=`mat_imse_bsq_dpi' + + + * local: corresponding by-values + ereturn local byvalue `byvalnamelist' +end + +mata: + // calculate numerator matrix for simulation + real matrix binspwc_nummat(real matrix X, string scalar covname, real scalar k) + { + real matrix cov, num, U, V, sv + + cov=st_matrix(covname)[|1,1\k,k|] + if (rank(cov)==k) { + num=X*cholesky(cov) + } + else { + svd(cov, U=., sv=., V=.) 
+ pragma unused V + num=X*U*diag(sv:^0.5)*U' + } + + return(num) + } + + + // calculate p val for pairwise comparison + real scalar binspwc_pval(real matrix nummatA, real matrix nummatB, /// + real vector denomA, real vector denomB, + real scalar stat, real scalar rep, /// + string scalar type, string scalar metric) + { + real scalar kA, kB, i, lp, pval + real vector t + + kA=cols(nummatA) + kB=cols(nummatB) + if (metric!="inf") { + lp=strtoreal(metric) + } + pval=0 + + for (i=1; i<=rep; i++) { + t=(nummatA*rnormal(kA,1,0,1)-nummatB*rnormal(kB,1,0,1)):/sqrt(denomA:^2+denomB:^2) + if (type=="l") { + pval=pval+(max(t)>=stat) + } + else if (type=="r") { + pval=pval+(min(t)<=stat) + } + else { + if (metric=="inf") { + pval=pval+(max(abs(t))>=stat) + } + else { + pval=pval+(mean(abs(t):^lp)^(1/lp)>=stat) + } + } + } + + return(pval/rep) + } + + + // locate a point + real vector binspwc_locate(real vector x, real vector kmat) + { + real vector bin, index + real scalar n, nbin, i, lb, ub + + n=rows(x) + nbin=rows(kmat)-1 + bin=J(n,1,.) + for (i=1; i<=nbin; i++) { + lb=kmat[i,1] + ub=kmat[i+1,1] + if (i<nbin) index=selectindex((x:>=lb):&(x:<ub)) + else index=selectindex((x:>=lb):&(x:<=ub)) + if (rows(index)>0) { + bin[index]=J(rows(index),1,i) + } + } + + return(bin) + } + +end diff --git a/110/replication_package/replication/ado/plus/b/binspwc.sthlp b/110/replication_package/replication/ado/plus/b/binspwc.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..bf6bdd2810db504dd6c09513258b3dea7b996d6c --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binspwc.sthlp @@ -0,0 +1,327 @@ +{smcl} +{* *! 
version 1.2 09-OCT-2022}{...} +{viewerjumpto "Syntax" "binspwc##syntax"}{...} +{viewerjumpto "Description" "binspwc##description"}{...} +{viewerjumpto "Options" "binspwc##options"}{...} +{viewerjumpto "Examples" "binspwc##examples"}{...} +{viewerjumpto "Stored results" "binspwc##stored_results"}{...} +{viewerjumpto "References" "binspwc##references"}{...} +{viewerjumpto "Authors" "binspwc##authors"}{...} +{cmd:help binspwc} +{hline} + +{title:Title} + +{p 4 8}{hi:binspwc} {hline 2} Data-Driven Nonparametric Pairwise Group Comparison using Binscatter.{p_end} + + +{marker syntax}{...} +{title:Syntax} + +{p 4 12} {cmdab:binspwc} {depvar} {it:indvar} [{it:othercovs}] {ifin} {weight} {cmd:,} {opt by(varname)} [{p_end} +{p 12 12} {opt estmethod(cmdname)} {opt deriv(v)} {opt at(position)} {opt nolink}{p_end} +{p 12 12} {opt absorb(absvars)} {opt reghdfeopt(reghdfe_option)}{p_end} +{p 12 12} {opt pwc(pwcopt)} {opt testtype(type)} {opt lp(metric)}{p_end} +{p 12 12} {opt bins(p s)} {opt bynbins(bynbinsopt)} {opt binspos(position)} {opt binsmethod(method)} {opt nbinsrot(#)} {opt samebinsby} {opt randcut(#)}{p_end} +{p 12 12} {cmd:pselect(}{it:{help numlist}}{cmd:)} {cmd:sselect(}{it:{help numlist}}{cmd:)}{p_end} +{p 12 12} {opt nsims(#)} {opt simsgrid(#)} {opt simsseed(seed)}{p_end} +{p 12 12} {opt dfcheck(n1 n2)} {opt masspoints(masspointsoption)}{p_end} +{p 12 12} {cmd:vce(}{it:{help vcetype}}{cmd:)} {opt asyvar(on/off)} {opt estmethodopt(cmd_option)} {opt usegtools(on/off)} ]{p_end} + +{p 4 8} where {depvar} is the dependent variable, {it:indvar} is the independent variable for binning, and {it:othercovs} are other covariates to be controlled for.{p_end} + +{p 4 8} The degree of the piecewise polynomial p, the number of smoothness constraints s, and the derivative order v are integers +satisfying 0 <= s,v <= p, which can take different values in each case.{p_end} + +{p 4 8} {opt fweight}s, {opt aweight}s and {opt pweight}s are allowed; see {help weight}.{p_end} + +{marker 
description}{...} +{title:Description} + +{p 4 8} {cmd:binspwc} implements binscatter-based hypothesis testing procedures for pairwise group comparison of binscatter estimators, following the results in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":Cattaneo, Crump, Farrell and Feng (2022a)}. +If the binning scheme is not set by the user, the companion command {help binsregselect:binsregselect} is used to implement binscatter +in a data-driven (optimal) way and inference procedures are based on robust bias correction. +Binned scatter plots based on different models can be constructed using the companion commands {help binsreg:binsreg}, +{help binsqreg: binsqreg}, {help binslogit:binslogit} and {help binsprobit:binsprobit}. +{p_end} + +{p 4 8} A detailed introduction to this command is given in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Cattaneo, Crump, Farrell and Feng (2022b)}. +Companion R and Python packages with the same capabilities are available (see website below). 
+{p_end} + +{p 4 8} Companion commands: {help binsreg:binsreg} for binscatter least squares regression with robust inference procedures and plots, +{help binsqreg:binsqreg} for binscatter quantile regression with robust inference procedures and plots, +{help binslogit:binslogit} for binscatter logit estimation with robust inference procedures and plots, +{help binsprobit:binsprobit} for binscatter probit estimation with robust inference procedures and plots, and +{help binsregselect:binsregselect} for data-driven (optimal) binning selection.{p_end} + +{p 4 8} Related Stata, R and Python packages are available in the following website:{p_end} + +{p 8 8} {browse "https://nppackages.github.io/":https://nppackages.github.io/}{p_end} + + +{marker options}{...} +{title:Options} + +{dlgtab:Estimand} + +{p 4 8} {opt by(varname)} specifies the variable containing the group indicator to perform subgroup analysis; both numeric and string variables are supported. +When {opt by(varname)} is specified, {cmdab:binspwc} implements estimation for each subgroup separately and then conduct {it:all} pairwise comparison tests. +By default, the binning structure is selected for each subgroup separately, but see the option +{cmd:samebinsby} below for imposing a common binning structure across subgroups. +This option is required. +{p_end} + +{p 4 8} {opt estmethod(cmdname)} specifies the binscatter model. The default is {cmd:estmethod(reg)}, +which corresponds to the binscatter least squares regression. Other options are: +{cmd:estmethod(qreg #)} for binscatter quantile regression where # is the quantile to be estimated, +{cmd:estmethod(logit)} for binscatter logistic regression and {cmd:estmethod(probit)} for binscatter probit regression. +{p_end} + +{p 4 8} {opt deriv(v)} specifies the derivative order of the regression function for estimation, testing and plotting. +The default is {cmd:deriv(0)}, which corresponds to the function itself. 
+{p_end} + +{p 4 8} {opt at(position)} specifies the values of {it:othercovs} at which the estimated function is evaluated for plotting. +The default is {cmd:at(mean)}, which corresponds to the mean of {it:othercovs}. Other options are: {cmd:at(median)} for the +median of {it:othercovs}, {cmd:at(0)} for zeros, and {cmd:at(filename)} for particular values of {it:othercovs} saved in another file. +{p_end} + +{p 4 8} Note: When {cmd:at(mean)} or {cmd:at(median)} is specified, all factor variables in {it:othercovs} (if specified) +are excluded from the evaluation (set as zero). +{p_end} + +{p 4 8}{opt nolink} specifies that the function within the inverse link (logistic) function be reported instead of +the conditional probability function. This option is used only if logit or probit model is specified in {cmd:estmethod()}. +{p_end} + +{dlgtab:Reghdfe} + +{p 4 8} {opt absorb(absvars)} specifies categorical variables (or interactions) representing the fixed effects to be absorbed. +This is equivalent to including an indicator/dummy variable for each category of each {it:absvar}. +When {cmd:absorb()} is specified, the community-contributed command {cmd:reghdfe} instead of the command {cmd:regress} is used. +{p_end} + +{p 4 8} {opt reghdfeopt(reghdfe_option)} options to be passed on to the command {cmd:reghdfe}. +Important: {cmd:absorb()} and {cmd:vce()} should not be specified within this option. +{p_end} + +{p 4 8} For more information about the community-contributed command {cmd:reghdfe}, +please see {browse "http://scorreia.com/software/reghdfe/":http://scorreia.com/software/reghdfe/}. + +{dlgtab:Pairwise Group Comparison Testing} + +{p 4 8} {opt pwc(pwcopt)} sets the degree of polynomial and the number of smoothness constraints +for pairwise group comparison. If {cmd:pwc(p s)} is specified, a piecewise polynomial of degree +{it:p} with {it:s} smoothness constraints is used. 
+If {cmd:pwc(T)} or {cmd:pwc()} is specified, +{cmd:pwc(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +The default is {cmd:pwc()}. +{p_end} + +{p 4 8} {opt testtype(type)} specifies the type of pairwise comparison test. The default is {opt testtype(2)}, +which corresponds to a two-sided test of the form H0: {it:mu_1(x)=mu_2(x)}. Other options are: {opt testtype(l)} +for the one-sided test of the form H0: {it:mu_1(x)<=mu_2(x)} and {opt testtype(r)} for the one-sided test of the form H0: {it:mu_1(x)>=mu_2(x)}. +{p_end} + +{p 4 8} {opt lp(metric)} specifies an Lp metric used for a (two-sided) test for the difference between two groups. The default is {cmd:lp(inf)}, +which corresponds to the sup-norm. Other options are {cmd:Lp(q)} for a positive integer {cmd:q}. +{p_end} + +{dlgtab:Binning/Degree/Smoothness Selection} + +{p 4 8} {opt bins(p s)} sets a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints for data-driven (IMSE-optimal) +selection of the partitioning/binning scheme. +The default is {cmd:bins(0 0)}, which corresponds to the piecewise constant. + +{p 4 8} {opt bynbins(bynbinsopt)} sets the number of bins for partitioning/binning of {it:indvar}. +If {cmd:bynbins(}{help numlist}{cmd:)} is specified, the number in the {help numlist} +is applied to the binscatter estimation for each group. The ordering of the group follows +the result of {help tabulate oneway:tabulate}. If a single number of bins is specified, it applies to the estimation for all groups. +If {cmd:bynbins(T)} or {cmd:bynbins()} (default) is specified, the number of bins is selected via the companion command +{help binsregselect:binsregselect} in a data-driven, optimal way whenever possible. 
+{p_end} + +{p 4 8} Note: If a {it:numlist} with more than one number is supplied within {cmd:bynbins()}, it is understood as the number of bins applied to binscatter estimation for each subgroup rather than the range for selecting the number of bins. +{p_end} + +{p 4 8} {opt binspos(position)} specifies the position of binning knots. +The default is {cmd:binspos(qs)}, which corresponds to quantile-spaced binning (canonical binscatter). +Other options are: {cmd:es} for evenly-spaced binning, or a {help numlist} for manual specification of +the positions of inner knots (which must be within the range of {it:indvar}). +{p_end} + +{p 4 8} {opt binsmethod(method)} specifies the method for data-driven selection of the number of bins via +the companion command {help binsregselect:binsregselect}. +The default is {cmd:binsmethod(dpi)}, which corresponds to the IMSE-optimal direct plug-in rule. +The other option is: {cmd:rot} for rule of thumb implementation. +{p_end} + +{p 4 8} {opt nbinsrot(#)} specifies an initial number of bins value used to construct the DPI number of bins selector. +If not specified, the data-driven ROT selector is used instead. +{p_end} + +{p 4 8} {opt samebinsby} forces a common partitioning/binning structure across all subgroups specified by the option {cmd:by()}. +The knots positions are selected according to the option {cmd:binspos()} and using the full sample. +If {cmd:nbins()} is not specified, then the number of bins is selected via the companion +command {help binsregselect:binsregselect} and using the full sample.{p_end} + +{p 4 8} {opt randcut(#)} specifies the upper bound on a uniformly distributed variable used to draw a subsample +for bins/degree/smoothness selection. +Observations for which {cmd:runiform()<=#} are used. # must be between 0 and 1. +By default, max(5,000, 0.01n) observations are used if the samples size n>5,000. 
+{p_end} + +{p 4 8} {opt pselect(numlist)} specifies a list of numbers within which the degree of polynomial {it:p} +for point estimation is selected. If the selected optimal degree is {it:p}, +then piecewise polynomials of degree {it:p+1} are used to conduct pairwise +group comparison. +{p_end} + +{p 4 8} {opt sselect(numlist)} specifies a list of numbers within which the number of smoothness constraints {it:s} +for point estimation is selected. If the selected optimal smoothness is {it:s}, +then piecewise polynomials with {it:s+1} smoothness constraints are used to conduct pairwise +group comparison. +If not specified, for each value {it:p} supplied in the +option {cmd:pselect()}, only the piecewise polynomial with the maximum smoothness is considered, i.e., {it:s=p}. +{p_end} + +{p 4 8} Note: To implement the degree or smoothness selection, in addition to {cmd:pselect()} +or {cmd:sselect()}, {cmd:bynbins(}{help numlist}{cmd:)} must be specified. +{p_end} + +{dlgtab:Simulation} + +{p 4 8} {opt nsims(#)} specifies the number of random draws for hypothesis testing. +The default is {cmd:nsims(500)}, which corresponds to 500 draws from a standard Gaussian random vector of size [(p+1)*J - (J-1)*s]. +A large number of random draws is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsgrid(#)} specifies the number of evaluation points of an evenly-spaced grid +within each bin used for evaluation of the supremum (infimum or Lp metric) operation needed to +construct confidence bands and hypothesis testing procedures. +The default is {cmd:simsgrid(20)}, which corresponds to 20 evenly-spaced evaluation points +within each bin for approximating the supremum (infimum or Lp metric) operator. +A large number of evaluation points is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsseed(#)} sets the seed for simulations. 
+{p_end} + +{dlgtab:Mass Points and Degrees of Freedom} + +{p 4 8} {opt dfcheck(n1 n2)} sets cutoff values for minimum effective sample size checks, +which take into account the number of unique values of {it:indvar} (i.e., adjusting for the number of mass points), +number of clusters, and degrees of freedom of the different statistical models considered. +The default is {cmd:dfcheck(20 30)}. See Cattaneo, Crump, Farrell and Feng (2022b) for more details. +{p_end} + +{p 4 8} {opt masspoints(masspointsoption)} specifies how mass points in {it:indvar} are handled. +By default, all mass point and degrees of freedom checks are implemented. +Available options: +{p_end} +{p 8 8} {opt masspoints(noadjust)} omits mass point checks and the corresponding effective sample size adjustments.{p_end} +{p 8 8} {opt masspoints(nolocalcheck)} omits within-bin mass point and degrees of freedom checks.{p_end} +{p 8 8} {opt masspoints(off)} sets {opt masspoints(noadjust)} and {opt masspoints(nolocalcheck)} simultaneously.{p_end} +{p 8 8} {opt masspoints(veryfew)} forces the command to proceed as if {it:indvar} has only a few number of mass points (i.e., distinct values). +In other words, forces the command to proceed as if the mass point and degrees of freedom checks were failed.{p_end} + +{dlgtab:Other Options} + +{p 4 8} {cmd:vce(}{it:{help vcetype}}{cmd:)} specifies the {it:vcetype} for variance estimation used by +the commands {help regress##options:regress}, +{help logit##options:logit}, {help logit##options:logit}, {help qreg##qreg_options:qreg} or {cmd:reghdfe}. +The default is {cmd:vce(robust)}. +{p_end} + +{p 4 8} {opt asyvar(on/off)} specifies the method used to compute standard errors. +If {cmd:asyvar(on)} is specified, the standard error of the nonparametric component is used and the uncertainty +related to other control variables {it:othercovs} is omitted. +Default is {cmd:asyvar(off)}, that is, the uncertainty related to {it:othercovs} is taken into account. 
+{p_end} + +{p 4 8} {opt estmethodopt(cmd_option)} options to be passed on to the estimation command specified in {cmd:estmethod()}. +For example, options that control for the optimization process can be added here. +{p_end} + +{p 4 8}{opt usegtools(on/off)} forces the use of several commands in the community-distributed Stata package +{cmd:gtools} to speed the computation up, if {it:on} is specified. +Default is {cmd:usegtools(off)}. +{p_end} + +{p 4 8} For more information about the package {cmd:gtools}, please see {browse "https://gtools.readthedocs.io/en/latest/index.html":https://gtools.readthedocs.io/en/latest/index.html}. +{p_end} + +{marker examples}{...} +{title:Examples} + +{p 4 8} Setup{p_end} +{p 8 8} . {stata sysuse auto}{p_end} + +{p 4 8} Generate two groups{p_end} +{p 8 8} . {stata gen group=price>5000}{p_end} + +{p 4 8} Test for the difference between two groups{p_end} +{p 8 8} . {stata binspwc mpg weight foreign, by(group)}{p_end} + + +{marker stored_results}{...} +{title:Stored results} + +{synoptset 17 tabbed}{...} +{p2col 5 17 21 2: Scalars}{p_end} +{synopt:{cmd:e(N)}}number of observations{p_end} +{synopt:{cmd:e(p)}}degree of polynomial for bin selection{p_end} +{synopt:{cmd:e(s)}}smoothness of polynomial for bin selection{p_end} +{synopt:{cmd:e(pwc_p)}}degree of polynomial for testing{p_end} +{synopt:{cmd:e(pwc_s)}}smoothness of polynomial for testing{p_end} +{p2col 5 17 21 2: Macros}{p_end} +{synopt:{cmd:e(byvalue)}}name of groups found in {cmd:by()}{p_end} +{p2col 5 17 21 2: Matrices}{p_end} +{synopt:{cmd:e(N_by)}}number of observations for each group{p_end} +{synopt:{cmd:e(Ndist_by)}}number of distinct values for each group{p_end} +{synopt:{cmd:e(Nclust_by)}}number of clusters for each group{p_end} +{synopt:{cmd:e(nbins_by)}}number of bins for each group{p_end} +{synopt:{cmd:e(stat)}}test statistics for all pairwise comparisons{p_end} +{synopt:{cmd:e(pval)}}p values for all pairwise comparisons{p_end} +{synopt:{cmd:e(imse_var_rot)}}variance 
constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_bsq_rot)}}bias constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_var_dpi)}}variance constant in IMSE, DPI selection{p_end} +{synopt:{cmd:e(imse_bsq_dpi)}}bias constant in IMSE, DPI selection{p_end} + +{marker references}{...} +{title:References} + +{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022a. +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":On Binscatter}. +{it:arXiv:1902.09608}. +{p_end} + +{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022b. +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Binscatter Regressions}. +{it:arXiv:1902.09615}. +{p_end} + + +{marker authors}{...} +{title:Authors} + +{p 4 8} Matias D. Cattaneo, Princeton University, Princeton, NJ. +{browse "mailto:cattaneo@princeton.edu":cattaneo@princeton.edu}. +{p_end} + +{p 4 8} Richard K. Crump, Federal Reserve Bank of New York, New York, NY. +{browse "mailto:richard.crump@ny.frb.org":richard.crump@ny.frb.org}. +{p_end} + +{p 4 8} Max H. Farrell, University of Chicago, Chicago, IL. +{browse "mailto:max.farrell@chicagobooth.edu":max.farrell@chicagobooth.edu}. +{p_end} + +{p 4 8} Yingjie Feng, Tsinghua University, Beijing, China. +{browse "mailto:fengyingjiepku@gmail.com":fengyingjiepku@gmail.com}. +{p_end} + diff --git a/110/replication_package/replication/ado/plus/b/binsqreg.ado b/110/replication_package/replication/ado/plus/b/binsqreg.ado new file mode 100644 index 0000000000000000000000000000000000000000..11173c518677b724b48ca963280f57b33ac23e55 --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binsqreg.ado @@ -0,0 +1,2392 @@ +*! 
version 1.2 09-Oct-2022 + +capture program drop binsqreg +program define binsqreg, eclass + version 13 + + syntax varlist(min=2 numeric fv ts) [if] [in] [fw pw] [, /// + quantile(numlist max=1 >0 <1) deriv(integer 0) at(string asis) /// + qregopt(string asis) /// + dots(string) dotsgrid(string) dotsplotopt(string asis) /// + line(string) linegrid(integer 20) lineplotopt(string asis) /// + ci(string) cigrid(string) ciplotopt(string asis) /// + cb(string) cbgrid(integer 20) cbplotopt(string asis) /// + polyreg(string) polyreggrid(integer 20) polyregcigrid(integer 0) polyregplotopt(string asis) /// + by(varname) bycolors(string asis) bysymbols(string asis) bylpatterns(string asis) /// + nbins(string) binspos(string) binsmethod(string) nbinsrot(string) /// + pselect(numlist integer >=0) sselect(numlist integer >=0) /// + samebinsby randcut(numlist max=1 >=0 <=1) /// + nsims(integer 500) simsgrid(integer 20) simsseed(numlist integer max=1 >=0) /// + dfcheck(numlist integer max=2 >=0) masspoints(string) usegtools(string) /// + vce(passthru) level(real 95) asyvar(string) /// + noplot savedata(string asis) replace /// + plotxrange(numlist asc max=2) plotyrange(numlist asc max=2) *] + + ********************************************* + * Regularization constant (for checking only) + local qrot=2 + + ************************************** + * Create weight local + if ("`weight'"!="") { + local wt [`weight'`exp'] + local wtype=substr("`weight'",1,1) + } + + ********************** + ** Extract options *** + ********************** + * default vce, clustered? 
+ if ("`vce'"=="") local vce "vce(robust)" + local vcetemp: subinstr local vce "vce(" "", all + local vcetemp: subinstr local vcetemp ")" "", all + tokenize "`vcetemp'", parse(", ") + if ("`1'"=="cl"|"`1'"=="clu"|"`1'"=="clus"|"`1'"=="clust"| /// + "`1'"=="cluste"|"`1'"=="cluster") { + local clusterON "T" /* Mark cluster is specified */ + local clustervar `2' + local vce "vce(robust)" + di as text in gr "Warning: vce(cluster) not allowed. vce(robust) used instead." + } + + * use bootstrap cmd? + if ("`1'"=="boot" | "`1'"=="bootstrap") { + local boot "on" + local repstemp `3' + if ("`repstemp'"=="") local repstemp reps(20) + local repstemp: subinstr local repstemp "reps(" "", all + local reps: subinstr local repstemp ")" "", all + if ("`weight'"!="") { + di as error "Weights not allowed for bootstrapping." + exit + } + } + else { + local boot "off" + } + + if ("`asyvar'"=="") local asyvar "off" + + * vce for bin selection purpose + if ("`vce'"=="vce(iid)") local vce_select "vce(ols)" + else local vce_select "vce(robust)" + + + * default for quantile() + if ("`quantile'"=="") local quantile=0.5 + + if ("`binsmethod'"=="rot") local binsmethod "ROT" + if ("`binsmethod'"=="dpi") local binsmethod "DPI" + if ("`binsmethod'"=="") local binsmethod "DPI" + if ("`binspos'"=="es") local binspos "ES" + if ("`binspos'"=="qs") local binspos "QS" + if ("`binspos'"=="") local binspos "QS" + + + * analyze options related to degrees ************* + if ("`dots'"!="T"&"`dots'"!="F"&"`dots'"!="") { + numlist "`dots'", integer max(2) range(>=0) + local dots=r(numlist) + } + if ("`line'"!="T"&"`line'"!="F"&"`line'"!="") { + numlist "`line'", integer max(2) range(>=0) + local line=r(numlist) + } + if ("`ci'"!="T"&"`ci'"!="F"&"`ci'"!="") { + numlist "`ci'", integer max(2) range(>=0) + local ci=r(numlist) + } + if ("`cb'"!="T"&"`cb'"!="F"&"`cb'"!="") { + numlist "`cb'", integer max(2) range(>=0) + local cb=r(numlist) + } + + + if ("`dots'"=="F") { /* shut down dots */ + local dots "" + local 
dotsgrid 0 + } + if ("`line'"=="F") local line "" + if ("`ci'"=="F") local ci "" + if ("`cb'"=="F") local cb "" + + + *************************************************************** + * 4 cases: select J, select p, user specified both, and error + local selection "" + + * analyze nbins + if ("`nbins'"=="T") local nbins=0 + local len_nbins=0 + if ("`nbins'"!=""&"`nbins'"!="F") { + numlist "`nbins'", integer sort + local nbins=r(numlist) + local len_nbins: word count `nbins' + } + + * analyze numlist in pselect and sselect + local len_p=0 + local len_s=0 + + if ("`pselect'"!="") { + numlist "`pselect'", integer range(>=`deriv') sort + local plist=r(numlist) + } + + if ("`sselect'"!="") { + numlist "`sselect'", integer range(>=0) sort + local slist=r(numlist) + } + + local len_p: word count `plist' + local len_s: word count `slist' + + if (`len_p'==1&`len_s'==0) { + local slist `plist' + local len_s=1 + } + if (`len_p'==0&`len_s'==1) { + local plist `slist' + local len_p=1 + } + + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + if ("`nbins'"!=""|"`pselect'"!=""|"`sselect'"!="") { + di as error "nbins(), pselect() or sselect() incorrectly specified." + exit + } + } + + * 1st case: select J + if (("`nbins'"=="0"|`len_nbins'>1|"`nbins'"=="")&("`binspos'"=="ES"|"`binspos'"=="QS")) local selection "J" + if ("`selection'"=="J") { + if (`len_p'>1|`len_s'>1) { + if ("`nbins'"=="") { + di as error "nbins() must be specified for degree/smoothness selection." + exit + } + else { + di as error "only one p and one s are allowed to select # of bins." 
+ exit + } + } + if ("`plist'"=="") local plist=`deriv' + if ("`slist'"=="") local slist=`plist' + if ("`dots'"!=""&"`dots'"!="T"&"`dots'"!="F") { /* respect user-specified dots */ + local plist: word 1 of `dots' + local slist: word 2 of `dots' + if ("`slist'"=="") local slist `plist' + } + if ("`dots'"==""|"`dots'"=="T") local dots `plist' `slist' /* selection is based on dots */ + if ("`line'"=="T") local line `plist' `slist' + if ("`ci'"=="T") local ci `=`plist'+1' `=`slist'+1' + if ("`cb'"=="T") local cb `=`plist'+1' `=`slist'+1' + local len_p=1 + local len_s=1 + } /* e.g., binsreg y x, nbins(a b) or nbins(T) or pselect(a) nbins(T) */ + + + * 2nd case: select P (at least for one object) + if ("`selection'"!="J" & ("`dots'"==""|"`dots'"=="T"|"`line'"=="T"|"`ci'"=="T"|"`cb'"=="T")) { + local pselectOK "T" /* p selection CAN be turned on as long as one of the four is T */ + } + + if ("`pselectOK'"=="T" & `len_nbins'==1 & (`len_p'>1|`len_s'>1)) { + local selection "P" + } /* e.g., binsreg y x, pselect(a b) or pselect() dots(T) */ + + * 3rd case: completely user-specified J and p + if ((`len_p'<=1&`len_s'<=1) & "`selection'"!="J") { + local selection "NA" + if ("`dots'"==""|"`dots'"=="T") { + if (`len_p'==1&`len_s'==1) local dots `plist' `slist' + else local dots `deriv' `deriv' /* e.g., binsreg y x or , dots(0 0) nbins(20) */ + } + tokenize `dots' + if ("`2'"=="") local 2 `1' + if ("`line'"=="T") { + if (`len_p'==1&`len_s'==1) local line `plist' `slist' + else local line `dots' + } + if ("`ci'"=="T") { + if (`len_p'==1&`len_s'==1) local ci `=`plist'+1' `=`slist'+1' + else local ci `=`1'+1' `=`2'+1' + } + if ("`cb'"=="T") { + if (`len_p'==1&`len_s'==1) local cb `=`plist'+1' `=`slist'+1' + else local cb `=`1'+1' `=`2'+1' + } + } + + * exclude all other cases + if ("`selection'"=="") { + di as error "Degree, smoothness, or # of bins are not correctly specified." + exit + } + + ****** Now, extract from dots, line, etc. 
************ + * dots + tokenize `dots' + local dots_p "`1'" + local dots_s "`2'" + if ("`dots_p'"==""|"`dots_p'"=="T") local dots_p=. + if ("`dots_s'"=="") local dots_s `dots_p' + + if ("`dotsgrid'"=="") local dotsgrid "mean" + local dotsngrid_mean=0 + if (strpos("`dotsgrid'","mean")!=0) { + local dotsngrid_mean=1 + local dotsgrid: subinstr local dotsgrid "mean" "", all + } + if (wordcount("`dotsgrid'")==0) local dotsngrid=0 + else { + confirm integer n `dotsgrid' + local dotsngrid `dotsgrid' + } + local dotsntot=`dotsngrid_mean'+`dotsngrid' + + + * line + tokenize `line' + local line_p "`1'" + local line_s "`2'" + local linengrid `linegrid' + if ("`line'"=="") local linengrid=0 + if ("`line_p'"==""|"`line_p'"=="T") local line_p=. + if ("`line_s'"=="") local line_s `line_p' + + * ci + if ("`cigrid'"=="") local cigrid "mean" + local cingrid_mean=0 + if (strpos("`cigrid'","mean")!=0) { + local cingrid_mean=1 + local cigrid: subinstr local cigrid "mean" "", all + } + if (wordcount("`cigrid'")==0) local cingrid=0 + else { + confirm integer n `cigrid' + local cingrid `cigrid' + } + local cintot=`cingrid_mean'+`cingrid' + + tokenize `ci' + local ci_p "`1'" + local ci_s "`2'" + if ("`ci'"=="") local cintot=0 + if ("`ci_p'"==""|"`ci_p'"=="T") local ci_p=. + if ("`ci_s'"=="") local ci_s `ci_p' + + * cb + tokenize `cb' + local cb_p "`1'" + local cb_s "`2'" + local cbngrid `cbgrid' + if ("`cb'"=="") local cbngrid=0 + if ("`cb_p'"==""|"`cb_p'"=="T") local cb_p=. + if ("`cb_s'"=="") local cb_s `cb_p' + + * Add warnings about degrees for estimation and inference + if ("`selection'"=="J") { + if ("`ci_p'"!=".") { + if (`ci_p'<=`dots_p') { + local ci_p=`dots_p'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the degree for dots()." + } + } + if ("`cb_p'"!=".") { + if (`cb_p'<=`dots_p') { + local cb_p=`dots_p'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. 
It must be greater than the degree for dots()." + } + } + } + if ("`selection'"=="NA") { + if ("`ci'"!=""|"`cb'"!="") { + di as text "Warning: Confidence intervals/bands are valid when nbins() is much larger than IMSE-optimal choice." + } + } + * if selection==P, compare ci_p/cb_p with P_opt later + + * poly fit + local polyregngrid `polyreggrid' + local polyregcingrid `polyregcigrid' + if ("`polyreg'"!="") { + confirm integer n `polyreg' + } + else { + local polyregngrid=0 + } + + * range of x axis and y axis? + tokenize `plotxrange' + local min_xr "`1'" + local max_xr "`2'" + tokenize `plotyrange' + local min_yr "`1'" + local max_yr "`2'" + + + * Simuls + local simsngrid=`simsgrid' + + * Record if nbins specified by users, set default + local nbins_full `nbins' /* local save common nbins */ + if ("`selection'"=="NA") local binselectmethod "User-specified" + else { + if ("`binsmethod'"=="DPI") local binselectmethod "IMSE-optimal plug-in choice" + if ("`binsmethod'"=="ROT") local binselectmethod "IMSE-optimal rule-of-thumb choice" + if ("`selection'"=="J") local binselectmethod "`binselectmethod' (select # of bins)" + if ("`selection'"=="P") local binselectmethod "`binselectmethod' (select degree and smoothness)" + } + + * Mass point check? + if ("`masspoints'"=="") { + local massadj "T" + local localcheck "T" + } + else if ("`masspoints'"=="off") { + local massadj "F" + local localcheck "F" + } + else if ("`masspoints'"=="noadjust") { + local massadj "F" + local localcheck "T" + } + else if ("`masspoints'"=="nolocalcheck") { + local massadj "T" + local localcheck "F" + } + else if ("`masspoints'"=="veryfew") { + local fewmasspoints "T" /* count mass point, but turn off checks */ + } + + * extract dfcheck + if ("`dfcheck'"=="") local dfcheck 20 30 + tokenize `dfcheck' + local dfcheck_n1 "`1'" + local dfcheck_n2 "`2'" + + * evaluate at w from another dataset? 
+ if (`"`at'"'!=`""'&`"`at'"'!=`"mean"'&`"`at'"'!=`"median"'&`"`at'"'!=`"0"') local atwout "user" + + + * use gtools commands instead? + if ("`usegtools'"=="off") local usegtools "" + if ("`usegtools'"=="on") local usegtools usegtools + + if ("`usegtools'"!="") { + capture which gtools + if (_rc) { + di as error "Gtools package not installed." + exit + } + local localcheck "F" + local sel_gtools "on" + * use gstats tab instead of tabstat/collapse + * use gquantiles instead of _pctile + * use gunique instead of binsreg_uniq + * use fasterxtile instead of irecode (within binsreg_irecode) + * shut down local checks & do not sort + } + else local sel_gtools "off" + + + ************************* + **** error checks ******* + ************************* + if (`deriv'<0) { + di as error "Derivative incorrectly specified." + exit + } + if (`dotsngrid'<0|`linengrid'<0|`cingrid'<0|`cbngrid'<0|`simsngrid'<0) { + di as error "Number of evaluation points incorrectly specified." + exit + } + if (`level'>100|`level'<0) { + di as error "Confidence level incorrectly specified." + exit + } + if ("`dots_p'"!=".") { + if (`dots_p'<`dots_s') { + di as error "p cannot be smaller than s." + exit + } + if (`dots_p'<`deriv') { + di as error "p for dots cannot be less than deriv." + exit + } + } + if ("`line_p'"!=".") { + if (`line_p'<`line_s') { + di as error "p cannot be smaller than s." + exit + } + if (`line_p'<`deriv') { + di as error "p for line cannot be less than deriv." + exit + } + } + if ("`ci_p'"!=".") { + if (`ci_p'<`ci_s') { + di as error "p cannot be smaller than s." + exit + } + if (`ci_p'<`deriv') { + di as error "p for CI cannot be less than deriv." + exit + } + } + if ("`cb_p'"!=".") { + if (`cb_p'<`cb_s') { + di as error "p cannot be smaller than s." + exit + } + if (`cb_p'<`deriv') { + di as error "p for CB cannot be less than deriv." + exit + } + } + if ("`polyreg'"!="") { + if (`polyreg'<`deriv') { + di as error "polyreg() cannot be less than deriv()." 
+ exit + } + } + if (`"`savedata'"'!=`""') { + if ("`replace'"=="") { + confirm new file `"`savedata'.dta"' + } + if ("`plot'"!="") { + di as error "Plot cannot be turned off if graph data are requested." + exit + } + } + if (`polyregcingrid'!=0&"`polyreg'"=="") { + di as error "polyreg() is missing." + exit + } + if ("`binsmethod'"!="DPI"&"`binsmethod'"!="ROT") { + di as error "binsmethod incorrectly specified." + exit + } + ******** END error checking *************************** + + * Mark sample + preserve + + * Parse varlist into y_var, x_var and w_var + tokenize `varlist' + + fvrevar `1', tsonly + local y_var "`r(varlist)'" + local y_varname "`1'" + + fvrevar `2', tsonly + local x_var "`r(varlist)'" + local x_varname "`2'" + + macro shift 2 + local w_var "`*'" + * read eval point for w from another file + if ("`atwout'"=="user") { + append using `at' + } + + fvrevar `w_var', tsonly + local w_var "`r(varlist)'" + local nwvar: word count `w_var' + + * Save the last obs in a vector and then drop it + tempname wuser /* a vector used to keep eval for w */ + if ("`atwout'"=="user") { + mata: st_matrix("`wuser'", st_data(`=_N', "`w_var'")) + qui drop in `=_N' + } + + * Get positions of factor vars + local indexlist "" + local i = 1 + foreach v in `w_var' { + if strpos("`v'", ".") == 0 { + local indexlist `indexlist' `i' + } + local ++i + } + + * add a default for at + if (`"`at'"'==""&`nwvar'>0) { + local at "mean" + } + + * Now, mark sample + marksample touse + markout `touse' `by', strok + qui keep if `touse' + local nsize=_N /* # of rows in the original dataset */ + + if ("`usegtools'"==""&("`masspoints'"!="off"|"`binspos'"=="QS")) { + if ("`:sortedby'"!="`x_var'") { + di as text in gr "Sorting dataset on `x_varname'..." + di as text in gr "Note: This step is omitted if dataset already sorted by `x_varname'." 
+ sort `x_var', stable + } + local sorted "sorted" + } + + if ("`wtype'"=="f") qui sum `x_var' `wt', meanonly + else qui sum `x_var', meanonly + + local xmin=r(min) + local xmax=r(max) + local Ntotal=r(N) /* total sample size, with wt */ + * define the support of plot + if ("`plotxrange'"!="") { + local xsc `plotxrange' + if (wordcount("`xsc'")==1) local xsc `xsc' `xmax' + } + else local xsc `xmin' `xmax' + + * Effective sample size + local eN=`nsize' + * DO NOT check mass points and clusters outside loop unless needed + + * Check number of unique byvals & create local storing byvals + local byvarname `by' + if "`by'"!="" { + capture confirm numeric variable `by' + if _rc { + local bystring "T" + * generate a numeric version + tempvar by + tempname bylabel + qui egen `by'=group(`byvarname'), lname(`bylabel') + } + + local bylabel `:value label `by'' /* catch value labels for numeric by-vars too */ + + tempname byvalmatrix + qui tab `by', nofreq matrow(`byvalmatrix') + + local bynum=r(r) + forvalues i=1/`bynum' { + local byvals `byvals' `=`byvalmatrix'[`i',1]' + } + } + else local bynum=1 + + * Default colors, symbols, linepatterns + if (`"`bycolors'"'==`""') local bycolors /// + navy maroon forest_green dkorange teal cranberry lavender /// + khaki sienna emidblue emerald brown erose gold bluishgray + if (`"`bysymbols'"'==`""') local bysymbols /// + O D T S + X A a | V o d s t x + if (`"`bylpatterns'"'==`""') { + forval i=1/`bynum' { + local bylpatterns `bylpatterns' solid + } + } + + * Temp name in MATA + tempname xvec yvec byvec cluvec binedges + mata: `xvec'=st_data(., "`x_var'"); `yvec'=st_data(.,"`y_var'"); `byvec'=.; `cluvec'=. + + ******************************************************* + *** Mass point counting ******************************* + tempname Ndistlist Nclustlist mat_imse_var_rot mat_imse_bsq_rot mat_imse_var_dpi mat_imse_bsq_dpi + mat `Ndistlist'=J(`bynum',1,.) + mat `Nclustlist'=J(`bynum',1,.) 
+ * Matrices saving imse + mat `mat_imse_var_rot'=J(`bynum',1,.) + mat `mat_imse_bsq_rot'=J(`bynum',1,.) + mat `mat_imse_var_dpi'=J(`bynum',1,.) + mat `mat_imse_bsq_dpi'=J(`bynum',1,.) + + if (`bynum'>1) mata: `byvec'=st_data(.,"`by'") + if ("`clusterON'"=="T") mata: `cluvec'=st_data(.,"`clustervar'") + + ******************************************************** + ********** Bins, based on FULL sample ****************** + ******************************************************** + * knotlist: inner knot seq; knotlistON: local, knot available before loop + + tempname fullkmat /* matrix name for saving knots based on the full sample */ + + * Extract user-specified knot list + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + capture numlist "`binspos'", ascending + if (_rc==0) { + local knotlistON "T" + local knotlist `binspos' + local nbins: word count `knotlist' + local first: word 1 of `knotlist' + local last: word `nbins' of `knotlist' + if (`first'<=`xmin'|`last'>=`xmax') { + di as error "Inner knots specified out of allowed range." + exit + } + else { + local nbins=`nbins'+1 + local nbins_full `nbins' + local pos "user" + + foreach el of local knotlist { + mat `fullkmat'=(nullmat(`fullkmat') \ `el') + } + mat `fullkmat'=(`xmin' \ `fullkmat' \ `xmax') + } + } + else { + di as error "Numeric list incorrectly specified in binspos()." + exit + } + } + + * Discrete x? + if ("`fewmasspoints'"!="") local fullfewobs "T" + + * Bin selection using the whole sample if + if ("`fullfewobs'"==""&"`selection'"!="NA"&(("`by'"=="")|(("`by'"!="")&("`samebinsby'"!="")))) { + local selectfullON "T" + } + + if ("`selectfullON'"=="T") { + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xvec', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + } + * # of clusters + local Nclust=. 
+ if ("`clusterON'"=="T") { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + local eN=min(`eN', `Nclust') /* effective sample size */ + } + + * Check effective sample size + if ("`dots_p'"==".") local dotspcheck=6 + else local dotspcheck=`dots_p' + * Check effective sample size + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`dotspcheck'+1+`qrot')) { + di as text in gr "Warning: Too small effective sample size for bin selection." /// + _newline _skip(9) "# of mass points or clusters used and by() option ignored." + local by "" + local byvals "" + local fullfewobs "T" + local binspos "QS" /* forced to be QS */ + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `Ntotal'>5000) { + local randcut1k=max(5000/`Ntotal', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5,000, 0.01n) observations if the sample size n>5,000. To use the full sample, set randcut(1)." + } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') bins(`dots_p' `dots_s') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`nbins'==.) 
{ + local nbins=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) { + di as error "bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`dots'"=="T"|"`dots'"=="") { + local dots_p=`binsp' + local dots_s=`binss' + } + if ("`line'"=="T") { + local line_p=`binsp' + local line_s=`binss' + } + if ("`ci'"!="T"&"`ci'"!="") { + if (`ci_p'<=`binsp') { + local ci_p=`binsp'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`ci'"=="T") { + local ci_p=`binsp'+1 + local ci_s=`binss'+1 + } + if ("`cb'"!="T"&"`cb'"!="") { + if (`cb_p'<=`binsp') { + local cb_p=`binsp'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. 
It must be greater than the IMSE-optimal degree." + } + } + if ("`cb'"=="T") { + local cb_p=`binsp'+1 + local cb_s=`binss'+1 + } + } + } + } + + if (("`selectfullON'"=="T"|("`selection'"=="NA"&"`samebinsby'"!=""))&"`fullfewobs'"=="") { + * Save in a knot list + local knotlistON "T" + local nbins_full=`nbins' + if ("`binspos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `fullkmat'=(nullmat(`fullkmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else if ("`binspos'"=="QS") { + if (`nbins'==1) mat `fullkmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `wt', nq(`nbins') `usegtools' + mat `fullkmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + + *** Placement name, for display ************ + if ("`pos'"=="user") { + local binselectmethod "User-specified" + local placement "User-specified" + } + else if ("`binspos'"=="ES") { + local placement "Evenly-spaced" + } + else if ("`binspos'"=="QS") { + local placement "Quantile-spaced" + } + + * NOTE: ALL checkings are put within the loop + + * Set seed + if ("`simsseed'"!="") set seed `simsseed' + + * alpha quantile (for two-sided CI) + local alpha=(100-(100-`level')/2)/100 + + + *************************************************************************** + *************** Preparation before loop************************************ + *************************************************************************** + + ********** Prepare vars for plotting ******************** + * names for mata objects storing graph data + * plotmat: final output (defined outside); + * plotmatby: output for each group + tempname plotmat plotmatby xsub ysub byindex xcatsub + tempname Xm mata_fit mata_se /* temp name for mata obj */ + + * count the number of requested columns, record the positions + local ncolplot=1 /* 1st col reserved for group */ + if ("`plot'"=="") { + if (`dotsntot'!=0) { + local dots_start=`ncolplot'+1 + local dots_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + } + if 
(`linengrid'!=0&"`fullfewobs'"=="") { + local line_start=`ncolplot'+1 + local line_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + } + if (`polyregngrid'!=0) { + local poly_start=`ncolplot'+1 + local poly_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + if (`polyregcingrid'!=0) { + local polyci_start=`ncolplot'+1 + local polyci_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + } + if (`cintot'!=0) { + local ci_start=`ncolplot'+1 + local ci_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + if (`cbngrid'!=0&"`fullfewobs'"=="") { + local cb_start=`ncolplot'+1 + local cb_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + } + mata: `plotmat'=J(0,`ncolplot',.) + + * mark the (varying) last row (for plotting) + local bylast=0 + ******************************************************************* + * temp var: bin id + tempvar xcat + qui gen `xcat'=. in 1 + + * matrix names, for returns + tempname Nlist nbinslist cvallist + + * local vars, for plotting + local counter_by=1 + local plotnum=0 /* count the number of series, for legend */ + if ("`by'"=="") local noby="noby" + local byvalnamelist "" /* save group name (value) */ + local plotcmd "" /* plotting cmd */ + + *************************************************************************** + ******************* Now, enter the loop *********************************** + *************************************************************************** + foreach byval in `byvals' `noby' { + local conds "" + if ("`by'"!="") { + local conds "if `by'==`byval'" /* with "if" */ + if ("`bylabel'"=="") local byvalname=`byval' + else { + local byvalname `: label `bylabel' `byval'' + } + local byvalnamelist `" `byvalnamelist' `"`byvalname'"' "' + } + + if (`bynum'>1) { + mata: `byindex'=`byvec':==`byval' + mata: `xsub'=select(`xvec',`byindex'); `ysub'=select(`yvec', `byindex') + } + else { + mata: `xsub'=`xvec'; `ysub'=`yvec' + } + + * Subsample size + if ("`wtype'"=="f") sum `x_var' `conds' `wt', meanonly + else sum `x_var' `conds', 
meanonly + + local xmin=r(min) + local xmax=r(max) + local N=r(N) + mat `Nlist'=(nullmat(`Nlist') \ `N') + + * Effective sample size + if (`bynum'==1) local eN=`nsize' + else { + if ("`wtype'"!="f") local eN=r(N) + else { + qui count `conds' + local eN=r(N) + } + } + + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xsub', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' `conds' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + mat `Ndistlist'[`counter_by',1]=`Ndist' + } + + * # of clusters + local Nclust=. + if ("`clusterON'"=="T") { + if (`bynum'==1) { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + } + else { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(select(`cluvec', `byindex'))))) + } + else { + qui gunique `clustervar' `conds' + local Nclust=r(unique) + } + } + local eN=min(`eN', `Nclust') /* effective SUBsample size */ + mat `Nclustlist'[`counter_by',1]=`Nclust' + } + + ********************************************************* + ************** Prepare bins, within loop **************** + ********************************************************* + if ("`pos'"!="user") local pos `binspos' /* initialize pos */ + * Selection? + if ("`selection'"!="NA"&"`knotlistON'"!="T"&"`fullfewobs'"=="") { + * Check effective sample size + if ("`dots_p'"==".") local dotspcheck=6 + else local dotspcheck=`dots_p' + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`dotspcheck'+1+`qrot')) { + di as text in gr "Warning: Too small effective sample size for bin selection." /// + _newline _skip(9) "# of mass points or clusters used." 
+ local fewobs "T" + local nbins=`eN' + local pos "QS" /* forced to be QS */ + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `N'>5000) { + local randcut1k=max(5000/`N', 0.01) + di as text in gr "Warning: to speed up computation, bin/degree selection uses a subsample of roughly max(5,000, 0.01n) observations if the sample size n>5,000. To use the full sample, set randcut(1)." + } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + bins(`dots_p' `dots_s') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`pos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_bsq_dpi'[`counter_by',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter_by',1]=e(imse_var_dpi) + if (`nbins'==.) { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) 
{ + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_bsq_dpi'[`counter_by',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter_by',1]=e(imse_var_dpi) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`dots'"=="T"|"`dots'"=="") { + local dots_p=`binsp' + local dots_s=`binss' + } + if ("`line'"=="T") { + local line_p=`binsp' + local line_s=`binss' + } + if ("`ci'"!="T"&"`ci'"!="") { + if (`ci_p'<=`binsp') { + local ci_p=`binsp'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`ci'"=="T") { + local ci_p=`binsp'+1 + local ci_s=`binss'+1 + } + if ("`cb'"!="T"&"`cb'"!="") { + if (`cb_p'<=`binsp') { + local cb_p=`binsp'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. It must be greater than the IMSE-optimal degree." 
+ } + } + if ("`cb'"=="T") { + local cb_p=`binsp'+1 + local cb_s=`binss'+1 + } + } + } + } + + if ("`selection'"=="NA"|"`knotlistON'"=="T") local nbins=`nbins_full' /* add the universal nbins */ + *if ("`knotlistON'"=="T") local nbins=`nbins_full' + if ("`fullfewobs'"!="") { + local fewobs "T" + local nbins=`eN' + } + + ****************************************************** + * Check effective sample size for each case ********** + ****************************************************** + if ("`fewobs'"!="T") { + if ((`nbins'-1)*(`dots_p'-`dots_s'+1)+`dots_p'+1+`dfcheck_n2'>=`eN') { + local fewobs "T" /* even though ROT available, treat it as few obs case */ + local nbins=`eN' + local pos "QS" + di as text in gr "Warning: Too small effective sample size for dots. # of mass points or clusters used." + } + if ("`line_p'"!=".") { + if ((`nbins'-1)*(`line_p'-`line_s'+1)+`line_p'+1+`dfcheck_n2'>=`eN') { + local line_fewobs "T" + di as text in gr "Warning: Too small effective sample size for line." + } + } + if ("`ci_p'"!=".") { + if ((`nbins'-1)*(`ci_p'-`ci_s'+1)+`ci_p'+1+`dfcheck_n2'>=`eN') { + local ci_fewobs "T" + di as text in gr "Warning: Too small effective sample size for CI." + } + } + if ("`cb_p'"!=".") { + if ((`nbins'-1)*(`cb_p'-`cb_s'+1)+`cb_p'+1+`dfcheck_n2'>=`eN') { + local cb_fewobs "T" + di as text in gr "Warning: Too small effective sample size for CB." + } + } + } + + if ("`polyreg'"!="") { + if (`polyreg'+1>=`eN') { + local polyreg_fewobs "T" + di as text in gr "Warning: Too small effective sample size for polynomial fit." 
+ } + } + + * Generate category variable for data and save knot in matrix + tempname kmat + + if ("`knotlistON'"=="T") { + mat `kmat'=`fullkmat' + if ("`fewobs'"=="T"&"`eN'"!="`Ndist'") { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + else { + if ("`fewmasspoints'"==""&("`fewobs'"!="T"|"`eN'"!="`Ndist'")) { + if ("`pos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `kmat'=(nullmat(`kmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + } + + * Renew knot list if few mass points + if (("`fewobs'"=="T"&"`eN'"=="`Ndist'")|"`fewmasspoints'"!="") { + qui tab `x_var' `conds', matrow(`kmat') + if ("`fewmasspoints'"!="") { + local nbins=rowsof(`kmat') + local Ndist=`nbins' + local eN=`Ndist' + } + } + else { + mata: st_matrix("`kmat'", (`xmin' \ uniqrows(st_matrix("`kmat'")[|2 \ `=`nbins'+1'|]))) + if (`nbins'!=rowsof(`kmat')-1) { + di as text in gr "Warning: Repeated knots. Some bins dropped." + local nbins=rowsof(`kmat')-1 + } + + binsreg_irecode `x_var' `conds', knotmat(`kmat') bin(`xcat') /// + `usegtools' nbins(`nbins') pos(`pos') knotliston(`knotlistON') + + mata: `xcatsub'=st_data(., "`xcat'") + if (`bynum'>1) { + mata: `xcatsub'=select(`xcatsub', `byindex') + } + } + + ************************************************* + **** Check for empty bins *********************** + ************************************************* + mata: `binedges'=. 
/* initialize */ + if ("`fewobs'"!="T"&"`localcheck'"=="T") { + mata: st_local("Ncat", strofreal(rows(uniqrows(`xcatsub')))) + if (`nbins'==`Ncat') { + mata: `binedges'=binsreg_uniq(`xsub', `xcatsub', `nbins', "uniqmin") + } + else { + local uniqmin=0 + di as text in gr "Warning: There are empty bins. Specify a smaller number in nbins()." + } + + if ("`dots_p'"!=".") { + if (`uniqmin'<`dots_p'+1) { + local dots_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for dots." + } + } + if ("`line_p'"!=".") { + if (`uniqmin'<`line_p'+1) { + local line_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for line." + } + } + if ("`ci_p'"!=".") { + if (`uniqmin'<`ci_p'+1) { + local ci_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for CI." + } + } + if ("`cb_p'"!=".") { + if (`uniqmin'<`cb_p'+1) { + local cb_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for CB." + } + } + } + + * Now, save nbins in a list !!! + mat `nbinslist'=(nullmat(`nbinslist') \ `nbins') + + ********************************************************** + **** Count the number of rows needed (within loop!) 
****** + ********************************************************** + local byfirst=`bylast'+1 + local byrange=0 + if ("`fewobs'"!="T") { + local dots_nr=`dotsngrid_mean'*`nbins' + if (`dotsngrid'!=0) local dots_nr=`dots_nr'+`dotsngrid'*`nbins'+`nbins'-1 + local ci_nr=`cingrid_mean'*`nbins' + if (`cingrid'!=0) local ci_nr=`ci_nr'+`cingrid'*`nbins'+`nbins'-1 + if (`linengrid'!=0) local line_nr=`linengrid'*`nbins'+`nbins'-1 + if (`cbngrid'!=0) local cb_nr=`cbngrid'*`nbins'+`nbins'-1 + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*`nbins'+`nbins'-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*`nbins'+`nbins'-1 + } + local byrange=max(`dots_nr'+0,`line_nr'+0,`ci_nr'+0,`cb_nr'+0, `poly_nr'+0, `polyci_nr'+0) + } + else { + if ("`eN'"=="`Ndist'") { + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*(`nbins'-1)+`nbins'-1-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*(`nbins'-1)+`nbins'-1-1 + } + } + else { + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*`nbins'+`nbins'-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*`nbins'+`nbins'-1 + } + } + local byrange=max(`nbins', `poly_nr'+0, `polyci_nr'+0) + } + local bylast=`bylast'+`byrange' + mata: `plotmatby'=J(`byrange',`ncolplot',.) + if ("`byval'"!="noby") { + mata: `plotmatby'[.,1]=J(`byrange',1,`byval') + } + + ************************************************ + **** START: prepare data for plotting*********** + ************************************************ + local plotcmdby "" + + ******************************** + * adjust w vars + tempname wval + if (`nwvar'>0) { + if (`"`at'"'==`"mean"'|`"`at'"'==`"median"') { + matrix `wval'=J(1, `nwvar', 0) + tempname wvaltemp mataobj + mata: `mataobj'=. 
+ foreach wpos in `indexlist' { + local wname: word `wpos' of `w_var' + if ("`usegtools'"=="") { + if ("`wtype'"!="") qui tabstat `wname' `conds' [aw`exp'], stat(`at') save + else qui tabstat `wname' `conds', stat(`at') save + mat `wvaltemp'=r(StatTotal) + } + else { + qui gstats tabstat `wname' `conds' `wt', stat(`at') matasave("`mataobj'") + mata: st_matrix("`wvaltemp'", `mataobj'.getOutputCol(1)) + } + mat `wval'[1,`wpos']=`wvaltemp'[1,1] + } + mata: mata drop `mataobj' + } + else if (`"`at'"'==`"0"') { + matrix `wval'=J(1,`nwvar',0) + } + else if ("`atwout'"=="user") { + matrix `wval'=`wuser' + } + } + + + ************************************************* + ********** dots and ci for few obs. case ******** + ************************************************* + if (`dotsntot'!=0&"`plot'"==""&"`fewobs'"=="T") { + di as text in gr "Warning: dots(0 0) is used." + if (`deriv'>0) di as text in gr "Warning: deriv(0 0) is used." + + local dots_first=`byfirst' + local dots_last=`byfirst'-1+`nbins' + + mata: `plotmatby'[|1,`dots_start'+2 \ `nbins',`dots_start'+2|]=range(1,`nbins',1) + + if ("`eN'"=="`Ndist'") { + mata: `plotmatby'[|1,`dots_start' \ `nbins',`dots_start'|]=st_matrix("`kmat'"); /// + `plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+1|]=J(`nbins',1,1) + + * Renew knot commalist, each value forms a group + local xknot "" + forvalues i=1/`nbins' { + local xknot `xknot' `kmat'[`i',1] + } + local xknotcommalist : subinstr local xknot " " ",", all + qui replace `xcat'=1+irecode(`x_var',`xknotcommalist') `conds' + } + else { + tempname grid + mat `grid'=(`kmat'[1..`nbins',1]+`kmat'[2..`nbins'+1,1])/2 + mata: `plotmatby'[|1,`dots_start' \ `nbins',`dots_start'|]=st_matrix("`grid'"); /// + `plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+1|]=J(`nbins',1,0) + } + + local nseries=`nbins' + if ("`boot'"=="on") { + capture bsqreg `y_var' ibn.`xcat' `w_var' `conds', quantile(`quantile') reps(`reps') + } + else { + capture qreg `y_var' ibn.`xcat' `w_var' `conds' 
`wt', quantile(`quantile') `vce' `qregopt' + } + tempname fewobs_b fewobs_V + if (_rc==0) { + mat `fewobs_b'=e(b) + mat `fewobs_V'=e(V) + mata: binsreg_checkdrop("`fewobs_b'", "`fewobs_V'", `nseries', "T") + if (`nwvar'>0) { + mat `fewobs_b'=`fewobs_b'[1,1..`nseries']+(`fewobs_b'[1,`=`nseries'+1'..`=`nseries'+`nwvar'']*`wval''+`fewobs_b'[1,colsof(`fewobs_b')])*J(1,`nseries',1) + } + else { + mat `fewobs_b'=`fewobs_b'[1,1..`nseries']+J(1,`nseries',1)*`fewobs_b'[1,colsof(`fewobs_b')] + } + } + else { + error _rc + exit _rc + } + + mata: `plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]=st_matrix("`fewobs_b'")' + + local plotnum=`plotnum'+1 + local legendnum `legendnum' `plotnum' + local col: word `counter_by' of `bycolors' + local sym: word `counter_by' of `bysymbols' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond `plotcond' if + if ("`plotxrange'"!="") { + local plotcond `plotcond' dots_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &dots_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' dots_fit>=`min_yr' + else local plotcond `plotcond' &dots_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &dots_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (scatter dots_fit dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + mcolor(`col') msymbol(`sym') `dotsplotopt') + + if (`cintot'!=0) { + di as text in gr "Warning: ci(0 0) is used." 
+ + tempname tempobj + if (`nwvar'>0) { + mata: `tempobj'=(I(`nseries'), J(`nseries',1,1)#st_matrix("`wval'"), J(`nseries',1,1)) + } + else { + mata: `tempobj'=(I(`nseries'), J(`nseries',1,1)) + } + mata: `mata_se'=sqrt(rowsum((`tempobj'*st_matrix("`fewobs_V'")):*`tempobj')) + mata: mata drop `tempobj' + + mata: `plotmatby'[|1,`ci_start'+1 \ `nbins',`ci_start'+2|]=`plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+2|]; /// + `plotmatby'[|1,`ci_start'+3 \ `nbins',`ci_start'+3|]=`plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]-`mata_se'*invnormal(`alpha'); /// + `plotmatby'[|1,`ci_start'+4 \ `nbins',`ci_start'+4|]=`plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]+`mata_se'*invnormal(`alpha') + mata: mata drop `mata_se' + + local plotnum=`plotnum'+1 + local lty: word `counter_by' of `bylpatterns' + local plotcmdby `plotcmdby' (rcap CI_l CI_r dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + } + } + + ********************************************* + **** The following handles the usual case *** + ********************************************* + * Turn on or off? 
+ local dotsON "" + local lineON "" + local polyON "" + local ciON "" + local cbON "" + if (`dotsntot'!=0&"`plot'"==""&"`fewobs'"!="T"&"`dots_fewobs'"!="T") { + local dotsON "T" + } + if (`linengrid'!=0&"`plot'"==""&"`line_fewobs'"!="T"&"`fewobs'"!="T") { + local lineON "T" + } + if (`polyregngrid'!=0&"`plot'"==""&"`polyreg_fewobs'"!="T") { + local polyON "T" + } + if (`cintot'!=0&"`plot'"==""&"`ci_fewobs'"!="T"&"`fewobs'"!="T") { + local ciON "T" + } + if (`cbngrid'!=0&"`plot'"==""&"`cb_fewobs'"!="T"&"`fewobs'"!="T") { + local cbON "T" + } + + + ************************ + ****** Dots ************ + ************************ + tempname xmean + + if ("`dotsON'"=="T") { + local dots_first=`byfirst' + local dots_last=`byfirst'+`dots_nr'-1 + + * fitting + tempname dots_b dots_V + if (("`dots_p'"=="`ci_p'"&"`dots_s'"=="`ci_s'"&"`ciON'"=="T")| /// + ("`dots_p'"=="`cb_p'"&"`dots_s'"=="`cb_s'"&"`cbON'"=="T")) { + binsqreg_fit `y_var' `x_var' `w_var' `conds' `wt', quantile(`quantile') deriv(`deriv') /// + p(`dots_p') s(`dots_s') type(dots) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`dotsngrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + usereg `sorted' boot(`boot') reps(`reps') `usegtools' qregopt(`qregopt') + } + else { + binsqreg_fit `y_var' `x_var' `w_var' `conds' `wt', quantile(`quantile') deriv(`deriv') /// + p(`dots_p') s(`dots_s') type(dots) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`dotsngrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' boot(`boot') reps(`reps') `usegtools' qregopt(`qregopt') + } + + mat `dots_b'=e(bmat) + mat `dots_V'=e(Vmat) + if (`dotsngrid_mean'!=0) mat `xmean'=e(xmat) + + + * prediction + if (`dotsngrid_mean'==0) { + mata: `plotmatby'[|1,`dots_start' \ `dots_nr',`dots_end'|] = /// + binsqreg_plotmat("`dots_b'", "`dots_V'", ., "`kmat'", /// + `nbins', `dots_p', `dots_s', `deriv', /// + "dots", `dotsngrid', "`wval'", `nwvar', /// + "`=e(spmethod)'", 
"`asyvar'") + } + else { + mata: `plotmatby'[|1,`dots_start' \ `dots_nr',`dots_end'|] = /// + binsqreg_plotmat("`dots_b'", "`dots_V'", ., "`kmat'", /// + `nbins', `dots_p', `dots_s', `deriv', /// + "dots", `dotsngrid', "`wval'", `nwvar', /// + "`=e(spmethod)'", "`asyvar'", "`xmean'") + } + + * dots + local plotnum=`plotnum'+1 + if ("`cbON'"=="T") local legendnum `legendnum' `=`plotnum'+1' + else { + local legendnum `legendnum' `plotnum' + } + local col: word `counter_by' of `bycolors' + local sym: word `counter_by' of `bysymbols' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' dots_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &dots_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' dots_fit>=`min_yr' + else local plotcond `plotcond' &dots_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &dots_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (scatter dots_fit dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + mcolor(`col') msymbol(`sym') `dotsplotopt') + } + + ********************************************** + ********************* Line ******************* + ********************************************** + if ("`lineON'"=="T") { + local line_first=`byfirst' + local line_last=`byfirst'-1+`line_nr' + + * fitting + tempname line_b line_V + capture confirm matrix `dots_b' `dots_V' + if ("`line_p'"=="`dots_p'"& "`line_s'"=="`dots_s'" & _rc==0) { + matrix `line_b'=`dots_b' + matrix `line_V'=`dots_V' + } + else { + if (("`line_p'"=="`ci_p'"&"`line_s'"=="`ci_s'"&"`ciON'"=="T")| /// + ("`line_p'"=="`cb_p'"&"`line_s'"=="`cb_s'"&"`cbON'"=="T")) { + binsqreg_fit `y_var' `x_var' `w_var' `conds' `wt', quantile(`quantile') deriv(`deriv') /// + p(`line_p') s(`line_s') type(line) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + 
usereg `sorted' boot(`boot') reps(`reps') `usegtools' qregopt(`qregopt') + } + else { + binsqreg_fit `y_var' `x_var' `w_var' `conds' `wt', quantile(`quantile') deriv(`deriv') /// + p(`line_p') s(`line_s') type(line) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' boot(`boot') reps(`reps') `usegtools' qregopt(`qregopt') + } + mat `line_b'=e(bmat) + mat `line_V'=e(Vmat) + } + + * prediction + mata: `plotmatby'[|1,`line_start' \ `line_nr',`line_end'|] = /// + binsqreg_plotmat("`line_b'", "`line_V'", ., "`kmat'", /// + `nbins', `line_p', `line_s', `deriv', /// + "line", `linengrid', "`wval'", `nwvar', "`=e(spmethod)'", "`asyvar'") + + * line + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' line_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &line_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' line_fit>=`min_yr' + else local plotcond `plotcond' &line_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &(line_fit<=`max_yr'|line_fit==.) + } + } + + local plotcmdby `plotcmdby' (line line_fit line_x /// + `plotcond' in `line_first'/`line_last', sort cmissing(n) /// + lcolor(`col') lpattern(`lty') `lineplotopt') + + } + + *********************************** + ******* Polynomial fit ************ + *********************************** + if ("`polyON'"=="T") { + if (`nwvar'>0) { + di as text "Note: When additional covariates w are included, the polynomial fit may not always be close to the binscatter fit." 
+ } + + local poly_first=`byfirst' + local poly_last=`byfirst'-1+`poly_nr' + + mata:`plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'+2|]=binsreg_grids("`kmat'",`polyregngrid') + + local poly_series "" + forval i=1/`polyreg' { + tempvar x_var_`i' + qui gen `x_var_`i''=`x_var'^`i' `conds' + local poly_series `poly_series' `x_var_`i'' + } + + if ("`boot'"=="on") { + capture bsqreg `y_var' `poly_series' `w_var' `conds', quantile(`quantile') reps(`reps') + } + else { + capture qreg `y_var' `poly_series' `w_var' `conds' `wt', quantile(`quantile') `vce' `qregopt' + } + * store results + tempname poly_b poly_V poly_adjw + if (_rc==0) { + matrix `poly_b'=e(b) + + if (`nwvar'>0&`deriv'==0) { + matrix `poly_adjw'=`wval'*`poly_b'[1, `=`polyreg'+1'..`=`polyreg'+`nwvar'']' + } + else { + matrix `poly_adjw'=0 + } + + if (`deriv'==0) { + if (`polyreg'>0) matrix `poly_b'=(`poly_b'[1, `=`polyreg'+`nwvar'+1'], `poly_b'[1,1..`polyreg']) + else matrix `poly_b'=`poly_b'[1, `=`polyreg'+`nwvar'+1'] + } + else matrix `poly_b'=`poly_b'[1, `deriv'..`polyreg'] + + matrix `poly_V'=e(V) + } + else { + error _rc + exit _rc + } + + * Data for derivative + mata: `Xm'=J(`poly_nr',0,.) 
+ forval i=`deriv'/`polyreg' { + mata: `Xm'=(`Xm', /// + `plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'|]:^(`i'-`deriv')* /// + factorial(`i')/factorial(`i'-`deriv')) + } + mata:`plotmatby'[|1,`poly_start'+3 \ `poly_nr',`poly_start'+3|]=(`Xm'*st_matrix("`poly_b'")'):+st_matrix("`poly_adjw'") + + mata: mata drop `Xm' + + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' poly_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &poly_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' poly_fit>=`min_yr' + else local plotcond `plotcond' &poly_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &poly_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (line poly_fit poly_x /// + `plotcond' in `poly_first'/`poly_last', /// + sort lcolor(`col') lpattern(`lty') `polyregplotopt') + + * add CI for global poly? + if (`polyregcingrid'!=0) { + local polyci_first=`byfirst' + local polyci_last=`byfirst'-1+`polyci_nr' + + mata: `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'+2|]=binsreg_grids("`kmat'", `polyregcingrid') + + mata: `Xm'=J(`polyci_nr',0,.) 
+ forval i=`deriv'/`polyreg' { + mata:`Xm'=(`Xm', /// + `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'|]:^(`i'-`deriv')* /// + factorial(`i')/factorial(`i'-`deriv')) + } + mata:`mata_fit'=(`Xm'*st_matrix("`poly_b'")'):+st_matrix("`poly_adjw'") + if (`deriv'==0) { + if (`polyreg'>0) { + if (`nwvar'>0) mata: `Xm'=(`Xm'[|1,2 \ ., cols(`Xm')|], J(`polyci_nr',1,1)#st_matrix("`wval'"),`Xm'[.,1]) + else mata: `Xm'=(`Xm'[|1,2 \ ., cols(`Xm')|], `Xm'[.,1]) + } + else { + if (`nwvar'>0) mata: `Xm'=(J(`polyci_nr',1,1)#st_matrix("`wval'"),`Xm'[.,1]) + else mata: `Xm'=`Xm'[.,1] + + } + } + else { + matrix `poly_V'=`poly_V'[`deriv'..`polyreg',`deriv'..`polyreg'] + } + + mata: `mata_se'=sqrt(rowsum((`Xm':*(st_matrix("`poly_V'")*`Xm'')'))) + + mata: `plotmatby'[|1,`polyci_start'+3 \ `polyci_nr',`polyci_start'+3|]=`mata_fit'-`mata_se'*invnormal(`alpha'); /// + `plotmatby'[|1,`polyci_start'+4 \ `polyci_nr',`polyci_start'+4|]=`mata_fit'+`mata_se'*invnormal(`alpha'); /// + `plotmatby'[selectindex(`plotmatby'[,`=`polyci_start'+1']:==1),(`=`polyci_start'+3',`=`polyci_start'+4')]=J(`=`nbins'-1',2,.) 
+ + mata: mata drop `Xm' `mata_fit' `mata_se' + + * poly ci + local plotnum=`plotnum'+1 + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' polyCI_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &polyCI_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' polyCI_l>=`min_yr' + else local plotcond `plotcond' &polyCI_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &polyCI_r<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (rcap polyCI_l polyCI_r polyCI_x /// + `plotcond' in `polyci_first'/`polyci_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + } + } + + + ********************************** + ******* Confidence Interval ****** + ********************************** + if ("`ciON'"=="T") { + local ci_first=`byfirst' + local ci_last=`byfirst'-1+`ci_nr' + + * fitting + tempname ci_b ci_V + capture confirm matrix `line_b' `line_V' + if ("`ci_p'"=="`line_p'"& "`ci_s'"=="`line_s'" & _rc==0) { + matrix `ci_b'=`line_b' + matrix `ci_V'=`line_V' + } + else { + capture confirm matrix `dots_b' `dots_V' + if ("`ci_p'"=="`dots_p'"& "`ci_s'"=="`dots_s'" & _rc==0) { + matrix `ci_b'=`dots_b' + matrix `ci_V'=`dots_V' + } + } + + capture confirm matrix `ci_b' `ci_V' `xmean' + if (_rc!=0) { + binsqreg_fit `y_var' `x_var' `w_var' `conds' `wt', quantile(`quantile') deriv(`deriv') /// + p(`ci_p') s(`ci_s') type(ci) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`cingrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' boot(`boot') reps(`reps') `usegtools' qregopt(`qregopt') + + mat `ci_b'=e(bmat) + mat `ci_V'=e(Vmat) + mat `xmean'=e(xmat) + } + + * prediction + if (`cingrid_mean'==0) { + mata: `plotmatby'[|1,`ci_start' \ `ci_nr',`ci_end'|] = /// + binsqreg_plotmat("`ci_b'", "`ci_V'", /// + `=invnormal(`alpha')', "`kmat'", /// + `nbins', `ci_p', `ci_s', `deriv', "ci", /// + 
`cingrid', "`wval'", `nwvar', "`=e(spmethod)'", "`asyvar'") + } + else { + mata: `plotmatby'[|1,`ci_start' \ `ci_nr',`ci_end'|] = /// + binsqreg_plotmat("`ci_b'", "`ci_V'", /// + `=invnormal(`alpha')', "`kmat'", /// + `nbins', `ci_p', `ci_s', `deriv', "ci", /// + `cingrid', "`wval'", `nwvar', "`=e(spmethod)'", "`asyvar'", "`xmean'") + } + + * ci + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' CI_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &CI_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' CI_l>=`min_yr' + else local plotcond `plotcond' &CI_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &CI_r<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (rcap CI_l CI_r CI_x /// + `plotcond' in `ci_first'/`ci_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + + } + + ******************************* + ***** Confidence Band ********* + ******************************* + tempname cval + scalar `cval'=. + if ("`cbON'"=="T") { + if (`nsims'<2000|`simsgrid'<50) { + di as text "Note: A larger number random draws/evaluation points is recommended to obtain the final results." 
+ } + * Prepare grid for plotting + local cb_first=`byfirst' + local cb_last=`byfirst'-1+`cb_nr' + + * fitting + tempname cb_b cb_V + capture confirm matrix `ci_b' `ci_V' + if ("`cb_p'"=="`ci_p'"& "`cb_s'"=="`ci_s'" & _rc==0) { + matrix `cb_b'=`ci_b' + matrix `cb_V'=`ci_V' + } + else { + capture confirm matrix `line_b' `line_V' + if ("`cb_p'"=="`line_p'"& "`cb_s'"=="`line_s'" & _rc==0) { + matrix `cb_b'=`line_b' + matrix `cb_V'=`line_V' + } + else { + capture confirm matrix `dots_b' `dots_V' + if ("`cb_p'"=="`dots_p'"& "`cb_s'"=="`dots_s'" & _rc==0) { + matrix `cb_b'=`dots_b' + matrix `cb_V'=`dots_V' + } + else { + binsqreg_fit `y_var' `x_var' `w_var' `conds' `wt', quantile(`quantile') deriv(`deriv') /// + p(`cb_p') s(`cb_s') type(cb) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' boot(`boot') reps(`reps') `usegtools' qregopt(`qregopt') + mat `cb_b'=e(bmat) + mat `cb_V'=e(Vmat) + } + } + } + + * Compute critical values + * Prepare grid for simulation + local uni_last=`simsngrid'*`nbins'+`nbins'-1 + local nseries=(`cb_p'-`cb_s'+1)*(`nbins'-1)+`cb_p'+1 + + tempname cb_basis coeff vcov vcovtemp + mata: `cb_basis'=binsreg_grids("`kmat'", `simsngrid'); /// + `cb_basis'=binsreg_spdes(`cb_basis'[,1], "`kmat'", `cb_basis'[,3], `cb_p', `deriv', `cb_s'); /// + `cb_basis'=(`cb_basis', J(rows(`cb_basis'),1,1)); /// + `coeff'=st_matrix("`cb_b'"); `coeff'=(`coeff'[|1 \ `nseries'|], `coeff'[cols(`coeff')])'; /// + `vcov'=st_matrix("`cb_V'"); /// + `vcov'= (`vcov'[|1,1 \ `nseries', `nseries'|], `vcov'[|1,cols(`vcov') \ `nseries', cols(`vcov')|] \ /// + `vcov'[|cols(`vcov'), 1 \ cols(`vcov'), `nseries'|], `vcov'[cols(`vcov'), cols(`vcov')]); /// + `Xm'=binsreg_pred(`cb_basis', `coeff', `vcov', "all"); /// + st_matrix("`vcovtemp'", `vcov'); /// + binsreg_pval(`cb_basis', `Xm'[,2], "`vcovtemp'", ".", `nsims', `=`nseries'+1', "two", `=`level'/100', ".", "`cval'", "inf") + mata: mata drop `cb_basis' 
`Xm' `coeff' `vcov' + + * prediction + mata: `plotmatby'[|1,`cb_start' \ `cb_nr',`cb_end'|] = /// + binsqreg_plotmat("`cb_b'", "`cb_V'", /// + `=`cval'', "`kmat'", /// + `nbins', `cb_p', `cb_s', `deriv', /// + "cb", `cbngrid', "`wval'", `nwvar', "`=e(spmethod)'", "`asyvar'") + + * cb + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' CB_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &CB_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' CB_l>=`min_yr' + else local plotcond `plotcond' &CB_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &(CB_r<=`max_yr'|CB_r==.) + } + } + + local plotcmdby (rarea CB_l CB_r CB_x /// + `plotcond' in `cb_first'/`cb_last', sort cmissing(n) /// + lcolor(none%0) fcolor(`col'%50) fintensity(50) `cbplotopt') `plotcmdby' + } + mat `cvallist'=(nullmat(`cvallist') \ `cval') + + local plotcmd `plotcmd' `plotcmdby' + mata: `plotmat'=(`plotmat' \ `plotmatby') + + ********************************* + **** display ******************** + ********************************* + di "" + * Plotting + if ("`plot'"=="") { + if (`counter_by'==1) { + di in smcl in gr "Binscatter plot, quantile" + di in smcl in gr "Bin selection method: `binselectmethod'" + di in smcl in gr "Placement: `placement'" + di in smcl in gr "Derivative: `deriv'" + if (`"`savedata'"'!=`""') { + di in smcl in gr `"Output file: `savedata'.dta"' + } + } + di "" + if ("`by'"!="") { + di in smcl in gr "Group: `byvarname' = " in yellow "`byvalname'" + } + di in smcl in gr "{hline 30}{c TT}{hline 15}" + di in smcl in gr "{lalign 1:# of observations}" _col(30) " {c |} " _col(32) as result %7.0f `N' + di in smcl in gr "{lalign 1:# of distinct values}" _col(30) " {c |} " _col(32) as result %7.0f `Ndist' + di in smcl in gr "{lalign 1:# of clusters}" _col(30) " {c |} " 
_col(32) as result %7.0f `Nclust' + di in smcl in gr "{hline 30}{c +}{hline 15}" + di in smcl in gr "{lalign 1:Bin/Degree selection:}" _col(30) " {c |} " + if ("`selection'"=="P") { + di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `binsp' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `binss' + } + else { + di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `dots_p' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `dots_s' + } + di in smcl in gr "{ralign 29:# of bins}" _col(30) " {c |} " _col(32) as result %7.0f `nbins' + if ("`binselectmethod'"!="User-specified") { + if ("`binsmethod'"=="ROT") { + di in smcl in gr "{ralign 29:imse, bias^2}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_bsq_rot'[`counter_by',1]' + di in smcl in gr "{ralign 29:imse, var.}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_var_rot'[`counter_by',1]' + } + else if ("`binsmethod'"=="DPI") { + di in smcl in gr "{ralign 29:imse, bias^2}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_bsq_dpi'[`counter_by',1]' + di in smcl in gr "{ralign 29:imse, var.}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_var_dpi'[`counter_by',1]' + } + } + di in smcl in gr "{hline 30}{c BT}{hline 15}" + di "" + di in smcl in gr "{hline 9}{c TT}{hline 30}" + di in smcl _col(10) "{c |}" in gr _col(17) "p" _col(25) "s" _col(33) "df" + di in smcl in gr "{hline 9}{c +}{hline 30}" + if (`dotsntot'!=0) { + local dots_df=(`dots_p'-`dots_s'+1)*(`nbins'-1)+`dots_p'+1 + di in smcl in gr "{lalign 1: dots}" _col(10) "{c |}" in gr _col(17) "`dots_p'" _col(25) "`dots_s'" _col(33) "`dots_df'" + } + if ("`lineON'"=="T") { + local line_df=(`line_p'-`line_s'+1)*(`nbins'-1)+`line_p'+1 + di in smcl in gr "{lalign 1: line}" _col(10) "{c |}" in gr _col(17) "`line_p'" _col(25) "`line_s'" _col(33) 
"`line_df'" + } + if (`cintot'!=0) { + local ci_df=(`ci_p'-`ci_s'+1)*(`nbins'-1)+`ci_p'+1 + di in smcl in gr "{lalign 1: CI}" _col(10) "{c |}" in gr _col(17) "`ci_p'" _col(25) "`ci_s'" _col(33) "`ci_df'" + } + if ("`cbON'"=="T") { + local cb_df=(`cb_p'-`cb_s'+1)*(`nbins'-1)+`cb_p'+1 + di in smcl in gr "{lalign 1: CB}" _col(10) "{c |}" in gr _col(17) "`cb_p'" _col(25) "`cb_s'" _col(33) "`cb_df'" + } + if ("`polyON'"=="T") { + local poly_df=`polyreg'+1 + di in smcl in gr "{lalign 1: polyreg}" _col(10) "{c |}" in gr _col(17) "`polyreg'" _col(25) "NA" _col(33) "`poly_df'" + } + di in smcl in gr "{hline 9}{c BT}{hline 30}" + } + + + mata: mata drop `plotmatby' + local ++counter_by + } + mata: mata drop `xsub' `ysub' `binedges' + if (`bynum'>1) mata: mata drop `byindex' + capture mata: mata drop `xcatsub' + ****************** END loop **************************************** + ******************************************************************** + + + ******************************************* + *************** Plotting ****************** + ******************************************* + clear + if ("`plotcmd'"!="") { + * put data back to STATA + mata: st_local("nr", strofreal(rows(`plotmat'))) + qui set obs `nr' + + * MAKE SURE the orderings match + qui gen group=. in 1 + if (`dotsntot'!=0) { + qui gen dots_x=. in 1 + qui gen dots_isknot=. in 1 + qui gen dots_binid=. in 1 + qui gen dots_fit=. in 1 + } + if (`linengrid'!=0&"`fullfewobs'"=="") { + qui gen line_x=. in 1 + qui gen line_isknot=. in 1 + qui gen line_binid=. in 1 + qui gen line_fit=. in 1 + } + if (`polyregngrid'!=0) { + qui gen poly_x=. in 1 + qui gen poly_isknot=. in 1 + qui gen poly_binid=. in 1 + qui gen poly_fit=. in 1 + if (`polyregcingrid'!=0) { + qui gen polyCI_x=. in 1 + qui gen polyCI_isknot=. in 1 + qui gen polyCI_binid=. in 1 + qui gen polyCI_l=. in 1 + qui gen polyCI_r=. in 1 + } + } + if (`cintot'!=0) { + qui gen CI_x=. in 1 + qui gen CI_isknot=. in 1 + qui gen CI_binid=. in 1 + qui gen CI_l=. 
in 1 + qui gen CI_r=. in 1 + } + if (`cbngrid'!=0&"`fullfewobs'"=="") { + qui gen CB_x=. in 1 + qui gen CB_isknot=. in 1 + qui gen CB_binid=. in 1 + qui gen CB_l=. in 1 + qui gen CB_r=. in 1 + } + + mata: st_store(.,.,`plotmat') + + * Legend + local plot_legend legend(order( + if ("`by'"!=""&`dotsntot'!=0) { + forval i=1/`bynum' { + local byvalname: word `i' of `byvalnamelist' + local plot_legend `plot_legend' `: word `i' of `legendnum'' "`byvarname'=`byvalname'" + } + local plot_legend `plot_legend' )) + } + else { + local plot_legend legend(off) + } + + * Plot it + local graphcmd twoway `plotcmd', xtitle(`x_varname') ytitle(`y_varname') xscale(range(`xsc')) `plot_legend' `options' + `graphcmd' + } + mata: mata drop `plotmat' `xvec' `yvec' `byvec' `cluvec' + + + * Save graph data ? + * In the normal case + if (`"`savedata'"'!=`""'&`"`plotcmd'"'!=`""') { + * Add labels + if ("`by'"!="") { + if ("`bystring'"=="T") { + label val group `bylabel' + decode group, gen(`byvarname') + } + else { + qui gen `byvarname'=group + if ("`bylabel'"!="") label val `byvarname' `bylabel' + } + label var `byvarname' "Group" + qui drop group + order `byvarname' + } + else qui drop group + + capture confirm variable dots_x dots_binid dots_isknot dots_fit + if (_rc==0) { + label var dots_x "Dots: grid" + label var dots_binid "Dots: indicator of bins" + label var dots_isknot "Dots: indicator of inner knot" + label var dots_fit "Dots: fitted values" + } + capture confirm variable line_x line_binid line_isknot line_fit + if (_rc==0) { + label var line_x "Line: grid" + label var line_binid "Line: indicator of bins" + label var line_isknot "Line: indicator of inner knot" + label var line_fit "Line: fitted values" + } + capture confirm variable poly_x poly_binid poly_isknot poly_fit + if (_rc==0) { + label var poly_x "Poly: grid" + label var poly_binid "Poly: indicator of bins" + label var poly_isknot "Poly: indicator of inner knot" + label var poly_fit "Poly: fitted values" + } + capture 
confirm variable polyCI_x polyCI_binid polyCI_isknot polyCI_l polyCI_r + if (_rc==0) { + label var polyCI_x "Poly confidence interval: grid" + label var polyCI_binid "Poly confidence interval: indicator of bins" + label var polyCI_isknot "Poly confidence interval: indicator of inner knot" + label var polyCI_l "Poly confidence interval: left boundary" + label var polyCI_r "Poly confidence interval: right boundary" + } + capture confirm variable CI_x CI_binid CI_isknot CI_l CI_r + if (_rc==0) { + label var CI_x "Confidence interval: grid" + label var CI_binid "Confidence interval: indicator of bins" + label var CI_isknot "Confidence interval: indicator of inner knot" + label var CI_l "Confidence interval: left boundary" + label var CI_r "Confidence interval: right boundary" + } + capture confirm variable CB_x CB_binid CB_isknot CB_l CB_r + if (_rc==0) { + label var CB_x "Confidence band: grid" + label var CB_binid "Confidence band: indicator of bins" + label var CB_isknot "Confidence band: indicator of inner knot" + label var CB_l "Confidence band: left boundary" + label var CB_r "Confidence band: right boundary" + } + qui save `"`savedata'"', `replace' + } + *************************************************************************** + + ********************************* + ********** Return *************** + ********************************* + ereturn clear + * # of observations + ereturn scalar N=`Ntotal' + * Options + ereturn scalar level=`level' + ereturn scalar dots_p=`dots_p' + ereturn scalar dots_s=`dots_s' + ereturn scalar line_p=`line_p' + ereturn scalar line_s=`line_s' + ereturn scalar ci_p=`ci_p' + ereturn scalar ci_s=`ci_s' + ereturn scalar cb_p=`cb_p' + ereturn scalar cb_s=`cb_s' + * by group: + *ereturn matrix knot=`kmat' + ereturn matrix cval_by=`cvallist' + ereturn matrix nbins_by=`nbinslist' + ereturn matrix Nclust_by=`Nclustlist' + ereturn matrix Ndist_by=`Ndistlist' + ereturn matrix N_by=`Nlist' + + ereturn matrix imse_var_rot=`mat_imse_var_rot' + 
ereturn matrix imse_bsq_rot=`mat_imse_bsq_rot'
ereturn matrix imse_var_dpi=`mat_imse_var_dpi'
ereturn matrix imse_bsq_dpi=`mat_imse_bsq_dpi'
end

* Helper commands
* Estimation: fit the binned quantile regression for one plot element
* (dots/line/ci/cb).  Returns, via e():
*   e(bmat)     - row vector of estimated coefficients (or within-bin quantiles)
*   e(Vmat)     - variance matrix of the estimates (missing when no regression is run)
*   e(xmat)     - (xcat, xbar): bin id and within-bin mean of x (when dotsmean!=0)
*   e(spmethod) - "T" if within-bin quantiles were computed directly, "F" if qreg was used
program define binsqreg_fit, eclass
    version 13
    syntax varlist(min=2 numeric ts fv) [if] [in] [fw aw pw] [, quantile(numlist min=1 max=1 >=0 <=1) ///
           deriv(integer 0) p(integer 0) s(integer 0) type(string) vce(passthru) ///
           xcat(varname numeric) kmat(name) dotsmean(integer 0) /// /* dotsmean: report within-bin x-mean? */
           xname(name) yname(name) catname(name) edge(name) ///
           usereg sorted boot(string) reps(string) usegtools qregopt(string asis)]  /* usereg: force the command to use reg; sorted: sorted data? */

    preserve
    marksample touse
    qui keep if `touse'

    if ("`weight'"!="") local wt [`weight'`exp']

    * varlist is: y x [w-covariates...]
    tokenize `varlist'
    local y_var `1'
    local x_var `2'
    macro shift 2
    local w_var "`*'"

    * Shortcut: with no covariates, p=0, and a dots/line request (and usereg not
    * forced), the fit is just the within-bin quantile of y -- no regression needed.
    if ("`w_var'"==""&`p'==0&("`type'"=="dots"|"`type'"=="line")&"`usereg'"=="") {
       local ymeanON "T"
    }
    else {
       local ymeanON "F"
    }
    local nbins=rowsof(`kmat')-1

    tempname matxmean temp_b temp_V
    mat `matxmean'=.
    mat `temp_b'=.
    mat `temp_V'=.

    * Within-bin summaries (x-means and/or y-quantiles), computed by one of three
    * routes: collapse (plain Stata), gstats (gtools), or binsreg_stat (Mata, for
    * pre-sorted unweighted data).
    if (`dotsmean'!=0|"`ymeanON'"=="T") {
       if ("`sorted'"==""|"`weight'"!=""|"`usegtools'"!="") {
          * e.g. quantile(.5) -> stat "p50" for collapse/gstats
          local stat="p"+"`=round(`quantile'*100)'"

          if ("`usegtools'"=="") {
             * collapse destroys the data; save and restore around it
             tempfile tmpfile
             qui save `tmpfile', replace

             if (`dotsmean'!=0&"`ymeanON'"=="T") {
                collapse (`stat') `y_var' (mean) `x_var' `wt', by(`xcat') fast
                mkmat `xcat' `x_var', matrix(`matxmean')
                mkmat `y_var', matrix(`temp_b')
                mat `temp_b'=`temp_b''          /* row vector */
             }
             else if (`dotsmean'!=0&"`ymeanON'"!="T") {
                collapse (mean) `x_var' `wt', by(`xcat') fast
                mkmat `xcat' `x_var', matrix(`matxmean')
             }
             else {
                collapse (`stat') `y_var' `wt', by(`xcat') fast
                mkmat `y_var', matrix(`temp_b')
                mat `temp_b'=`temp_b''
             }
             use `tmpfile', clear
          }
          else {
             tempname obj
             if (`dotsmean'!=0&"`ymeanON'"=="T") {
                tempfile tmpfile
                qui save `tmpfile', replace

                gcollapse (`stat') `y_var' (mean) `x_var' `wt', by(`xcat') fast
                mkmat `xcat' `x_var', matrix(`matxmean')
                mkmat `y_var', matrix(`temp_b')
                mat `temp_b'=`temp_b''          /* row vector */

                use `tmpfile', clear
             }
             else if (`dotsmean'!=0&"`ymeanON'"!="T") {
                qui gstats tabstat `x_var' `wt', stats(mean) by(`xcat') matasave("`obj'")
                mata: st_matrix("`matxmean'", (`obj'.getnum(.,1), `obj'.getOutputVar("`x_var'")))
                mata: mata drop `obj'
             }
             else {
                qui gstats tabstat `y_var' `wt', stats(`stat') by(`xcat') matasave("`obj'")
                mata: st_matrix("`temp_b'", `obj'.getOutputVar("`y_var'")')
                mata: mata drop `obj'
             }
          }
       }
       else {
          * sorted, unweighted, no gtools: use the Mata helper on stored views
          tempname output
          if (`dotsmean'!=0&"`ymeanON'"=="T") {
             mata: `output'=binsreg_stat(`yname', `catname', `nbins', `edge', "quantile", `quantile'); ///
                   st_matrix("`temp_b'", `output'[.,2]'); ///
                   `output'=binsreg_stat(`xname', `catname', `nbins', `edge', "mean", -1); ///
                   st_matrix("`matxmean'", `output'[.,1..2])
          }
          else if (`dotsmean'!=0&"`ymeanON'"!="T") {
             mata: `output'=binsreg_stat(`xname', `catname', `nbins', `edge', "mean", -1); ///
                   st_matrix("`matxmean'", `output')
          }
          else {
             mata: `output'=binsreg_stat(`yname', `catname', `nbins', `edge', "quantile", `quantile'); ///
                   st_matrix("`temp_b'", `output'[.,2]')
          }
          mata: mata drop `output'
       }
    }

    * Regression?
    if ("`ymeanON'"!="T") {
       if (`p'==0) {
          * piecewise-constant fit: bin indicators as regressors
          if ("`boot'"=="on") {
             capture bsqreg `y_var' ibn.`xcat' `w_var', quantile(`quantile') reps(`reps')
          }
          else {
             capture qreg `y_var' ibn.`xcat' `w_var' `wt', quantile(`quantile') `vce' `qregopt'
          }
          if (_rc==0) {
             matrix `temp_b'=e(b)
             matrix `temp_V'=e(V)
             * flag dropped (collinear) terms in b/V
             mata: binsreg_checkdrop("`temp_b'", "`temp_V'", `nbins', "T")
          }
          else {
             error _rc
             exit _rc
          }
       }
       else {
          * piecewise polynomial of degree p with s smoothness constraints:
          * build the spline basis in Mata, then fit qreg/bsqreg on it
          local nseries=(`p'-`s'+1)*(`nbins'-1)+`p'+1
          local series ""
          forvalues i=1/`nseries' {
             tempvar sp`i'
             local series `series' `sp`i''
             qui gen `sp`i''=. in 1
          }

          mata: binsreg_st_spdes(`xname', "`series'", "`kmat'", `catname', `p', 0, `s')

          if ("`boot'"=="on") {
             capture bsqreg `y_var' `series' `w_var', quantile(`quantile') reps(`reps')
          }
          else {
             capture qreg `y_var' `series' `w_var' `wt', quantile(`quantile') `vce' `qregopt'
          }
          * store results
          if (_rc==0) {
             matrix `temp_b'=e(b)
             matrix `temp_V'=e(V)
             mata: binsreg_checkdrop("`temp_b'", "`temp_V'", `nseries', "T")
          }
          else {
             error _rc
             exit _rc
          }
       }
    }

    ereturn clear
    ereturn matrix bmat=`temp_b'
    ereturn matrix Vmat=`temp_V'
    ereturn matrix xmat=`matxmean'      /* xcat, xbar */
    ereturn local spmethod "`ymeanON'"
end

mata:

  // Prediction for plotting
  real matrix binsqreg_plotmat(string scalar eb, string scalar eV, real scalar cval, ///
                               string scalar knotname, real scalar J, ///
                               real scalar p, real scalar s, real scalar deriv, ///
                               string scalar type, real scalar ngrid, string scalar muwmat, ///
                               real scalar nw, string scalar spmethod, string scalar avar, | string scalar muxmat)
  {
    real matrix bmat, vmat, knot, xmean, wvec, eval, out, fit, se, Xm, result
    real scalar nseries

    nseries=(p-s+1)*(J-1)+p+1
    bmat=st_matrix(eb)'

    if
(type=="ci"|type=="cb") vmat=st_matrix(eV) + + // Prepare evaluation points + eval=J(0,3,.) + if (args()==15) { + xmean=st_matrix(muxmat) + eval=(eval \ (xmean[,2], J(J, 1, 0), xmean[,1])) + } + if (ngrid!=0) eval=(eval \ binsreg_grids(knotname, ngrid)) + + // import w variables and the CONSTANT!!! + if (nw>0) wvec=(st_matrix(muwmat), 1) + else wvec=1 + + fit=J(0,1,.) + se=J(0,1,.) + if (spmethod=="T") { + if (args()==15) fit=(fit \ bmat) + if (ngrid!=0) { + fit=(fit \ (bmat#(J(ngrid,1,1)\.))) + fit=fit[|1 \ (rows(fit)-1)|] + } + out=(eval, fit) + } + else { + Xm=binsreg_spdes(eval[,1], knotname, eval[,3], p, deriv, s) + + if (type=="dots"|type=="line") { + if (deriv==0) { + Xm=(Xm, J(rows(Xm), 1, 1)#wvec) + fit=binsreg_pred(Xm, bmat, ., "xb")[,1] + } + else { + fit=binsreg_pred(Xm, bmat[|1 \ nseries|], ., "xb")[,1] + } + out=(eval, fit) + } + else { + if (deriv==0) { + if (avar=="on") { + vmat=(vmat[|1,1 \ nseries, nseries|], vmat[|1,cols(vmat) \ nseries, cols(vmat)|] \ /// + vmat[|rows(vmat),1 \ rows(vmat), nseries|], vmat[rows(vmat), cols(vmat)]) + se=binsreg_pred((Xm, J(rows(Xm),1,1)), ., vmat, "se")[,2] + Xm=(Xm, J(rows(Xm), 1, 1)#wvec) + fit=binsreg_pred(Xm, bmat, ., "xb")[,1] + out=(eval, fit-cval*se, fit+cval*se) + } + else { + Xm=(Xm, J(rows(Xm), 1, 1)#wvec) + result=binsreg_pred(Xm, bmat, vmat, "all") + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + } + else { + result=binsreg_pred(Xm, bmat[|1 \ nseries|], vmat[|1,1 \ nseries, nseries|], "all") + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + } + } + + if (type=="dots"|(type=="line"&(s==0|s-deriv<=0))) { + out[selectindex(out[,2]:==1),4]=J(sum(out[,2]),1,.) + } + if (type=="ci"|(type=="cb"&(s==0|s-deriv<=0))) { + out[selectindex(out[,2]:==1),4..5]=J(sum(out[,2]),2,.) 
+ } + + return(out) + } + + + + +end + diff --git a/110/replication_package/replication/ado/plus/b/binsqreg.sthlp b/110/replication_package/replication/ado/plus/b/binsqreg.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..34087ed180a9a083b030102845ed1953b3abc4ca --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binsqreg.sthlp @@ -0,0 +1,432 @@ +{smcl} +{* *! version 1.2 09-OCT-2022}{...} +{viewerjumpto "Syntax" "binsqreg##syntax"}{...} +{viewerjumpto "Description" "binsqreg##description"}{...} +{viewerjumpto "Options" "binsqreg##options"}{...} +{viewerjumpto "Examples" "binsqreg##examples"}{...} +{viewerjumpto "Stored results" "binsqreg##stored_results"}{...} +{viewerjumpto "References" "binsqreg##references"}{...} +{viewerjumpto "Authors" "binsqreg##authors"}{...} +{cmd:help binsqreg} +{hline} + +{title:Title} + +{p 4 8}{hi:binsqreg} {hline 2} Data-Driven Binscatter Quantile Regression with Robust Inference Procedures and Plots.{p_end} + + +{marker syntax}{...} +{title:Syntax} + +{p 4 13} {cmdab:binsqreg} {depvar} {it:indvar} [{it:othercovs}] {ifin} {weight} [ {cmd:,} {opt quantile(#)} {opt deriv(v)} {opt at(position)}{p_end} +{p 13 13} {opt dots(dotsopt)} {opt dotsgrid(dotsgridoption)} {opt dotsplotopt(dotsoption)}{p_end} +{p 13 13} {opt line(lineopt)} {opt linegrid(#)} {opt lineplotopt(lineoption)}{p_end} +{p 13 13} {opt ci(ciot)} {opt cigrid(cigridoption)} {opt ciplotopt(rcapoption)}{p_end} +{p 13 13} {opt cb(cbopt)} {opt cbgrid(#)} {opt cbplotopt(rareaoption)}{p_end} +{p 13 13} {opt polyreg(p)} {opt polyreggrid(#)} {opt polyregcigrid(#)} {opt polyregplotopt(lineoption)}{p_end} +{p 13 13} {opth by(varname)} {cmd:bycolors(}{it:{help colorstyle}list}{cmd:)} {cmd:bysymbols(}{it:{help symbolstyle}list}{cmd:)} {cmd:bylpatterns(}{it:{help linepatternstyle}list}{cmd:)}{p_end} +{p 13 13} {opt nbins(nbinsopt)} {opt binspos(position)} {opt binsmethod(method)} {opt nbinsrot(#)} {opt samebinsby} {opt randcut(#)}{p_end} +{p 13 13} 
{cmd:pselect(}{it:{help numlist}}{cmd:)} {cmd:sselect(}{it:{help numlist}}{cmd:)}{p_end} +{p 13 13} {opt nsims(#)} {opt simsgrid(#)} {opt simsseed(seed)}{p_end} +{p 13 13} {opt dfcheck(n1 n2)} {opt masspoints(masspointsoption)}{p_end} +{p 13 13} {cmd:vce(}{it:{help qreg##qreg_vcetype:vcetype}}{cmd:)} {opt asyvar(on/off)}{p_end} +{p 13 13} {opt level(level)} {opt qregopt(qreg_option)} {opt usegtools(on/off)} {opt noplot} {opt savedata(filename)} {opt replace}{p_end} +{p 13 13} {opt plotxrange(min max)} {opt plotyrange(min max)} {it:{help twoway_options}} ]{p_end} + +{p 4 8} where {depvar} is the dependent variable, {it:indvar} is the independent variable for binning, and {it:othercovs} are other covariates to be controlled for.{p_end} + +{p 4 8} The degree of the piecewise polynomial p, the number of smoothness constraints s, and the derivative order v are integers +satisfying 0 <= s,v <= p, which can take different values in each case.{p_end} + +{p 4 8} {opt fweight}s and {opt pweight}s are allowed; see {help weight}.{p_end} + +{marker description}{...} +{title:Description} + +{p 4 8} {cmd:binsqreg} implements binscatter quantile regression with robust inference procedures and plots, following the results in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":Cattaneo, Crump, Farrell and Feng (2022a)}. +Binscatter provides a flexible way to describe the quantile relationship between two variables, after possibly adjusting for other covariates, based on partitioning/binning of the independent variable of interest. +The main purpose of this command is to generate binned scatter plots with curve estimation with robust +pointwise confidence intervals and uniform confidence band. +If the binning scheme is not set by the user, the companion command {help binsregselect:binsregselect} is used to implement binscatter in a data-driven way. 
+Hypothesis testing for parametric specifications of and shape restrictions on the regression function can be conducted via the companion command {help binstest:binstest}. +Hypothesis testing for pairwise group comparisons can be conducted via the companion command {help binspwc: binspwc}. Binscatter estimation based on the least squares method can be conducted via the command {help binsreg: binsreg}. +{p_end} + +{p 4 8} A detailed introduction to this command is given in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Cattaneo, Crump, Farrell and Feng (2022b)}. +Companion R and Python packages with the same capabilities are available (see website below). +{p_end} + +{p 4 8} Companion commands: {help binstest:binstest} for hypothesis testing of parametric specifications and shape restrictions, +{help binspwc:binspwc} for hypothesis testing for pairwise group comparisons, +and {help binsregselect:binsregselect} for data-driven binning selection. +{p_end} + +{p 4 8} Related Stata, R and Python packages are available in the following website:{p_end} + +{p 8 8} {browse "https://nppackages.github.io/":https://nppackages.github.io/}{p_end} + + +{marker options}{...} +{title:Options} + +{dlgtab:Estimand} + +{p 4 8} {opt quantile(#)} specifies the quantile to be estimated and should be a number between 0 and 1, exclusive. +The default value of 0.5 corresponds to the median. +{p_end} + +{p 4 8} {opt deriv(v)} specifies the derivative order of the regression function for estimation, testing and plotting. +The default is {cmd:deriv(0)}, which corresponds to the function itself. +{p_end} + +{p 4 8} {opt at(position)} specifies the values of {it:othercovs} at which the estimated function is evaluated for plotting. +The default is {cmd:at(mean)}, which corresponds to the mean of {it:othercovs}. 
+Other options are: {cmd:at(median)} for the median of {it:othercovs}, {cmd:at(0)} for zeros,
+and {cmd:at(filename)} for particular values of {it:othercovs} saved in another file.
+{p_end}
+
+{p 4 8} Note: When {cmd:at(mean)} or {cmd:at(median)} is specified, all factor variables in {it:othercovs} (if specified) are excluded from the evaluation (set as zero).
+{p_end}
+
+{dlgtab:Dots}
+
+{p 4 8} {opt dots(dotsopt)} sets the degree of polynomial and the number of smoothness constraints for point estimation and plotting as "dots".
+If {cmd:dots(p s)} is specified, a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints is used.
+The default is {cmd:dots(0 0)}, which corresponds to piecewise constant (canonical binscatter).
+If {cmd:dots(T)} is specified, the default {cmd:dots(0 0)} is used unless the degree {it:p} and smoothness {it:s} selection
+is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}).
+If {cmd:dots(F)} is specified, the dots are not included in the plot.
+{p_end}
+
+{p 4 8} {opt dotsgrid(dotsgridoption)} specifies the number and location of dots within each bin to be plotted.
+Two options are available: {it:mean} and a {it:numeric} non-negative integer.
+The option {opt dotsgrid(mean)} adds the sample average of {it:indvar} within each bin to the grid of evaluation points.
+The option {opt dotsgrid(#)} adds {it:#} number of evenly-spaced points to the grid of evaluation points for each bin.
+Both options can be used simultaneously: for example, {opt dotsgrid(mean 5)} generates six evaluation points
+within each bin containing the sample mean of {it:indvar} within each bin and five evenly-spaced points.
+Given this choice, the dots are point estimates evaluated over the selected grid within each bin.
+The default is {opt dotsgrid(mean)}, which corresponds to one dot per bin evaluated at the sample average of {it:indvar} within each bin (canonical binscatter).
+{p_end} + +{p 4 8} {opt dotsplotopt(dotsoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the plotted dots. +{p_end} + +{dlgtab:Line} + +{p 4 8} {opt line(lineopt)} sets the degree of polynomial and the number of smoothness constraints +for plotting as a "line". If {cmd:line(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If {cmd:line(T)} is specified, {cmd:line(0 0)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:line(F)} or {cmd:line()} is specified, the line is not included in the plot. +The default is {cmd:line()}. +{p_end} + +{p 4 8} {opt linegrid(#)} specifies the number of evaluation points of an evenly-spaced grid within +each bin used for evaluation of the point estimate set by the {cmd:line(p s)} option. +The default is {cmd:linegrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for fitting/plotting the line. +{p_end} + +{p 4 8} {opt lineplotopt(lineoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the plotted line. +{p_end} + +{dlgtab:Confidence Intervals} + +{p 4 8} {opt ci(ciopt)} specifies the degree of polynomial and the number of smoothness constraints +for constructing confidence intervals. If {cmd:ci(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If {cmd:ci(T)} is specified, {cmd:ci(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:ci(F)} or {cmd:ci()} is specified, the confidence intervals are not included in the plot. +The default is {cmd:ci()}. 
+{p_end} + +{p 4 8} {opt cigrid(cigridoption)} specifies the number and location of evaluation points in the grid +used to construct the confidence intervals set by the {opt ci(p s)} option. +Two options are available: {it:mean} and a {it:numeric} non-negative integer. +The option {opt cigrid(mean)} adds the sample average of {it:indvar} within each bin to the grid of evaluation points. +The option {opt cigrid(#)} adds {it:#} number of evenly-spaced points to the grid of evaluation points for each bin. +Both options can be used simultaneously: for example, {opt cigrid(mean 5)} generates six evaluation points within each bin +containing the sample mean of {it:indvar} within each bin and five evenly-spaced points. +The default is {opt cigrid(mean)}, which corresponds to one evaluation point set at the sample average +of {it:indvar} within each bin for confidence interval construction. +{p_end} + +{p 4 8} {opt ciplotopt(rcapoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the confidence intervals. +{p_end} + +{dlgtab:Confidence Band} + +{p 4 8} {opt cb(cbopt)} specifies the degree of polynomial and the number of smoothness constraints +for constructing the confidence band. If {cmd:cb(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If the option {cmd:cb(T)} is specified, {cmd:cb(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:cb(F)} or {cmd:cb()} is specified, the confidence band is not included in the plot. +The default is {cmd:cb()}. +{p_end} + +{p 4 8} {opt cbgrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used for evaluation of the point estimate set by the {cmd:cb(p s)} option. 
+The default is {cmd:cbgrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for confidence band construction. +{p_end} + +{p 4 8} {opt cbplotopt(rareaoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the confidence band. +{p_end} + +{dlgtab:Global Polynomial Regression} + +{p 4 8} {opt polyreg(p)} sets the degree {it:p} of a global polynomial regression model for plotting. +By default, this fit is not included in the plot unless explicitly specified. +Recommended specification is {cmd:polyreg(3)}, which adds a cubic polynomial fit of the regression function of interest to the binned scatter plot. +{p_end} + +{p 4 8} {opt polyreggrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used for +evaluation of the point estimate set by the {cmd:polyreg(p)} option. +The default is {cmd:polyreggrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for confidence interval construction. +{p_end} + +{p 4 8} {opt polyregcigrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used for +constructing confidence intervals based on polynomial regression set by the {cmd:polyreg(p)} option. +The default is {cmd:polyregcigrid(0)}, which corresponds to not plotting confidence intervals for the global polynomial regression approximation. +{p_end} + +{p 4 8} {opt polyregplotopt(lineoption)} standard graphs options to be passed on to the {help twoway:twoway} +command to modify the appearance of the global polynomial regression fit. +{p_end} + +{dlgtab:Subgroup Analysis} + +{p 4 8} {opt by(varname)} specifies the variable containing the group indicator to perform subgroup analysis; +both numeric and string variables are supported. +When {opt by(varname)} is specified, {cmdab:binsreg} implements estimation and inference for each subgroup separately, +but produces a common binned scatter plot. 
+By default, the binning structure is selected for each subgroup separately, +but see the option {cmd:samebinsby} below for imposing a common binning structure across subgroups. +{p_end} + +{p 4 8} {cmd:bycolors(}{it:{help colorstyle}list}{cmd:)} specifies an ordered list of colors for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{p 4 8} {cmd:bysymbols(}{it:{help symbolstyle}list}{cmd:)} specifies an ordered list of symbols for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{p 4 8} {cmd:bylpatterns(}{it:{help linepatternstyle}list}{cmd:)} specifies an ordered list of line patterns for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{dlgtab:Binning/Degree/Smoothness Selection} + +{p 4 8} {opt nbins(nbinsopt)} sets the number of bins for partitioning/binning of {it:indvar}. +If {cmd:nbins(T)} or {cmd:nbins()} (default) is specified, the number of bins is selected via the companion command {help binsregselect:binsregselect} +in a data-driven, optimal way whenever possible. If a {help numlist:numlist} with more than one number is specified, +the number of bins is selected within this list via the companion command {help binsregselect:binsregselect}. +{p_end} + +{p 4 8} {opt binspos(position)} specifies the position of binning knots. +The default is {cmd:binspos(qs)}, which corresponds to quantile-spaced binning (canonical binscatter). +Other options are: {cmd:es} for evenly-spaced binning, or a {help numlist} for manual specification of +the positions of inner knots (which must be within the range of {it:indvar}). +{p_end} + +{p 4 8} {opt binsmethod(method)} specifies the method for data-driven selection of the number of bins via the companion command {help binsregselect:binsregselect}. +The default is {cmd:binsmethod(dpi)}, which corresponds to the IMSE-optimal direct plug-in rule. +The other option is: {cmd:rot} for rule of thumb implementation. 
+{p_end}
+
+{p 4 8} {opt nbinsrot(#)} specifies an initial number of bins value used to construct the DPI number of bins selector.
+If not specified, the data-driven ROT selector is used instead.
+{p_end}
+
+{p 4 8} {opt samebinsby} forces a common partitioning/binning structure across all subgroups specified by the option {cmd:by()}.
+The knot positions are selected according to the option {cmd:binspos()} and using the full sample.
+If {cmd:nbins()} is not specified, then the number of bins is selected via the companion command
+{help binsregselect:binsregselect} and using the full sample.
+{p_end}
+
+{p 4 8} {opt randcut(#)} specifies the upper bound on a uniformly distributed variable used to draw a subsample
+for bins/degree/smoothness selection.
+Observations for which {cmd:runiform()<=#} are used. # must be between 0 and 1.
+By default, max(5,000, 0.01n) observations are used if the sample size n>5,000.
+{p_end}
+
+{p 4 8} {opt pselect(numlist)} specifies a list of numbers within which the degree of polynomial {it:p} for
+point estimation is selected. Piecewise polynomials of the selected optimal degree {it:p}
+are used to construct dots or line if {cmd:dots(T)} or {cmd:line(T)} is specified,
+whereas piecewise polynomials of degree {it:p+1} are used to construct confidence intervals
+or confidence band if {cmd:ci(T)} or {cmd:cb(T)} is specified.
+{p_end}
+
+{p 4 8} {opt sselect(numlist)} specifies a list of numbers within which
+the number of smoothness constraints {it:s}
+for point estimation is selected. Piecewise polynomials with the selected optimal
+{it:s} smoothness constraints are used to construct dots or line
+if {cmd:dots(T)} or {cmd:line(T)} is specified,
+whereas piecewise polynomials with {it:s+1} constraints are used to construct
+confidence intervals or confidence band if {cmd:ci(T)} or {cmd:cb(T)} is specified.
+If not specified, for each value {it:p} supplied in the +option {cmd:pselect()}, only the piecewise polynomial with the maximum smoothness is considered, i.e., {it:s=p}. +{p_end} + +{p 4 8} Note: To implement the degree or smoothness selection, in addition to {cmd:pselect()} +or {cmd:sselect()}, {cmd:nbins(#)} must be specified. +{p_end} + +{dlgtab:Simulation} + +{p 4 8} {opt nsims(#)} specifies the number of random draws for constructing confidence bands. +The default is {cmd:nsims(500)}, which corresponds to 500 draws from a standard Gaussian random vector of size [(p+1)*J - (J-1)*s]. +A large number of random draws is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsgrid(#)} specifies the number of evaluation points of an evenly-spaced grid +within each bin used for evaluation of the supremum operation needed to construct confidence bands. +The default is {cmd:simsgrid(20)}, which corresponds to 20 evenly-spaced evaluation points +within each bin for approximating the supremum operator. +A large number of evaluation points is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsseed(#)} sets the seed for simulations. +{p_end} + +{dlgtab:Mass Points and Degrees of Freedom} + +{p 4 8} {opt dfcheck(n1 n2)} sets cutoff values for minimum effective sample size checks, +which take into account the number of unique values of {it:indvar} +(i.e., adjusting for the number of mass points), number of clusters, and +degrees of freedom of the different statistical models considered. +The default is {cmd:dfcheck(20 30)}. See Cattaneo, Crump, Farrell and Feng (2022b) for more details. +{p_end} + +{p 4 8} {opt masspoints(masspointsoption)} specifies how mass points in {it:indvar} are handled. +By default, all mass point and degrees of freedom checks are implemented. 
+Available options:
+{p_end}
+{p 8 8} {opt masspoints(noadjust)} omits mass point checks and the corresponding effective sample size adjustments.{p_end}
+{p 8 8} {opt masspoints(nolocalcheck)} omits within-bin mass point and degrees of freedom checks.{p_end}
+{p 8 8} {opt masspoints(off)} sets {opt masspoints(noadjust)} and {opt masspoints(nolocalcheck)} simultaneously.{p_end}
+{p 8 8} {opt masspoints(veryfew)} forces the command to proceed as if {it:indvar} has only a small number of mass points (i.e., distinct values).
+In other words, forces the command to proceed as if the mass point and degrees of freedom checks were failed.{p_end}
+
+{dlgtab:Standard Error}
+
+{p 4 8} {cmd:vce(}{it:{help qreg##qreg_vcetype:vcetype}}{cmd:)} specifies the {it:vcetype}
+for variance estimation used by the command {help qreg##qreg_options:qreg}. Bootstrapping-based VCE
+can also be obtained by setting {cmd:vce(boot, reps(#))} where {cmd:reps(#)} specifies
+the number of bootstrap replications. Weights are not allowed when bootstrapping VCE is specified.
+The default is {cmd:vce(robust)}.
+{p_end}
+
+{p 4 8} {opt asyvar(on/off)} specifies the method used to compute standard errors.
+If {cmd:asyvar(on)} is specified, the standard error of the nonparametric component is used and the uncertainty
+related to other control variables {it:othercovs} is omitted. Default is {cmd:asyvar(off)}, that is,
+the uncertainty related to {it:othercovs} is taken into account.
+{p_end}
+
+{dlgtab:Other Options}
+
+{p 4 8} {opt level(#)} sets the nominal confidence level for confidence interval and confidence band estimation. Default is {cmd:level(95)}.
+{p_end}
+
+{p 4 8} {opt qregopt(qreg_option)} options to be passed on to the command {help qreg##qreg_options:qreg}.
+For example, options that control the optimization process can be added here.
+{p_end} + +{p 4 8}{opt usegtools(on/off)} forces the use of several commands in the community-distributed Stata package {cmd:gtools} +to speed the computation up, if {it:on} is specified. +Default is {cmd:usegtools(off)}. +{p_end} + +{p 4 8} For more information about the package {cmd:gtools}, please see {browse "https://gtools.readthedocs.io/en/latest/index.html":https://gtools.readthedocs.io/en/latest/index.html}. +{p_end} + +{p 4 8} {opt noplot} omits binscatter plotting. +{p_end} + +{p 4 8} {opt savedata(filename)} specifies a filename for saving all data underlying the binscatter plot (and more). +{p_end} + +{p 4 8} {opt replace} overwrites the existing file when saving the graph data. +{p_end} + +{p 4 8} {opt plotxrange(min max)} specifies the range of the x-axis for plotting. Observations outside the range are dropped in the plot. +{p_end} + +{p 4 8} {opt plotyrange(min max)} specifies the range of the y-axis for plotting. Observations outside the range are dropped in the plot. +{p_end} + +{p 4 8} {it:{help twoway_options}} any unrecognized options are appended to the end of the twoway command generating the binned scatter plot. +{p_end} + + +{marker examples}{...} +{title:Examples} + +{p 4 8} Setup{p_end} +{p 8 8} . {stata sysuse auto}{p_end} + +{p 4 8} Run a binscatter median regression and report the plot{p_end} +{p 8 8} . {stata binsqreg price weight length foreign, quantile(0.5)}{p_end} + +{p 4 8} Add confidence intervals and confidence band{p_end} +{p 8 8} . 
{stata binsqreg price weight length foreign, quantile(0.5) ci(3 3) cb(3 3) nbins(5)}{p_end} + +{marker stored_results}{...} +{title:Stored results} + +{synoptset 17 tabbed}{...} +{p2col 5 17 21 2: Scalars}{p_end} +{synopt:{cmd:e(N)}}number of observations{p_end} +{synopt:{cmd:e(level)}}confidence level{p_end} +{synopt:{cmd:e(dots_p)}}degree of polynomial for dots{p_end} +{synopt:{cmd:e(dots_s)}}smoothness of polynomial for dots{p_end} +{synopt:{cmd:e(line_p)}}degree of polynomial for line{p_end} +{synopt:{cmd:e(line_s)}}smoothness of polynomial for line{p_end} +{synopt:{cmd:e(ci_p)}}degree of polynomial for confidence interval{p_end} +{synopt:{cmd:e(ci_s)}}smoothness of polynomial for confidence interval{p_end} +{synopt:{cmd:e(cb_p)}}degree of polynomial for confidence band{p_end} +{synopt:{cmd:e(cb_s)}}smoothness of polynomial for confidence band{p_end} +{p2col 5 17 21 2: Matrices}{p_end} +{synopt:{cmd:e(N_by)}}number of observations for each group{p_end} +{synopt:{cmd:e(Ndist_by)}}number of distinct values for each group{p_end} +{synopt:{cmd:e(Nclust_by)}}number of clusters for each group{p_end} +{synopt:{cmd:e(nbins_by)}}number of bins for each group{p_end} +{synopt:{cmd:e(cval_by)}}critical value for each group, used for confidence bands{p_end} +{synopt:{cmd:e(imse_var_rot)}}variance constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_bsq_rot)}}bias constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_var_dpi)}}variance constant in IMSE, DPI selection{p_end} +{synopt:{cmd:e(imse_bsq_dpi)}}bias constant in IMSE, DPI selection{p_end} + +{marker references}{...} +{title:References} + +{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022a. +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":On Binscatter}. +{it:arXiv:1902.09608}. +{p_end} + +{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022b. 
+{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Binscatter Regressions}. +{it:arXiv:1902.09615}. +{p_end} + + +{marker authors}{...} +{title:Authors} + +{p 4 8} Matias D. Cattaneo, Princeton University, Princeton, NJ. +{browse "mailto:cattaneo@princeton.edu":cattaneo@princeton.edu}. +{p_end} + +{p 4 8} Richard K. Crump, Federal Reserve Band of New York, New York, NY. +{browse "mailto:richard.crump@ny.frb.org":richard.crump@ny.frb.org}. +{p_end} + +{p 4 8} Max H. Farrell, University of Chicago, Chicago, IL. +{browse "mailto:max.farrell@chicagobooth.edu":max.farrell@chicagobooth.edu}. +{p_end} + +{p 4 8} Yingjie Feng, Tsinghua University, Beijing, China. +{browse "mailto:fengyingjiepku@gmail.com":fengyingjiepku@gmail.com}. +{p_end} + diff --git a/110/replication_package/replication/ado/plus/b/binsreg.ado b/110/replication_package/replication/ado/plus/b/binsreg.ado new file mode 100644 index 0000000000000000000000000000000000000000..fee1d419e52db4149a64b8e572a8e9de119669f2 --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binsreg.ado @@ -0,0 +1,2407 @@ +*! 
version 1.2 09-Oct-2022 + +capture program drop binsreg +program define binsreg, eclass + version 13 + + syntax varlist(min=2 numeric fv ts) [if] [in] [fw aw pw] [, deriv(integer 0) at(string asis) /// + absorb(string asis) reghdfeopt(string asis) /// + dots(string) dotsgrid(string) dotsplotopt(string asis) /// + line(string) linegrid(integer 20) lineplotopt(string asis) /// + ci(string) cigrid(string) ciplotopt(string asis) /// + cb(string) cbgrid(integer 20) cbplotopt(string asis) /// + polyreg(string) polyreggrid(integer 20) polyregcigrid(integer 0) polyregplotopt(string asis) /// + by(varname) bycolors(string asis) bysymbols(string asis) bylpatterns(string asis) /// + nbins(string) binspos(string) binsmethod(string) nbinsrot(string) /// + pselect(numlist integer >=0) sselect(numlist integer >=0) /// + samebinsby randcut(numlist max=1 >=0 <=1) /// + nsims(integer 500) simsgrid(integer 20) simsseed(numlist integer max=1 >=0) /// + dfcheck(numlist integer max=2 >=0) masspoints(string) usegtools(string) /// + vce(passthru) level(real 95) asyvar(string) /// + noplot savedata(string asis) replace /// + plotxrange(numlist asc max=2) plotyrange(numlist asc max=2) *] + + ********************************************* + * Regularization constant (for checking only) + local qrot=2 + + ************************************** + * Create weight local + if ("`weight'"!="") { + local wt [`weight'`exp'] + local wtype=substr("`weight'",1,1) + } + + ********************** + ** Extract options *** + ********************** + * default vce, clustered? 
+ if ("`vce'"=="") local vce "vce(robust)" + local vcetemp: subinstr local vce "vce(" "", all + local vcetemp: subinstr local vcetemp ")" "", all + tokenize "`vcetemp'", parse(", ") + if ("`1'"=="cl"|"`1'"=="clu"|"`1'"=="clus"|"`1'"=="clust"| /// + "`1'"=="cluste"|"`1'"=="cluster") { + if ("`3'"==""|"`3'"==",") local clusterON "T" /* cluster is specified */ + local clustervar `2' + } + if ("`asyvar'"=="") local asyvar "off" + if ("`binsmethod'"=="rot") local binsmethod "ROT" + if ("`binsmethod'"=="dpi") local binsmethod "DPI" + if ("`binsmethod'"=="") local binsmethod "DPI" + if ("`binspos'"=="es") local binspos "ES" + if ("`binspos'"=="qs") local binspos "QS" + if ("`binspos'"=="") local binspos "QS" + + * analyze options related to degrees and nbins ************* + if ("`dots'"!="T"&"`dots'"!="F"&"`dots'"!="") { + numlist "`dots'", integer max(2) range(>=0) + local dots=r(numlist) + } + if ("`line'"!="T"&"`line'"!="F"&"`line'"!="") { + numlist "`line'", integer max(2) range(>=0) + local line=r(numlist) + } + if ("`ci'"!="T"&"`ci'"!="F"&"`ci'"!="") { + numlist "`ci'", integer max(2) range(>=0) + local ci=r(numlist) + } + if ("`cb'"!="T"&"`cb'"!="F"&"`cb'"!="") { + numlist "`cb'", integer max(2) range(>=0) + local cb=r(numlist) + } + + + if ("`dots'"=="F") { /* shut down dots */ + local dots "" + local dotsgrid 0 + } + if ("`line'"=="F") local line "" + if ("`ci'"=="F") local ci "" + if ("`cb'"=="F") local cb "" + + + *************************************************************** + * 4 cases: select J, select p, user specified both, and error + local selection "" + + * analyze nbins + if ("`nbins'"=="T") local nbins=0 + local len_nbins=0 + if ("`nbins'"!=""&"`nbins'"!="F") { + numlist "`nbins'", integer sort + local nbins=r(numlist) + local len_nbins: word count `nbins' + } + + * analyze numlist in pselect and sselect + local len_p=0 + local len_s=0 + + if ("`pselect'"!="") { + numlist "`pselect'", integer range(>=`deriv') sort + local plist=r(numlist) + } + + if 
("`sselect'"!="") { + numlist "`sselect'", integer range(>=0) sort + local slist=r(numlist) + } + + local len_p: word count `plist' + local len_s: word count `slist' + + if (`len_p'==1&`len_s'==0) { + local slist `plist' + local len_s=1 + } + if (`len_p'==0&`len_s'==1) { + local plist `slist' + local len_p=1 + } + + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + if ("`nbins'"!=""|"`pselect'"!=""|"`sselect'"!="") { + di as error "nbins(), pselect() or sselect() incorrectly specified." + exit + } + } + + * 1st case: select J + if (("`nbins'"=="0"|`len_nbins'>1|"`nbins'"=="")&("`binspos'"=="ES"|"`binspos'"=="QS")) { + local selection "J" + } + if ("`selection'"=="J") { + if (`len_p'>1|`len_s'>1) { + if ("`nbins'"=="") { + di as error "nbins() must be specified for degree/smoothness selection." + exit + } + else { + di as error "Only one p and one s are allowed to select # of bins." + exit + } + } + if ("`plist'"=="") local plist=`deriv' + if ("`slist'"=="") local slist=`plist' + if ("`dots'"!=""&"`dots'"!="T"&"`dots'"!="F") { /* respect user-specified dots */ + local plist: word 1 of `dots' + local slist: word 2 of `dots' + if ("`slist'"=="") local slist `plist' + } + if ("`dots'"==""|"`dots'"=="T") local dots `plist' `slist' /* selection is based on dots */ + if ("`line'"=="T") local line `plist' `slist' + if ("`ci'"=="T") local ci `=`plist'+1' `=`slist'+1' + if ("`cb'"=="T") local cb `=`plist'+1' `=`slist'+1' + local len_p=1 + local len_s=1 + } /* e.g., binsreg y x, nbins(a b) or nbins(T) or pselect(a) nbins(T) */ + + * 2nd case: select P (at least for one object) + if ("`selection'"!="J" & ("`dots'"==""|"`dots'"=="T"|"`line'"=="T"|"`ci'"=="T"|"`cb'"=="T")) { + local pselectOK "T" /* p selection CAN be turned on as long as one of the four is T */ + } + + if ("`pselectOK'"=="T" & `len_nbins'==1 & (`len_p'>1|`len_s'>1)) { + local selection "P" + } /* e.g., binsreg y x, pselect(a b) or pselect() dots(T) */ + + * 3rd case: completely user-specified J and p + if 
((`len_p'<=1&`len_s'<=1) & "`selection'"!="J") { + local selection "NA" + if ("`dots'"==""|"`dots'"=="T") { + if (`len_p'==1&`len_s'==1) local dots `plist' `slist' + else local dots `deriv' `deriv' /* e.g., binsreg y x or , dots(0 0) nbins(20) */ + } + tokenize `dots' + if ("`2'"=="") local 2 `1' + if ("`line'"=="T") { + if (`len_p'==1&`len_s'==1) local line `plist' `slist' + else local line `dots' + } + if ("`ci'"=="T") { + if (`len_p'==1&`len_s'==1) local ci `=`plist'+1' `=`slist'+1' + else local ci `=`1'+1' `=`2'+1' + } + if ("`cb'"=="T") { + if (`len_p'==1&`len_s'==1) local cb `=`plist'+1' `=`slist'+1' + else local cb `=`1'+1' `=`2'+1' + } + } + + * exclude all other cases + if ("`selection'"=="") { + di as error "Degree, smoothness, or # of bins are not correctly specified." + exit + } + + ****** Now, extract from dots, line, etc. ************ + * dots + tokenize `dots' + local dots_p "`1'" + local dots_s "`2'" + if ("`dots_p'"==""|"`dots_p'"=="T") local dots_p=. + if ("`dots_s'"=="") local dots_s `dots_p' + + if ("`dotsgrid'"=="") local dotsgrid "mean" + local dotsngrid_mean=0 + if (strpos("`dotsgrid'","mean")!=0) { + local dotsngrid_mean=1 + local dotsgrid: subinstr local dotsgrid "mean" "", all + } + if (wordcount("`dotsgrid'")==0) local dotsngrid=0 + else { + confirm integer n `dotsgrid' + local dotsngrid `dotsgrid' + } + local dotsntot=`dotsngrid_mean'+`dotsngrid' + + + * line + tokenize `line' + local line_p "`1'" + local line_s "`2'" + local linengrid `linegrid' + if ("`line'"=="") local linengrid=0 + if ("`line_p'"==""|"`line_p'"=="T") local line_p=. 
+ if ("`line_s'"=="") local line_s `line_p' + + * ci + if ("`cigrid'"=="") local cigrid "mean" + local cingrid_mean=0 + if (strpos("`cigrid'","mean")!=0) { + local cingrid_mean=1 + local cigrid: subinstr local cigrid "mean" "", all + } + if (wordcount("`cigrid'")==0) local cingrid=0 + else { + confirm integer n `cigrid' + local cingrid `cigrid' + } + local cintot=`cingrid_mean'+`cingrid' + + tokenize `ci' + local ci_p "`1'" + local ci_s "`2'" + if ("`ci'"=="") local cintot=0 + if ("`ci_p'"==""|"`ci_p'"=="T") local ci_p=. + if ("`ci_s'"=="") local ci_s `ci_p' + + * cb + tokenize `cb' + local cb_p "`1'" + local cb_s "`2'" + local cbngrid `cbgrid' + if ("`cb'"=="") local cbngrid=0 + if ("`cb_p'"==""|"`cb_p'"=="T") local cb_p=. + if ("`cb_s'"=="") local cb_s `cb_p' + + + * Add warnings about degrees for estimation and inference + if ("`selection'"=="J") { + if ("`ci_p'"!=".") { + if (`ci_p'<=`dots_p') { + local ci_p=`dots_p'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the degree for dots()." + } + } + if ("`cb_p'"!=".") { + if (`cb_p'<=`dots_p') { + local cb_p=`dots_p'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. It must be greater than the degree for dots()." + } + } + } + if ("`selection'"=="NA") { + if ("`ci'"!=""|"`cb'"!="") { + di as text "Warning: Confidence intervals/bands are valid when nbins() is much larger than IMSE-optimal choice." + } + } + * if selection==P, compare ci_p/cb_p with P_opt later + + * poly fit + local polyregngrid `polyreggrid' + local polyregcingrid `polyregcigrid' + if ("`polyreg'"!="") confirm integer n `polyreg' + else local polyregngrid=0 + + * range of x axis and y axis? 
+ tokenize `plotxrange' + local min_xr "`1'" + local max_xr "`2'" + tokenize `plotyrange' + local min_yr "`1'" + local max_yr "`2'" + + + * Simuls + local simsngrid=`simsgrid' + + * Record if nbins specified by users + local nbins_full `nbins' + if ("`selection'"=="NA") local binselectmethod "User-specified" + else { + if ("`binsmethod'"=="DPI") local binselectmethod "IMSE-optimal plug-in choice" + if ("`binsmethod'"=="ROT") local binselectmethod "IMSE-optimal rule-of-thumb choice" + if ("`selection'"=="J") local binselectmethod "`binselectmethod' (select # of bins)" + if ("`selection'"=="P") local binselectmethod "`binselectmethod' (select degree and smoothness)" + } + + * Mass point check? + if ("`masspoints'"=="") { + local massadj "T" + local localcheck "T" + } + else if ("`masspoints'"=="off") { + local massadj "F" + local localcheck "F" + } + else if ("`masspoints'"=="noadjust") { + local massadj "F" + local localcheck "T" + } + else if ("`masspoints'"=="nolocalcheck") { + local massadj "T" + local localcheck "F" + } + else if ("`masspoints'"=="veryfew") { + local fewmasspoints "T" /* count mass point, but turn off checks */ + } + + * extract dfcheck + if ("`dfcheck'"=="") local dfcheck 20 30 + tokenize `dfcheck' + local dfcheck_n1 "`1'" + local dfcheck_n2 "`2'" + + * evaluate at w from another dataset? + if (`"`at'"'!=`""'&`"`at'"'!=`"mean"'&`"`at'"'!=`"median"'&`"`at'"'!=`"0"') local atwout "user" + + + * use gtools commands instead? + if ("`usegtools'"=="off") local usegtools "" + if ("`usegtools'"=="on") local usegtools usegtools + if ("`usegtools'"!="") { + capture which gtools + if (_rc) { + di as error "Gtools package not installed." 
+ exit + } + local localcheck "F" + local sel_gtools "on" + * use gstats tab instead of tabstat/collapse + * use gquantiles instead of _pctile + * use gunique instead of binsreg_uniq + * use fasterxtile instead of irecode (within binsreg_irecode) + * shut down local checks & do not sort + } + else local sel_gtools "off" + + * use reghdfe? + if ("`absorb'"!="") { + capture which reghdfe + if (_rc) { + di as error "reghdfe not installed." + exit + } + local hdmethod "T" + } + + ************************* + **** error checks ******* + ************************* + if (`deriv'<0) { + di as error "Derivative incorrectly specified." + exit + } + if (`dotsngrid'<0|`linengrid'<0|`cingrid'<0|`cbngrid'<0|`simsngrid'<0) { + di as error "Number of evaluation points incorrectly specified." + exit + } + if (`level'>100|`level'<0) { + di as error "Confidence level incorrectly specified." + exit + } + if ("`dots_p'"!=".") { + if (`dots_p'<`dots_s') { + di as error "p cannot be smaller than s." + exit + } + if (`dots_p'<`deriv') { + di as error "p for dots cannot be less than deriv." + exit + } + } + if ("`line_p'"!=".") { + if (`line_p'<`line_s') { + di as error "p cannot be smaller than s." + exit + } + if (`line_p'<`deriv') { + di as error "p for line cannot be less than deriv." + exit + } + } + if ("`ci_p'"!=".") { + if (`ci_p'<`ci_s') { + di as error "p cannot be smaller than s." + exit + } + if (`ci_p'<`deriv') { + di as error "p for CI cannot be less than deriv." + exit + } + } + if ("`cb_p'"!=".") { + if (`cb_p'<`cb_s') { + di as error "p cannot be smaller than s." + exit + } + if (`cb_p'<`deriv') { + di as error "p for CB cannot be less than deriv." + exit + } + } + if ("`polyreg'"!="") { + if (`polyreg'<`deriv') { + di as error "polyreg() cannot be less than deriv()." + exit + } + } + if (`"`savedata'"'!=`""') { + if ("`replace'"=="") { + confirm new file `"`savedata'.dta"' + } + if ("`plot'"!="") { + di as error "Plot cannot be turned off if graph data are requested." 
+ exit + } + } + if (`polyregcingrid'!=0&"`polyreg'"=="") { + di as error "polyreg() is missing." + exit + } + if ("`binsmethod'"!="DPI"&"`binsmethod'"!="ROT") { + di as error "binsmethod incorrectly specified." + exit + } + ******** END error checking *************************** + + * Preserve data + preserve + + * Parse varlist into y_var, x_var and w_var; time series var. generated; + tokenize `varlist' + fvrevar `1', tsonly + local y_var "`r(varlist)'" + local y_varname "`1'" + + fvrevar `2', tsonly + local x_var "`r(varlist)'" + local x_varname "`2'" + + macro shift 2 + local w_var "`*'" + * read eval point for w from another file + if ("`atwout'"=="user") { + append using `at' + } + + fvrevar `w_var', tsonly + local w_var "`r(varlist)'" + local nwvar: word count `w_var' + + * Save the last obs in a vector and then drop it + tempname wuser /* a vector used to keep eval for w */ + if ("`atwout'"=="user") { + mata: st_matrix("`wuser'", st_data(`=_N', "`w_var'")) + qui drop in `=_N' + } + + * Get positions of factor vars + local indexlist "" + local i = 1 + foreach v in `w_var' { + if strpos("`v'", ".") == 0 { + local indexlist `indexlist' `i' + } + local ++i + } + + * add a default for at + if (`"`at'"'==""&`nwvar'>0) { + local at "mean" + } + + * Now, mark sample + marksample touse + markout `touse' `by', strok + qui keep if `touse' + local nsize=_N /* # of rows in the original dataset */ + + if ("`usegtools'"==""&("`masspoints'"!="off"|"`binspos'"=="QS")) { + if ("`:sortedby'"!="`x_var'") { + di as text in gr "Sorting dataset on `x_varname'..." + di as text in gr "Note: This step is omitted if dataset already sorted by `x_varname'." 
+ sort `x_var', stable + } + local sorted "sorted" + } + + if ("`wtype'"=="f") qui sum `x_var' `wt', meanonly + else qui sum `x_var', meanonly + + local xmin=r(min) + local xmax=r(max) + local Ntotal=r(N) /* total sample size, with wt */ + * define the support of plot + if ("`plotxrange'"!="") { + local xsc `plotxrange' + if (wordcount("`xsc'")==1) local xsc `xsc' `xmax' + } + else local xsc `xmin' `xmax' + + * Effective sample size + local eN=`nsize' + * DO NOT check mass points and clusters outside loop unless needed + + * Check number of unique byvals & create local storing byvals + local byvarname `by' + if "`by'"!="" { + capture confirm numeric variable `by' + if _rc { + local bystring "T" + * generate a numeric version + tempvar by + tempname bylabel + qui egen `by'=group(`byvarname'), lname(`bylabel') + } + + local bylabel `:value label `by'' /* catch value labels for numeric by-vars too */ + + tempname byvalmatrix + qui tab `by', nofreq matrow(`byvalmatrix') + + local bynum=r(r) + forvalues i=1/`bynum' { + local byvals `byvals' `=`byvalmatrix'[`i',1]' + } + } + else local bynum=1 + + * Default colors, symbols, linepatterns + if (`"`bycolors'"'==`""') local bycolors /// + navy maroon forest_green dkorange teal cranberry lavender /// + khaki sienna emidblue emerald brown erose gold bluishgray + if (`"`bysymbols'"'==`""') local bysymbols /// + O D T S + X A a | V o d s t x + if (`"`bylpatterns'"'==`""') { + forval i=1/`bynum' { + local bylpatterns `bylpatterns' solid + } + } + + * Temp name in MATA + tempname xvec yvec byvec cluvec binedges + mata: `xvec'=st_data(., "`x_var'"); `yvec'=st_data(.,"`y_var'"); `byvec'=.; `cluvec'=. + + ******************************************************* + *** Mass point counting ******************************* + tempname Ndistlist Nclustlist mat_imse_var_rot mat_imse_bsq_rot mat_imse_var_dpi mat_imse_bsq_dpi + mat `Ndistlist'=J(`bynum',1,.) + mat `Nclustlist'=J(`bynum',1,.) 
+ * Matrices saving imse + mat `mat_imse_var_rot'=J(`bynum',1,.) + mat `mat_imse_bsq_rot'=J(`bynum',1,.) + mat `mat_imse_var_dpi'=J(`bynum',1,.) + mat `mat_imse_bsq_dpi'=J(`bynum',1,.) + + if (`bynum'>1) mata: `byvec'=st_data(.,"`by'") + if ("`clusterON'"=="T") mata: `cluvec'=st_data(.,"`clustervar'") + + ******************************************************** + ********** Bins, based on FULL sample ****************** + ******************************************************** + * knotlist: inner knot seq; knotlistON: local, knot available before loop + tempname fullkmat /* matrix name for saving knots based on the full sample */ + * Extract user-specified knot list + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + capture numlist "`binspos'", ascending + if (_rc==0) { + local knotlistON "T" + local knotlist `binspos' + local nbins: word count `knotlist' + local first: word 1 of `knotlist' + local last: word `nbins' of `knotlist' + if (`first'<=`xmin'|`last'>=`xmax') { + di as error "Inner knots specified out of allowed range." + exit + } + else { + local nbins=`nbins'+1 + local nbins_full `nbins' + local pos "user" + + foreach el of local knotlist { + mat `fullkmat'=(nullmat(`fullkmat') \ `el') + } + mat `fullkmat'=(`xmin' \ `fullkmat' \ `xmax') + } + } + else { + di as error "Numeric list incorrectly specified in binspos()." + exit + } + } + + * Discrete x? + if ("`fewmasspoints'"!="") local fullfewobs "T" + + * Bin selection using the whole sample if + if ("`fullfewobs'"==""&"`selection'"!="NA"&(("`by'"=="")|(("`by'"!="")&("`samebinsby'"!="")))) { + local selectfullON "T" + } + + if ("`selectfullON'"=="T") { + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xvec', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + } + * # of clusters + local Nclust=. 
+ if ("`clusterON'"=="T") { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + local eN=min(`eN', `Nclust') /* effective sample size */ + } + + * Check effective sample size + if ("`dots_p'"==".") local dotspcheck=6 + else local dotspcheck=`dots_p' + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`dotspcheck'+1+`qrot')) { + di as text in gr "Warning: Too small effective sample size for bin selection." /// + _newline _skip(9) "# of mass points or clusters used and by() option ignored." + local by "" + local byvals "" + local fullfewobs "T" + local binspos "QS" /* forced to be QS */ + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `Ntotal'>5000) { + local randcut1k=max(5000/`Ntotal', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5,000, 0.01n) observations if the sample size n>5,000. To use the full sample, set randcut(1)." + } + + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') bins(`dots_p' `dots_s') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`nbins'==.) 
{ + local nbins=e(nbinsrot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_var_dpi'=J(`bynum',1,e(imse_var_dpi)) + mat `mat_imse_bsq_dpi'=J(`bynum',1,e(imse_bsq_dpi)) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_var_rot'=J(`bynum',1,e(imse_var_rot)) + mat `mat_imse_bsq_rot'=J(`bynum',1,e(imse_bsq_rot)) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`dots'"=="T"|"`dots'"=="") { + local dots_p=`binsp' + local dots_s=`binss' + } + if ("`line'"=="T") { + local line_p=`binsp' + local line_s=`binss' + } + if ("`ci'"!="T"&"`ci'"!="") { + if (`ci_p'<=`binsp') { + local ci_p=`binsp'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`ci'"=="T") { + local ci_p=`binsp'+1 + local ci_s=`binss'+1 + } + if ("`cb'"!="T"&"`cb'"!="") { + if (`cb_p'<=`binsp') { + local cb_p=`binsp'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. 
It must be greater than the IMSE-optimal degree." + } + } + if ("`cb'"=="T") { + local cb_p=`binsp'+1 + local cb_s=`binss'+1 + } + } + } + } + + + if (("`selectfullON'"=="T"|("`selection'"=="NA"&"`samebinsby'"!="")) & "`fullfewobs'"=="") { + * Save in a knot list + local knotlistON "T" + local nbins_full=`nbins' + + if ("`binspos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `fullkmat'=(nullmat(`fullkmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else if ("`binspos'"=="QS") { + if (`nbins'==1) mat `fullkmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `wt', nq(`nbins') `usegtools' + mat `fullkmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + + + *** Placement name, for display ************ + if ("`pos'"=="user") { + local binselectmethod "User-specified" + local placement "User-specified" + } + else if ("`binspos'"=="ES") { + local placement "Evenly-spaced" + } + else if ("`binspos'"=="QS") { + local placement "Quantile-spaced" + } + + * NOTE: ALL checkings are put within the loop + + * Set seed + if ("`simsseed'"!="") set seed `simsseed' + + * alpha quantile (for two-sided CI) + local alpha=(100-(100-`level')/2)/100 + + + *************************************************************************** + *************** Preparation before loop************************************ + *************************************************************************** + + ********** Prepare vars for plotting ******************** + * names for mata objects storing graph data + * plotmat: final output (defined outside); + * plotmatby: output for each group + tempname plotmat plotmatby xsub ysub byindex xcatsub + tempname Xm mata_fit mata_se /* temp name for mata obj */ + + * count the number of requested columns, record the positions + local ncolplot=1 /* 1st col reserved for group */ + if ("`plot'"=="") { + if (`dotsntot'!=0) { + local dots_start=`ncolplot'+1 + local dots_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + } + if 
(`linengrid'!=0&"`fullfewobs'"=="") { + local line_start=`ncolplot'+1 + local line_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + } + if (`polyregngrid'!=0) { + local poly_start=`ncolplot'+1 + local poly_end=`ncolplot'+4 + local ncolplot=`ncolplot'+4 + if (`polyregcingrid'!=0) { + local polyci_start=`ncolplot'+1 + local polyci_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + } + if (`cintot'!=0) { + local ci_start=`ncolplot'+1 + local ci_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + if (`cbngrid'!=0&"`fullfewobs'"=="") { + local cb_start=`ncolplot'+1 + local cb_end=`ncolplot'+5 + local ncolplot=`ncolplot'+5 + } + } + mata: `plotmat'=J(0,`ncolplot',.) + + * mark the (varying) last row (for plotting) + local bylast=0 + ******************************************************************* + * temp var: bin id + tempvar xcat + qui gen `xcat'=. in 1 + + * matrix names, for returns + tempname Nlist nbinslist cvallist + + * local vars, for plotting + local counter_by=1 + local plotnum=0 /* count the number of series, for legend */ + if ("`by'"=="") local noby="noby" + local byvalnamelist "" /* save group name (value) */ + local plotcmd "" /* plotting cmd */ + + *************************************************************************** + ******************* Now, enter the loop *********************************** + *************************************************************************** + foreach byval in `byvals' `noby' { + local conds "" + if ("`by'"!="") { + local conds "if `by'==`byval'" /* with "if" */ + if ("`bylabel'"=="") local byvalname=`byval' + else { + local byvalname `: label `bylabel' `byval'' + } + local byvalnamelist `" `byvalnamelist' `"`byvalname'"' "' + } + + + if (`bynum'>1) { + mata: `byindex'=`byvec':==`byval' + mata: `xsub'=select(`xvec',`byindex'); `ysub'=select(`yvec', `byindex') + } + else { + mata: `xsub'=`xvec'; `ysub'=`yvec' + } + + * Subsample size + if ("`wtype'"=="f") sum `x_var' `conds' `wt', meanonly + else sum `x_var' 
`conds', meanonly + + local xmin=r(min) + local xmax=r(max) + local N=r(N) + mat `Nlist'=(nullmat(`Nlist') \ `N') + + * Effective sample size + if (`bynum'==1) local eN=`nsize' + else { + if ("`wtype'"!="f") local eN=r(N) + else { + qui count `conds' + local eN=r(N) + } + } + + local Ndist=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xsub', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' `conds' + local Ndist=r(unique) + } + local eN=min(`eN', `Ndist') + mat `Ndistlist'[`counter_by',1]=`Ndist' + } + + * # of clusters + local Nclust=. + if ("`clusterON'"=="T") { + if (`bynum'==1) { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(`cluvec')))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + } + else { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(select(`cluvec', `byindex'))))) + } + else { + qui gunique `clustervar' `conds' + local Nclust=r(unique) + } + } + local eN=min(`eN', `Nclust') /* effective SUBsample size */ + mat `Nclustlist'[`counter_by',1]=`Nclust' + } + + ********************************************************* + ************** Prepare bins, within loop **************** + ********************************************************* + if ("`pos'"!="user") local pos `binspos' /* initialize pos */ + * Selection? + if ("`selection'"!="NA"&"`knotlistON'"!="T"&"`fullfewobs'"=="") { + * Check effective sample size + if ("`dots_p'"==".") local dotspcheck=6 + else local dotspcheck=`dots_p' + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`dotspcheck'+1+`qrot')) { + di as text in gr "Warning: too small effective sample size for bin selection." /// + _newline _skip(9) "# of mass points or clusters used." 
+ local fewobs "T" + local nbins=`eN' + local pos "QS" /* forced to be QS */ + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `N'>5000) { + local randcut1k=max(5000/`N', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5,000, 0.01n) observations if the sample size n>5,000. To use the full sample, set randcut(1)." + } + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + bins(`dots_p' `dots_s') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`pos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + mat `mat_imse_bsq_dpi'[`counter_by',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter_by',1]=e(imse_var_dpi) + if (`nbins'==.) { + local nbins=e(nbinsrot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins_full') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) 
{ + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + mat `mat_imse_bsq_dpi'[`counter_by',1]=e(imse_bsq_dpi) + mat `mat_imse_var_dpi'[`counter_by',1]=e(imse_var_dpi) + if (`binsp'==.) { + local binsp=e(prot_regul) + local binss=e(srot_regul) + mat `mat_imse_bsq_rot'[`counter_by',1]=e(imse_bsq_rot) + mat `mat_imse_var_rot'[`counter_by',1]=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`dots'"=="T"|"`dots'"=="") { + local dots_p=`binsp' + local dots_s=`binss' + } + if ("`line'"=="T") { + local line_p=`binsp' + local line_s=`binss' + } + if ("`ci'"!="T"&"`ci'"!="") { + if (`ci_p'<=`binsp') { + local ci_p=`binsp'+1 + local ci_s=`ci_p' + di as text "Warning: Degree for ci() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`ci'"=="T") { + local ci_p=`binsp'+1 + local ci_s=`binss'+1 + } + if ("`cb'"!="T"&"`cb'"!="") { + if (`cb_p'<=`binsp') { + local cb_p=`binsp'+1 + local cb_s=`cb_p' + di as text "Warning: Degree for cb() has been changed. It must be greater than the IMSE-optimal degree." 
+ } + } + if ("`cb'"=="T") { + local cb_p=`binsp'+1 + local cb_s=`binss'+1 + } + } + } + } + + + if ("`selection'"=="NA"|"`knotlistON'"=="T") local nbins=`nbins_full' /* add the universal nbins */ + *if ("`knotlistON'"=="T") local nbins=`nbins_full' + if ("`fullfewobs'"!="") { + local fewobs "T" + local nbins=`eN' + } + + ****************************************************** + * Check effective sample size for each case ********** + ****************************************************** + if ("`fewobs'"!="T") { + if ((`nbins'-1)*(`dots_p'-`dots_s'+1)+`dots_p'+1+`dfcheck_n2'>=`eN') { + local fewobs "T" /* even though ROT available, treat it as few obs case */ + local nbins=`eN' + local pos "QS" + di as text in gr "Warning: Too small effective sample size for dots. # of mass points or clusters used." + } + if ("`line_p'"!=".") { + if ((`nbins'-1)*(`line_p'-`line_s'+1)+`line_p'+1+`dfcheck_n2'>=`eN') { + local line_fewobs "T" + di as text in gr "Warning: Too small effective sample size for line." + } + } + if ("`ci_p'"!=".") { + if ((`nbins'-1)*(`ci_p'-`ci_s'+1)+`ci_p'+1+`dfcheck_n2'>=`eN') { + local ci_fewobs "T" + di as text in gr "Warning: Too small effective sample size for CI." + } + } + if ("`cb_p'"!=".") { + if ((`nbins'-1)*(`cb_p'-`cb_s'+1)+`cb_p'+1+`dfcheck_n2'>=`eN') { + local cb_fewobs "T" + di as text in gr "Warning: Too small effective sample size for CB." + } + } + } + + if ("`polyreg'"!="") { + if (`polyreg'+1>=`eN') { + local polyreg_fewobs "T" + di as text in gr "Warning: Too small effective sample size for polynomial fit." 
+ } + } + + * Generate category variable for data and save knot in matrix + tempname kmat + if ("`knotlistON'"=="T") { + mat `kmat'=`fullkmat' + if ("`fewobs'"=="T"&"`eN'"!="`Ndist'") { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + else { + if ("`fewmasspoints'"==""&("`fewobs'"!="T"|"`eN'"!="`Ndist'")) { + if ("`pos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `kmat'=(nullmat(`kmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `conds' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + } + } + + + * Renew knot list if few mass points + if (("`fewobs'"=="T"&"`eN'"=="`Ndist'")|"`fewmasspoints'"!="") { + qui tab `x_var' `conds', matrow(`kmat') + if ("`fewmasspoints'"!="") { + local nbins=rowsof(`kmat') + local Ndist=`nbins' + local eN=`Ndist' + } + } + else { + mata: st_matrix("`kmat'", (`xmin' \ uniqrows(st_matrix("`kmat'")[|2 \ `=`nbins'+1'|]))) + if (`nbins'!=rowsof(`kmat')-1) { + di as text in gr "Warning: Repeated knots. Some bins dropped." + local nbins=rowsof(`kmat')-1 + } + binsreg_irecode `x_var' `conds', knotmat(`kmat') bin(`xcat') /// + `usegtools' nbins(`nbins') pos(`pos') knotliston(`knotlistON') + + mata: `xcatsub'=st_data(., "`xcat'") + if (`bynum'>1) { + mata: `xcatsub'=select(`xcatsub', `byindex') + } + } + + + ************************************************* + **** Check for empty bins *********************** + ************************************************* + mata: `binedges'=. 
/* initialize */ + if ("`fewobs'"!="T"&"`localcheck'"=="T") { + mata: st_local("Ncat", strofreal(rows(uniqrows(`xcatsub')))) + if (`nbins'==`Ncat') { + mata: `binedges'=binsreg_uniq(`xsub', `xcatsub', `nbins', "uniqmin") + } + else { + local uniqmin=0 + di as text in gr "Warning: There are empty bins. Specify a smaller number in nbins()." + } + if ("`dots_p'"!=".") { + if (`uniqmin'<`dots_p'+1) { + local dots_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for dots." + } + } + if ("`line_p'"!=".") { + if (`uniqmin'<`line_p'+1) { + local line_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for line." + } + } + if ("`ci_p'"!=".") { + if (`uniqmin'<`ci_p'+1) { + local ci_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for CI." + } + } + if ("`cb_p'"!=".") { + if (`uniqmin'<`cb_p'+1) { + local cb_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for CB." + } + } + } + + * Now, save nbins in a list !!! + mat `nbinslist'=(nullmat(`nbinslist') \ `nbins') + + ********************************************************** + **** Count the number of rows needed (within loop!) 
****** + ********************************************************** + local byfirst=`bylast'+1 + local byrange=0 + if ("`fewobs'"!="T") { + local dots_nr=`dotsngrid_mean'*`nbins' + if (`dotsngrid'!=0) local dots_nr=`dots_nr'+`dotsngrid'*`nbins'+`nbins'-1 + local ci_nr=`cingrid_mean'*`nbins' + if (`cingrid'!=0) local ci_nr=`ci_nr'+`cingrid'*`nbins'+`nbins'-1 + if (`linengrid'!=0) local line_nr=`linengrid'*`nbins'+`nbins'-1 + if (`cbngrid'!=0) local cb_nr=`cbngrid'*`nbins'+`nbins'-1 + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*`nbins'+`nbins'-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*`nbins'+`nbins'-1 + } + local byrange=max(`dots_nr'+0,`line_nr'+0,`ci_nr'+0,`cb_nr'+0, `poly_nr'+0, `polyci_nr'+0) + } + else { + if ("`eN'"=="`Ndist'") { + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*(`nbins'-1)+`nbins'-1-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*(`nbins'-1)+`nbins'-1-1 + } + } + else { + if (`polyregngrid'!=0) { + local poly_nr=`polyregngrid'*`nbins'+`nbins'-1 + if (`polyregcingrid'!=0) local polyci_nr=`polyregcingrid'*`nbins'+`nbins'-1 + } + } + local byrange=max(`nbins', `poly_nr'+0, `polyci_nr'+0) + } + local bylast=`bylast'+`byrange' + mata: `plotmatby'=J(`byrange',`ncolplot',.) + if ("`byval'"!="noby") { + mata: `plotmatby'[.,1]=J(`byrange',1,`byval') + } + + ************************************************ + **** START: prepare data for plotting*********** + ************************************************ + local plotcmdby "" + + ******************************** + * adjust w vars + tempname wval + if (`nwvar'>0) { + if (`"`at'"'==`"mean"'|`"`at'"'==`"median"') { + matrix `wval'=J(1, `nwvar', 0) + tempname wvaltemp mataobj + mata: `mataobj'=. 
+ foreach wpos in `indexlist' { + local wname: word `wpos' of `w_var' + if ("`usegtools'"=="") { + if ("`wtype'"!="") qui tabstat `wname' `conds' [aw`exp'], stat(`at') save + else qui tabstat `wname' `conds', stat(`at') save + mat `wvaltemp'=r(StatTotal) + } + else { + qui gstats tabstat `wname' `conds' `wt', stat(`at') matasave("`mataobj'") + mata: st_matrix("`wvaltemp'", `mataobj'.getOutputCol(1)) + } + mat `wval'[1,`wpos']=`wvaltemp'[1,1] + } + mata: mata drop `mataobj' + } + else if (`"`at'"'==`"0"') { + matrix `wval'=J(1,`nwvar',0) + } + else if ("`atwout'"=="user") { + matrix `wval'=`wuser' + } + } + + + ************************************************* + ********** dots and ci for few obs. case ******** + ************************************************* + if (`dotsntot'!=0&"`plot'"==""&"`fewobs'"=="T") { + di as text in gr "Warning: dots(0 0) is used." + + local dots_first=`byfirst' + local dots_last=`byfirst'-1+`nbins' + + mata: `plotmatby'[|1,`dots_start'+2 \ `nbins',`dots_start'+2|]=range(1,`nbins',1) + + if ("`eN'"=="`Ndist'") { + mata: `plotmatby'[|1,`dots_start' \ `nbins',`dots_start'|]=st_matrix("`kmat'"); /// + `plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+1|]=J(`nbins',1,1) + + * Renew knot commalist, each value forms a group + local xknot "" + forvalues i=1/`nbins' { + local xknot `xknot' `kmat'[`i',1] + } + local xknotcommalist : subinstr local xknot " " ",", all + qui replace `xcat'=1+irecode(`x_var',`xknotcommalist') `conds' + } + else { + tempname grid + mat `grid'=(`kmat'[1..`nbins',1]+`kmat'[2..`nbins'+1,1])/2 + mata: `plotmatby'[|1,`dots_start' \ `nbins',`dots_start'|]=st_matrix("`grid'"); /// + `plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+1|]=J(`nbins',1,0) + } + + local nseries=`nbins' + if ("`absorb'"=="") capture reg `y_var' ibn.`xcat' `w_var' `conds' `wt', nocon `vce' + else capture reghdfe `y_var' ibn.`xcat' `w_var' `conds' `wt', absorb(`absorb') `vce' `reghdfeopt' + + tempname fewobs_b fewobs_V + if (_rc==0) { + mat 
`fewobs_b'=e(b) + mat `fewobs_V'=e(V) + if ("`absorb'"=="") mata: binsreg_checkdrop("`fewobs_b'", "`fewobs_V'", `nseries') + else mata: binsreg_checkdrop("`fewobs_b'", "`fewobs_V'", `nseries', "T") + + if (`nwvar'>0) { + if ("`absorb'"=="") mat `fewobs_b'=`fewobs_b'[1,1..`nseries']+(`fewobs_b'[1,`=`nseries'+1'..`=`nseries'+`nwvar'']*`wval'')*J(1,`nseries',1) + else mat `fewobs_b'=`fewobs_b'[1,1..`nseries']+ /// + (`fewobs_b'[1,`=`nseries'+1'..`=`nseries'+`nwvar'']*`wval''+`fewobs_b'[1, `=`nseries'+`nwvar'+1'])*J(1,`nseries',1) + } + else { + if ("`absorb'"=="") mat `fewobs_b'=`fewobs_b'[1,1..`nseries'] + else mat `fewobs_b'=`fewobs_b'[1,1..`nseries']+`fewobs_b'[1, `=`nseries'+1']*J(1,`nseries',1) + } + } + else { + error _rc + exit _rc + } + + + mata: `plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]=st_matrix("`fewobs_b'")' + + local plotnum=`plotnum'+1 + local legendnum `legendnum' `plotnum' + local col: word `counter_by' of `bycolors' + local sym: word `counter_by' of `bysymbols' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond `plotcond' if + if ("`plotxrange'"!="") { + local plotcond `plotcond' dots_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &dots_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' dots_fit>=`min_yr' + else local plotcond `plotcond' &dots_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &dots_fit<=`max_yr' + } + } + + + local plotcmdby `plotcmdby' (scatter dots_fit dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + mcolor(`col') msymbol(`sym') `dotsplotopt') + + if (`cintot'!=0) { + di as text in gr "Warning: ci(0 0) is used." 
+ + if (`nwvar'>0) { + if ("`absorb'"=="") mata: `mata_se'=(I(`nseries'), J(`nseries',1,1)#st_matrix("`wval'")) + else mata: `mata_se'=(I(`nseries'), J(`nseries',1,1)#st_matrix("`wval'"), J(`nseries',1,1)) + } + else { + if ("`absorb'"=="") mata: `mata_se'=I(`nseries') + else mata: `mata_se'=(I(`nseries'), J(`nseries',1,1)) + } + mata: `plotmatby'[|1,`ci_start'+1 \ `nbins',`ci_start'+2|]=`plotmatby'[|1,`dots_start'+1 \ `nbins',`dots_start'+2|]; /// + `mata_se'=sqrt(rowsum((`mata_se'*st_matrix("`fewobs_V'")):*`mata_se')); /// + `plotmatby'[|1,`ci_start'+3 \ `nbins',`ci_start'+3|]=`plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]-`mata_se'*invnormal(`alpha'); /// + `plotmatby'[|1,`ci_start'+4 \ `nbins',`ci_start'+4|]=`plotmatby'[|1,`dots_start'+3 \ `nbins',`dots_start'+3|]+`mata_se'*invnormal(`alpha') + mata: mata drop `mata_se' + + local plotnum=`plotnum'+1 + local lty: word `counter_by' of `bylpatterns' + local plotcmdby `plotcmdby' (rcap CI_l CI_r dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + } + } + + ********************************************* + **** The following handles the usual case *** + ********************************************* + * Turn on or off? 
+ local dotsON "" + local lineON "" + local polyON "" + local ciON "" + local cbON "" + if (`dotsntot'!=0&"`plot'"==""&"`fewobs'"!="T"&"`dots_fewobs'"!="T") { + local dotsON "T" + } + if (`linengrid'!=0&"`plot'"==""&"`line_fewobs'"!="T"&"`fewobs'"!="T") { + local lineON "T" + } + if (`polyregngrid'!=0&"`plot'"==""&"`polyreg_fewobs'"!="T") { + local polyON "T" + } + if (`cintot'!=0&"`plot'"==""&"`ci_fewobs'"!="T"&"`fewobs'"!="T") { + local ciON "T" + } + if (`cbngrid'!=0&"`plot'"==""&"`cb_fewobs'"!="T"&"`fewobs'"!="T") { + local cbON "T" + } + + + ************************ + ****** Dots ************ + ************************ + tempname xmean + + if ("`dotsON'"=="T") { + local dots_first=`byfirst' + local dots_last=`byfirst'+`dots_nr'-1 + + * fitting + tempname dots_b dots_V + if (("`dots_p'"=="`ci_p'"&"`dots_s'"=="`ci_s'"&"`ciON'"=="T")| /// + ("`dots_p'"=="`cb_p'"&"`dots_s'"=="`cb_s'"&"`cbON'"=="T")) { + binsreg_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`dots_p') s(`dots_s') type(dots) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`dotsngrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + usereg `sorted' `usegtools' /// + absorb(`absorb') reghdfeopt(`reghdfeopt') + } + else { + binsreg_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`dots_p') s(`dots_s') type(dots) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`dotsngrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' /// + absorb(`absorb') reghdfeopt(`reghdfeopt') + } + + mat `dots_b'=e(bmat) + mat `dots_V'=e(Vmat) + if (`dotsngrid_mean'!=0) mat `xmean'=e(xmat) + + * prediction + if (`dotsngrid_mean'==0) { + mata: `plotmatby'[|1,`dots_start' \ `dots_nr',`dots_end'|] = /// + binsreg_plotmat("`dots_b'", "`dots_V'", ., "`kmat'", /// + `nbins', `dots_p', `dots_s', `deriv', /// + "dots", `dotsngrid', "`wval'", `nwvar', /// + "`hdmethod'", "`asyvar'") + } + else { + mata: 
`plotmatby'[|1,`dots_start' \ `dots_nr',`dots_end'|] = /// + binsreg_plotmat("`dots_b'", "`dots_V'", ., "`kmat'", /// + `nbins', `dots_p', `dots_s', `deriv', /// + "dots", `dotsngrid', "`wval'", `nwvar', /// + "`hdmethod'", "`asyvar'", "`xmean'") + } + + * dots + local plotnum=`plotnum'+1 + if ("`cbON'"=="T") local legendnum `legendnum' `=`plotnum'+1' + else { + local legendnum `legendnum' `plotnum' + } + local col: word `counter_by' of `bycolors' + local sym: word `counter_by' of `bysymbols' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' dots_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &dots_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' dots_fit>=`min_yr' + else local plotcond `plotcond' &dots_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &dots_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (scatter dots_fit dots_x /// + `plotcond' in `dots_first'/`dots_last', /// + mcolor(`col') msymbol(`sym') `dotsplotopt') + } + + ********************************************** + ********************* Line ******************* + ********************************************** + if ("`lineON'"=="T") { + local line_first=`byfirst' + local line_last=`byfirst'-1+`line_nr' + + * fitting + tempname line_b line_V + capture confirm matrix `dots_b' `dots_V' + if ("`line_p'"=="`dots_p'"& "`line_s'"=="`dots_s'" & _rc==0) { + matrix `line_b'=`dots_b' + matrix `line_V'=`dots_V' + } + else { + if (("`line_p'"=="`ci_p'"&"`line_s'"=="`ci_s'"&"`ciON'"=="T")| /// + ("`line_p'"=="`cb_p'"&"`line_s'"=="`cb_s'"&"`cbON'"=="T")) { + binsreg_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`line_p') s(`line_s') type(line) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + usereg `sorted' `usegtools' /// + absorb(`absorb') 
reghdfeopt(`reghdfeopt') + } + else { + binsreg_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`line_p') s(`line_s') type(line) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' /// + absorb(`absorb') reghdfeopt(`reghdfeopt') + } + mat `line_b'=e(bmat) + mat `line_V'=e(Vmat) + } + + * prediction + mata: `plotmatby'[|1,`line_start' \ `line_nr',`line_end'|] = /// + binsreg_plotmat("`line_b'", "`line_V'", ., "`kmat'", /// + `nbins', `line_p', `line_s', `deriv', /// + "line", `linengrid', "`wval'", `nwvar', "`hdmethod'", "`asyvar'") + + * line + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' line_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &line_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' line_fit>=`min_yr' + else local plotcond `plotcond' &line_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &(line_fit<=`max_yr'|line_fit==.) + } + } + + local plotcmdby `plotcmdby' (line line_fit line_x /// + `plotcond' in `line_first'/`line_last', sort cmissing(n) /// + lcolor(`col') lpattern(`lty') `lineplotopt') + + } + + + *********************************** + ******* Polynomial fit ************ + *********************************** + if ("`polyON'"=="T") { + if (`nwvar'>0) { + di as text "Note: When additional covariates w are included, the polynomial fit may not always be close to the binscatter fit." 
+ } + local poly_first=`byfirst' + local poly_last=`byfirst'-1+`poly_nr' + + mata:`plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'+2|]=binsreg_grids("`kmat'",`polyregngrid') + + local poly_series "" + forval i=1/`polyreg' { + tempvar x_var_`i' + qui gen `x_var_`i''=`x_var'^`i' `conds' + local poly_series `poly_series' `x_var_`i'' + } + + * shut down vce if poly ci not required + local vce_poly "" + if (`polyregcingrid'!=0) local vce_poly `vce' + + if ("`absorb'"=="") capture reg `y_var' `poly_series' `w_var' `conds' `wt', `vce_poly' + else capture reghdfe `y_var' `poly_series' `w_var' `conds' `wt', absorb(`absorb') `vce_poly' `reghdfeopt' + + * store results + tempname poly_b poly_V poly_adjw + if (_rc==0) { + matrix `poly_b'=e(b) + + if (`nwvar'>0&`deriv'==0) matrix `poly_adjw'=`wval'*`poly_b'[1, `=`polyreg'+1'..`=`polyreg'+`nwvar'']' + else matrix `poly_adjw'=0 + + if (`deriv'==0) { + if (`polyreg'>0) matrix `poly_b'=(`poly_b'[1, `=`polyreg'+`nwvar'+1'], `poly_b'[1,1..`polyreg']) + else matrix `poly_b'=`poly_b'[1, `=`polyreg'+`nwvar'+1'] + } + else matrix `poly_b'=`poly_b'[1, `deriv'..`polyreg'] + + matrix `poly_V'=e(V) + } + else { + error _rc + exit _rc + } + + * Data for derivative + mata: `Xm'=J(`poly_nr',0,.) 
+ forval i=`deriv'/`polyreg' { + mata: `Xm'=(`Xm', /// + `plotmatby'[|1,`poly_start' \ `poly_nr',`poly_start'|]:^(`i'-`deriv')* /// + factorial(`i')/factorial(`i'-`deriv')) + } + mata:`plotmatby'[|1,`poly_start'+3 \ `poly_nr',`poly_start'+3|]=(`Xm'*st_matrix("`poly_b'")'):+st_matrix("`poly_adjw'") + + mata: mata drop `Xm' + + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' poly_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &poly_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' poly_fit>=`min_yr' + else local plotcond `plotcond' &poly_fit>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &poly_fit<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (line poly_fit poly_x /// + `plotcond' in `poly_first'/`poly_last', /// + sort lcolor(`col') lpattern(`lty') `polyregplotopt') + + * add CI for global poly? + if (`polyregcingrid'!=0) { + local polyci_first=`byfirst' + local polyci_last=`byfirst'-1+`polyci_nr' + + mata: `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'+2|]=binsreg_grids("`kmat'", `polyregcingrid') + + mata: `Xm'=J(`polyci_nr',0,.) 
+ forval i=`deriv'/`polyreg' { + mata:`Xm'=(`Xm', /// + `plotmatby'[|1,`polyci_start' \ `polyci_nr',`polyci_start'|]:^(`i'-`deriv')* /// + factorial(`i')/factorial(`i'-`deriv')) + } + + mata:`mata_fit'=(`Xm'*st_matrix("`poly_b'")'):+st_matrix("`poly_adjw'") + if (`deriv'==0) { + if (`polyreg'>0) { + if (`nwvar'>0) mata: `Xm'=(`Xm'[|1,2 \ ., cols(`Xm')|], J(`polyci_nr',1,1)#st_matrix("`wval'"),`Xm'[.,1]) + else mata: `Xm'=(`Xm'[|1,2 \ ., cols(`Xm')|], `Xm'[.,1]) + } + else { + if (`nwvar'>0) mata: `Xm'=(J(`polyci_nr',1,1)#st_matrix("`wval'"),`Xm'[.,1]) + else mata: `Xm'=`Xm'[.,1] + + } + } + else { + matrix `poly_V'=`poly_V'[`deriv'..`polyreg',`deriv'..`polyreg'] + } + + mata:`mata_se'=sqrt(rowsum((`Xm':*(st_matrix("`poly_V'")*`Xm'')'))); /// + `plotmatby'[|1,`polyci_start'+3 \ `polyci_nr',`polyci_start'+3|]=`mata_fit'-`mata_se'*invnormal(`alpha'); /// + `plotmatby'[|1,`polyci_start'+4 \ `polyci_nr',`polyci_start'+4|]=`mata_fit'+`mata_se'*invnormal(`alpha'); /// + `plotmatby'[selectindex(`plotmatby'[,`=`polyci_start'+1']:==1),(`=`polyci_start'+3',`=`polyci_start'+4')]=J(`=`nbins'-1',2,.) 
+ mata: mata drop `Xm' `mata_fit' `mata_se' + + * poly ci + local plotnum=`plotnum'+1 + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' polyCI_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &polyCI_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' polyCI_l>=`min_yr' + else local plotcond `plotcond' &polyCI_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &polyCI_r<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (rcap polyCI_l polyCI_r polyCI_x /// + `plotcond' in `polyci_first'/`polyci_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + } + } + + + ********************************** + ******* Confidence Interval ****** + ********************************** + if ("`ciON'"=="T") { + local ci_first=`byfirst' + local ci_last=`byfirst'-1+`ci_nr' + + * fitting + tempname ci_b ci_V + capture confirm matrix `line_b' `line_V' + if ("`ci_p'"=="`line_p'"& "`ci_s'"=="`line_s'" & _rc==0) { + matrix `ci_b'=`line_b' + matrix `ci_V'=`line_V' + } + else { + capture confirm matrix `dots_b' `dots_V' + if ("`ci_p'"=="`dots_p'"& "`ci_s'"=="`dots_s'" & _rc==0) { + matrix `ci_b'=`dots_b' + matrix `ci_V'=`dots_V' + } + } + + capture confirm matrix `ci_b' `ci_V' `xmean' + if (_rc!=0) { + binsreg_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`ci_p') s(`ci_s') type(ci) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(`cingrid_mean') /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' /// + absorb(`absorb') reghdfeopt(`reghdfeopt') + + mat `ci_b'=e(bmat) + mat `ci_V'=e(Vmat) + mat `xmean'=e(xmat) + } + + * prediction + if (`cingrid_mean'==0) { + mata: `plotmatby'[|1,`ci_start' \ `ci_nr',`ci_end'|] = /// + binsreg_plotmat("`ci_b'", "`ci_V'", /// + `=invnormal(`alpha')', "`kmat'", /// + `nbins', `ci_p', `ci_s', `deriv', "ci", /// + `cingrid', "`wval'", `nwvar', 
"`hdmethod'", "`asyvar'") + } + else { + mata: `plotmatby'[|1,`ci_start' \ `ci_nr',`ci_end'|] = /// + binsreg_plotmat("`ci_b'", "`ci_V'", /// + `=invnormal(`alpha')', "`kmat'", /// + `nbins', `ci_p', `ci_s', `deriv', "ci", /// + `cingrid', "`wval'", `nwvar', "`hdmethod'", "`asyvar'", "`xmean'") + } + + * ci + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local lty: word `counter_by' of `bylpatterns' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' CI_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &CI_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' CI_l>=`min_yr' + else local plotcond `plotcond' &CI_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &CI_r<=`max_yr' + } + } + + local plotcmdby `plotcmdby' (rcap CI_l CI_r CI_x /// + `plotcond' in `ci_first'/`ci_last', /// + sort lcolor(`col') lpattern(`lty') `ciplotopt') + + } + + ******************************* + ***** Confidence Band ********* + ******************************* + tempname cval + scalar `cval'=. + if ("`cbON'"=="T") { + if (`nsims'<2000|`simsgrid'<50) { + di as text "Note: A larger number random draws/evaluation points is recommended to obtain the final results." 
+ } + * Prepare grid for plotting + local cb_first=`byfirst' + local cb_last=`byfirst'-1+`cb_nr' + + * fitting + tempname cb_b cb_V + capture confirm matrix `ci_b' `ci_V' + if ("`cb_p'"=="`ci_p'"& "`cb_s'"=="`ci_s'" & _rc==0) { + matrix `cb_b'=`ci_b' + matrix `cb_V'=`ci_V' + } + else { + capture confirm matrix `line_b' `line_V' + if ("`cb_p'"=="`line_p'"& "`cb_s'"=="`line_s'" & _rc==0) { + matrix `cb_b'=`line_b' + matrix `cb_V'=`line_V' + } + else { + capture confirm matrix `dots_b' `dots_V' + if ("`cb_p'"=="`dots_p'"& "`cb_s'"=="`dots_s'" & _rc==0) { + matrix `cb_b'=`dots_b' + matrix `cb_V'=`dots_V' + } + else { + binsreg_fit `y_var' `x_var' `w_var' `conds' `wt', deriv(`deriv') /// + p(`cb_p') s(`cb_s') type(cb) `vce' /// + xcat(`xcat') kmat(`kmat') dotsmean(0) /// + xname(`xsub') yname(`ysub') catname(`xcatsub') edge(`binedges') /// + `sorted' `usegtools' /// + absorb(`absorb') reghdfeopt(`reghdfeopt') + mat `cb_b'=e(bmat) + mat `cb_V'=e(Vmat) + } + } + } + + * Compute critical values + * Prepare grid for simulation + local uni_last=`simsngrid'*`nbins'+`nbins'-1 + local nseries=(`cb_p'-`cb_s'+1)*(`nbins'-1)+`cb_p'+1 + + tempname cb_basis coeff vcov vcovtemp + mata: `cb_basis'=binsreg_grids("`kmat'", `simsngrid'); /// + `cb_basis'=binsreg_spdes(`cb_basis'[,1], "`kmat'", `cb_basis'[,3], `cb_p', `deriv', `cb_s'); /// + `coeff'=st_matrix("`cb_b'")'; `vcov'=st_matrix("`cb_V'") + + if ("`absorb'"!="") { + mata: `cb_basis'=(`cb_basis', J(rows(`cb_basis'),1,1)); /// + `coeff'=(`coeff'[|1 \ `nseries'|] \ `coeff'[cols(`coeff')]); /// + `vcov'= (`vcov'[|1,1 \ `nseries', `nseries'|], `vcov'[|1,cols(`vcov') \ `nseries', cols(`vcov')|] \ /// + `vcov'[|cols(`vcov'), 1 \ cols(`vcov'), `nseries'|], `vcov'[cols(`vcov'), cols(`vcov')]); /// + st_matrix("`vcovtemp'", `vcov') + } + else { + mata: `coeff'=`coeff'[|1 \ `nseries'|]; `vcov'=`vcov'[|1,1 \ `nseries', `nseries'|] + } + + mata; `Xm'=binsreg_pred(`cb_basis', `coeff', `vcov', "all") + if ("`absorb'"=="") mata: 
binsreg_pval(`cb_basis', `Xm'[,2], "`cb_V'", ".", `nsims', `nseries', "two", `=`level'/100', ".", "`cval'", "inf") + else mata: binsreg_pval(`cb_basis', `Xm'[,2], "`vcovtemp'", ".", `nsims', `=`nseries'+1', "two", `=`level'/100', ".", "`cval'", "inf") + + mata: mata drop `cb_basis' `Xm' `coeff' `vcov' + + * prediction + mata: `plotmatby'[|1,`cb_start' \ `cb_nr',`cb_end'|] = /// + binsreg_plotmat("`cb_b'", "`cb_V'", /// + `=`cval'', "`kmat'", /// + `nbins', `cb_p', `cb_s', `deriv', /// + "cb", `cbngrid', "`wval'", `nwvar', "`hdmethod'", "`asyvar'") + + * cb + local plotnum=`plotnum'+1 + local col: word `counter_by' of `bycolors' + local plotcond "" + if ("`plotxrange'"!=""|"`plotyrange'"!="") { + local plotcond if + if ("`plotxrange'"!="") { + local plotcond `plotcond' CB_x>=`min_xr' + if ("`max_xr'"!="") local plotcond `plotcond' &CB_x<=`max_xr' + } + if ("`plotyrange'"!="") { + if ("`plotxrange'"=="") local plotcond `plotcond' CB_l>=`min_yr' + else local plotcond `plotcond' &CB_l>=`min_yr' + if ("`max_yr'"!="") local plotcond `plotcond' &(CB_r<=`max_yr'|CB_r==.) 
+ } + } + + local plotcmdby (rarea CB_l CB_r CB_x /// + `plotcond' in `cb_first'/`cb_last', sort cmissing(n) /// + lcolor(none%0) fcolor(`col'%50) fintensity(50) `cbplotopt') `plotcmdby' + } + mat `cvallist'=(nullmat(`cvallist') \ `cval') + + local plotcmd `plotcmd' `plotcmdby' + mata: `plotmat'=(`plotmat' \ `plotmatby') + + ********************************* + **** display ******************** + ********************************* + di "" + * Plotting + if ("`plot'"=="") { + if (`counter_by'==1) { + di in smcl in gr "Binscatter plot" + di in smcl in gr "Bin selection method: `binselectmethod'" + di in smcl in gr "Placement: `placement'" + di in smcl in gr "Derivative: `deriv'" + if (`"`savedata'"'!=`""') { + di in smcl in gr `"Output file: `savedata'.dta"' + } + } + di "" + if ("`by'"!="") { + di in smcl in gr "Group: `byvarname' = " in yellow "`byvalname'" + } + di in smcl in gr "{hline 30}{c TT}{hline 15}" + di in smcl in gr "{lalign 1:# of observations}" _col(30) " {c |} " _col(32) as result %7.0f `N' + di in smcl in gr "{lalign 1:# of distinct values}" _col(30) " {c |} " _col(32) as result %7.0f `Ndist' + di in smcl in gr "{lalign 1:# of clusters}" _col(30) " {c |} " _col(32) as result %7.0f `Nclust' + di in smcl in gr "{hline 30}{c +}{hline 15}" + di in smcl in gr "{lalign 1:Bin/Degree selection:}" _col(30) " {c |} " + if ("`selection'"=="P") { + di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `binsp' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `binss' + } + else { + di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `dots_p' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `dots_s' + } + di in smcl in gr "{ralign 29:# of bins}" _col(30) " {c |} " _col(32) as result %7.0f `nbins' + if ("`binselectmethod'"!="User-specified") { + if ("`binsmethod'"=="ROT") { 
+ di in smcl in gr "{ralign 29:imse, bias^2}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_bsq_rot'[`counter_by',1]' + di in smcl in gr "{ralign 29:imse, var.}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_var_rot'[`counter_by',1]' + } + else if ("`binsmethod'"=="DPI") { + di in smcl in gr "{ralign 29:imse, bias^2}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_bsq_dpi'[`counter_by',1]' + di in smcl in gr "{ralign 29:imse, var.}" _col(30) " {c |} " _col(32) as result %7.3f `=`mat_imse_var_dpi'[`counter_by',1]' + } + } + di in smcl in gr "{hline 30}{c BT}{hline 15}" + di "" + di in smcl in gr "{hline 9}{c TT}{hline 30}" + di in smcl _col(10) "{c |}" in gr _col(17) "p" _col(25) "s" _col(33) "df" + di in smcl in gr "{hline 9}{c +}{hline 30}" + if (`dotsntot'!=0) { + local dots_df=(`dots_p'-`dots_s'+1)*(`nbins'-1)+`dots_p'+1 + di in smcl in gr "{lalign 1: dots}" _col(10) "{c |}" in gr _col(17) "`dots_p'" _col(25) "`dots_s'" _col(33) "`dots_df'" + } + if ("`lineON'"=="T") { + local line_df=(`line_p'-`line_s'+1)*(`nbins'-1)+`line_p'+1 + di in smcl in gr "{lalign 1: line}" _col(10) "{c |}" in gr _col(17) "`line_p'" _col(25) "`line_s'" _col(33) "`line_df'" + } + if (`cintot'!=0) { + local ci_df=(`ci_p'-`ci_s'+1)*(`nbins'-1)+`ci_p'+1 + di in smcl in gr "{lalign 1: CI}" _col(10) "{c |}" in gr _col(17) "`ci_p'" _col(25) "`ci_s'" _col(33) "`ci_df'" + } + if ("`cbON'"=="T") { + local cb_df=(`cb_p'-`cb_s'+1)*(`nbins'-1)+`cb_p'+1 + di in smcl in gr "{lalign 1: CB}" _col(10) "{c |}" in gr _col(17) "`cb_p'" _col(25) "`cb_s'" _col(33) "`cb_df'" + } + if ("`polyON'"=="T") { + local poly_df=`polyreg'+1 + di in smcl in gr "{lalign 1: polyreg}" _col(10) "{c |}" in gr _col(17) "`polyreg'" _col(25) "NA" _col(33) "`poly_df'" + } + di in smcl in gr "{hline 9}{c BT}{hline 30}" + } + + + mata: mata drop `plotmatby' + local ++counter_by + } + mata: mata drop `xsub' `ysub' `binedges' + if (`bynum'>1) mata: mata drop `byindex' + capture mata: mata drop `xcatsub' + 
****************** END loop **************************************** + ******************************************************************** + + ******************************************* + *************** Plotting ****************** + ******************************************* + clear + if ("`plotcmd'"!="") { + * put data back to STATA + mata: st_local("nr", strofreal(rows(`plotmat'))) + qui set obs `nr' + + * MAKE SURE the orderings match + qui gen group=. in 1 + if (`dotsntot'!=0) { + qui gen dots_x=. in 1 + qui gen dots_isknot=. in 1 + qui gen dots_binid=. in 1 + qui gen dots_fit=. in 1 + } + if (`linengrid'!=0&"`fullfewobs'"=="") { + qui gen line_x=. in 1 + qui gen line_isknot=. in 1 + qui gen line_binid=. in 1 + qui gen line_fit=. in 1 + } + if (`polyregngrid'!=0) { + qui gen poly_x=. in 1 + qui gen poly_isknot=. in 1 + qui gen poly_binid=. in 1 + qui gen poly_fit=. in 1 + if (`polyregcingrid'!=0) { + qui gen polyCI_x=. in 1 + qui gen polyCI_isknot=. in 1 + qui gen polyCI_binid=. in 1 + qui gen polyCI_l=. in 1 + qui gen polyCI_r=. in 1 + } + } + if (`cintot'!=0) { + qui gen CI_x=. in 1 + qui gen CI_isknot=. in 1 + qui gen CI_binid=. in 1 + qui gen CI_l=. in 1 + qui gen CI_r=. in 1 + } + if (`cbngrid'!=0&"`fullfewobs'"=="") { + qui gen CB_x=. in 1 + qui gen CB_isknot=. in 1 + qui gen CB_binid=. in 1 + qui gen CB_l=. in 1 + qui gen CB_r=. in 1 + } + + mata: st_store(.,.,`plotmat') + + * Legend + local plot_legend legend(order( + if ("`by'"!=""&`dotsntot'!=0) { + forval i=1/`bynum' { + local byvalname: word `i' of `byvalnamelist' + local plot_legend `plot_legend' `: word `i' of `legendnum'' "`byvarname'=`byvalname'" + } + local plot_legend `plot_legend' )) + } + else { + local plot_legend legend(off) + } + + * Plot it + local graphcmd twoway `plotcmd', xtitle(`x_varname') ytitle(`y_varname') xscale(range(`xsc')) `plot_legend' `options' + `graphcmd' + } + mata: mata drop `plotmat' `xvec' `yvec' `byvec' `cluvec' + + + * Save graph data ? 
+ * In the normal case + if (`"`savedata'"'!=`""'&`"`plotcmd'"'!=`""') { + * Add labels + if ("`by'"!="") { + if ("`bystring'"=="T") { + label val group `bylabel' + decode group, gen(`byvarname') + } + else { + qui gen `byvarname'=group + if ("`bylabel'"!="") label val `byvarname' `bylabel' + } + label var `byvarname' "Group" + qui drop group + order `byvarname' + } + else qui drop group + + capture confirm variable dots_x dots_binid dots_isknot dots_fit + if (_rc==0) { + label var dots_x "Dots: grid" + label var dots_binid "Dots: indicator of bins" + label var dots_isknot "Dots: indicator of inner knot" + label var dots_fit "Dots: fitted values" + } + capture confirm variable line_x line_binid line_isknot line_fit + if (_rc==0) { + label var line_x "Line: grid" + label var line_binid "Line: indicator of bins" + label var line_isknot "Line: indicator of inner knot" + label var line_fit "Line: fitted values" + } + capture confirm variable poly_x poly_binid poly_isknot poly_fit + if (_rc==0) { + label var poly_x "Poly: grid" + label var poly_binid "Poly: indicator of bins" + label var poly_isknot "Poly: indicator of inner knot" + label var poly_fit "Poly: fitted values" + } + capture confirm variable polyCI_x polyCI_binid polyCI_isknot polyCI_l polyCI_r + if (_rc==0) { + label var polyCI_x "Poly confidence interval: grid" + label var polyCI_binid "Poly confidence interval: indicator of bins" + label var polyCI_isknot "Poly confidence interval: indicator of inner knot" + label var polyCI_l "Poly confidence interval: left boundary" + label var polyCI_r "Poly confidence interval: right boundary" + } + capture confirm variable CI_x CI_binid CI_isknot CI_l CI_r + if (_rc==0) { + label var CI_x "Confidence interval: grid" + label var CI_binid "Confidence interval: indicator of bins" + label var CI_isknot "Confidence interval: indicator of inner knot" + label var CI_l "Confidence interval: left boundary" + label var CI_r "Confidence interval: right boundary" + } + capture 
confirm variable CB_x CB_binid CB_isknot CB_l CB_r + if (_rc==0) { + label var CB_x "Confidence band: grid" + label var CB_binid "Confidence band: indicator of bins" + label var CB_isknot "Confidence band: indicator of inner knot" + label var CB_l "Confidence band: left boundary" + label var CB_r "Confidence band: right boundary" + } + qui save `"`savedata'"', `replace' + } + *************************************************************************** + + ********************************* + ********** Return *************** + ********************************* + ereturn clear + * # of observations + ereturn scalar N=`Ntotal' + * Options + ereturn scalar level=`level' + ereturn scalar dots_p=`dots_p' + ereturn scalar dots_s=`dots_s' + ereturn scalar line_p=`line_p' + ereturn scalar line_s=`line_s' + ereturn scalar ci_p=`ci_p' + ereturn scalar ci_s=`ci_s' + ereturn scalar cb_p=`cb_p' + ereturn scalar cb_s=`cb_s' + * by group: + *ereturn matrix knot=`kmat' + ereturn matrix cval_by=`cvallist' + ereturn matrix nbins_by=`nbinslist' + ereturn matrix Nclust_by=`Nclustlist' + ereturn matrix Ndist_by=`Ndistlist' + ereturn matrix N_by=`Nlist' + + ereturn matrix imse_var_rot=`mat_imse_var_rot' + ereturn matrix imse_bsq_rot=`mat_imse_bsq_rot' + ereturn matrix imse_var_dpi=`mat_imse_var_dpi' + ereturn matrix imse_bsq_dpi=`mat_imse_bsq_dpi' + +end + +* Helper commands +* Estimation +program define binsreg_fit, eclass + version 13 + syntax varlist(min=2 numeric ts fv) [if] [in] [fw aw pw] [, deriv(integer 0) /// + p(integer 0) s(integer 0) type(string) vce(passthru) /// + xcat(varname numeric) kmat(name) dotsmean(integer 0) /// /* xmean: report x-mean? */ + xname(name) yname(name) catname(name) edge(name) /// /* quantities in mata, subsample */ + usereg sorted usegtools absorb(string asis) reghdfeopt(string asis)] /* usereg: force the command to use reg; sored: sorted data? 
*/ + + preserve + marksample touse + qui keep if `touse' + + if ("`weight'"!="") local wt [`weight'`exp'] + + tokenize `varlist' + local y_var `1' + local x_var `2' + macro shift 2 + local w_var "`*'" + + if ("`absorb'"!="") { + local usereg "usereg" + local usereghdfe "T" + } + else { + local usereghdfe "F" + } + + if ("`w_var'"==""&`p'==0&("`type'"=="dots"|"`type'"=="line")&"`usereg'"=="") { + local ymeanON "T" + } + local nbins=rowsof(`kmat')-1 + + tempname matxmean temp_b temp_V + mat `matxmean'=. + mat `temp_b'=. + mat `temp_V'=. + if (`dotsmean'!=0|"`ymeanON'"=="T") { + if ("`sorted'"==""|"`weight'"!=""|"`usegtools'"!="") { + if ("`usegtools'"=="") { + tempfile tmpfile + qui save `tmpfile', replace + + if (`dotsmean'!=0&"`ymeanON'"=="T") { + collapse (mean) `y_var' (mean) `x_var' `wt', by(`xcat') fast + mkmat `xcat' `x_var', matrix(`matxmean') + mkmat `y_var', matrix(`temp_b') + mat `temp_b'=`temp_b'' /* row vector */ + } + else if (`dotsmean'!=0&"`ymeanON'"!="T") { + collapse (mean) `x_var' `wt', by(`xcat') fast + mkmat `xcat' `x_var', matrix(`matxmean') + } + else { + collapse (mean) `y_var' `wt', by(`xcat') fast + mkmat `y_var', matrix(`temp_b') + mat `temp_b'=`temp_b'' + } + use `tmpfile', clear + } + else { + tempname obj + if (`dotsmean'!=0&"`ymeanON'"=="T") { + qui gstats tabstat `y_var' `x_var' `wt', stats(mean) by(`xcat') matasave("`obj'") + mata: st_matrix("`temp_b'", `obj'.getOutputVar("`y_var'")'); /// + st_matrix("`matxmean'", (`obj'.getnum(.,1), `obj'.getOutputVar("`x_var'"))) + } + else if (`dotsmean'!=0&"`ymeanON'"!="T") { + qui gstats tabstat `x_var' `wt', stats(mean) by(`xcat') matasave("`obj'") + mata: st_matrix("`matxmean'", (`obj'.getnum(.,1), `obj'.getOutputVar("`x_var'"))) + } + else { + qui gstats tabstat `y_var' `wt', stats(mean) by(`xcat') matasave("`obj'") + mata: st_matrix("`temp_b'", `obj'.getOutputVar("`y_var'")') + } + mata: mata drop `obj' + } + } + else { + tempname output + if (`dotsmean'!=0&"`ymeanON'"=="T") { + mata: 
`output'=binsreg_stat((`xname',`yname'), `catname', `nbins', `edge', "mean", -1); /// + st_matrix("`temp_b'", `output'[.,3]'); /// + st_matrix("`matxmean'", `output'[.,1..2]) + } + else if (`dotsmean'!=0&"`ymeanON'"!="T") { + mata: `output'=binsreg_stat(`xname', `catname', `nbins', `edge', "mean", -1); /// + st_matrix("`matxmean'", `output') + } + else { + mata: `output'=binsreg_stat(`yname', `catname', `nbins', `edge', "mean", -1); /// + st_matrix("`temp_b'", `output'[.,2]') + } + mata: mata drop `output' + } + } + + * Regression? + if ("`ymeanON'"!="T") { + if (`p'==0) { + if ("`absorb'"=="") capture reg `y_var' ibn.`xcat' `w_var' `wt', nocon `vce' + else capture reghdfe `y_var' ibn.`xcat' `w_var' `wt', absorb(`absorb') `vce' `reghdfeopt' + + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + if ("`absorb'"=="") mata: binsreg_checkdrop("`temp_b'", "`temp_V'", `nbins') + else mata: binsreg_checkdrop("`temp_b'", "`temp_V'", `nbins', "T") + } + else { + error _rc + exit _rc + } + } + else { + local nseries=(`p'-`s'+1)*(`nbins'-1)+`p'+1 + local series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local series `series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + mata: binsreg_st_spdes(`xname', "`series'", "`kmat'", `catname', `p', 0, `s') + + if ("`absorb'"=="") capture reg `y_var' `series' `w_var' `wt', nocon `vce' + else capture reghdfe `y_var' `series' `w_var' `wt', absorb(`absorb') `vce' `reghdfeopt' + + * store results + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + if ("`absorb'"=="") mata: binsreg_checkdrop("`temp_b'", "`temp_V'", `nseries') + else mata: binsreg_checkdrop("`temp_b'", "`temp_V'", `nseries', "T") + } + else { + error _rc + exit _rc + } + } + } + + ereturn clear + ereturn matrix bmat=`temp_b' + ereturn matrix Vmat=`temp_V' + ereturn matrix xmat=`matxmean' /* xcat, xbar */ + ereturn local hdmethod "`usereghdfe'" +end + +mata: + + // Prediction for plotting + real matrix binsreg_plotmat(string scalar eb, string scalar eV, real scalar cval, /// + string scalar knotname, real scalar J, /// + real scalar p, real scalar s, real scalar deriv, /// + string scalar type, real scalar ngrid, string scalar muwmat, /// + real scalar nw, string scalar hdmethod, string scalar avar, | string scalar muxmat) + { + real matrix coef, bmat, rmat, vmat, knot, xmean, wval, eval, out, fit, se, semat, Xm, result + real scalar nseries + + nseries=(p-s+1)*(J-1)+p+1 + coef=st_matrix(eb)' + bmat=coef[|1\nseries|] + if (nw>0) rmat=coef[|(nseries+1)\(nseries+nw)|] + + if (type=="ci"|type=="cb") { + vfull=st_matrix(eV) + vmat=vfull[|1,1\nseries,nseries|] + } + + // Prepare evaluation points + eval=J(0,3,.) + if (args()==15) { + xmean=st_matrix(muxmat) + eval=(eval \ (xmean[,2], J(J, 1, 0), xmean[,1])) + } + if (ngrid!=0) eval=(eval \ binsreg_grids(knotname, ngrid)) + + // adjust w variables + if (nw>0&deriv==0) { + wvec=st_matrix(muwmat) + wval=wvec*rmat + } + else wval=0 + + if (hdmethod=="T"&deriv==0) wval=wval+coef[rows(coef),1] + + fit=J(0,1,.) + se=J(0,1,.) 
+ if (p==0) { + if (args()==15) fit=(fit \ bmat) + + if (ngrid!=0) { + fit=(fit \ (bmat#(J(ngrid,1,1)\.))) + fit=fit[|1 \ (rows(fit)-1)|] + } + if (type=="ci"|type=="cb") { + if (avar=="on") { + if (hdmethod=="T") { + semat=sqrt((diagonal(vmat):+vfull[rows(vfull), cols(vfull)]) /// + +2*vfull[|1,cols(vfull) \ nseries, cols(vfull)|]) + } + else semat=sqrt(diagonal(vmat)) + } + else { + if (nw>0) Xm=(I(nseries), J(nseries, 1, 1)#wvec) + else Xm=I(nseries) + if (hdmethod=="T") Xm=(Xm, J(nseries, 1, 1)) + semat=sqrt(rowsum((Xm*vfull):*Xm)) + } + + if (args()==15) se=(se \ semat) + + if (ngrid!=0) { + se=(se \ (semat#(J(ngrid,1,1)\.))) + se=se[|1 \ (rows(se)-1)|] + } + } + if (type=="dots"|type=="line") { + out=(eval, fit:+wval) + } + else { + out=(eval, (fit:+wval)-se*cval, (fit:+wval)+se*cval) + } + } + else { + Xm=binsreg_spdes(eval[,1], knotname, eval[,3], p, deriv, s) + if (type=="dots"|type=="line") { + fit=binsreg_pred(Xm, bmat, ., "xb")[,1] + out=(eval, fit:+wval) + } + else { + if (avar=="on") { + if (hdmethod=="T"&deriv==0) { + vmat=(vmat, vfull[|1,cols(vfull) \ nseries, cols(vfull)|] \ /// + vfull[|rows(vfull),1 \ rows(vfull), nseries|], vfull[rows(vfull), cols(vfull)]) + se=binsreg_pred((Xm, J(rows(Xm),1,1)), ., vmat, "se")[,2] + fit=binsreg_pred(Xm, bmat, ., "xb")[,1] + out=(eval, (fit:+wval)-cval*se, (fit:+wval)+cval*se) + } + else { + result=binsreg_pred(Xm, bmat, vmat, "all") + out=(eval, (result[,1]:+wval)-cval*result[,2], (result[,1]:+wval)+cval*result[,2]) + } + } + else { + if (nw>0) { + if (deriv==0) Xm=(Xm, J(rows(Xm),1,1)#wvec) + else Xm=(Xm, J(rows(Xm),nw,0)) + } + if (hdmethod=="T") { + if (deriv==0) Xm=(Xm, J(rows(Xm),1,1)) + else Xm=(Xm, J(rows(Xm),1,0)) + } + result=binsreg_pred(Xm, coef, vfull, "all") + out=(eval, result[,1]-cval*result[,2], result[,1]+cval*result[,2]) + } + } + } + + if (type=="dots"|(type=="line"&(s==0|s-deriv<=0))) { + out[selectindex(out[,2]:==1),4]=J(sum(out[,2]),1,.) 
+ } + if (type=="ci"|(type=="cb"&(s==0|s-deriv<=0))) { + out[selectindex(out[,2]:==1),4..5]=J(sum(out[,2]),2,.) + } + + return(out) + } + + +end + diff --git a/110/replication_package/replication/ado/plus/b/binsreg.sthlp b/110/replication_package/replication/ado/plus/b/binsreg.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..816098d0bb8ec09431ae8e0803bc8681cf4799ed --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binsreg.sthlp @@ -0,0 +1,459 @@ +{smcl} +{* *! version 1.2 09-OCT-2022}{...} +{viewerjumpto "Syntax" "binsreg##syntax"}{...} +{viewerjumpto "Description" "binsreg##description"}{...} +{viewerjumpto "Options" "binsreg##options"}{...} +{viewerjumpto "Examples" "binsreg##examples"}{...} +{viewerjumpto "Stored results" "binsreg##stored_results"}{...} +{viewerjumpto "References" "binsreg##references"}{...} +{viewerjumpto "Authors" "binsreg##authors"}{...} +{cmd:help binsreg} +{hline} + +{title:Title} + +{p 4 8}{hi:binsreg} {hline 2} Data-Driven Binscatter Least Squares Estimation with Robust Inference Procedures and Plots.{p_end} + + +{marker syntax}{...} +{title:Syntax} + +{p 4 12} {cmdab:binsreg} {depvar} {it:indvar} [{it:othercovs}] {ifin} {weight} [ {cmd:,} {opt deriv(v)} {opt at(position)}{p_end} +{p 12 12} {opt absorb(absvars)} {opt reghdfeopt(reghdfe_option)}{p_end} +{p 12 12} {opt dots(dotsopt)} {opt dotsgrid(dotsgridoption)} {opt dotsplotopt(dotsoption)}{p_end} +{p 12 12} {opt line(lineopt)} {opt linegrid(#)} {opt lineplotopt(lineoption)}{p_end} +{p 12 12} {opt ci(ciopt)} {opt cigrid(cigridoption)} {opt ciplotopt(rcapoption)}{p_end} +{p 12 12} {opt cb(cbopt)} {opt cbgrid(#)} {opt cbplotopt(rareaoption)}{p_end} +{p 12 12} {opt polyreg(p)} {opt polyreggrid(#)} {opt polyregcigrid(#)} {opt polyregplotopt(lineoption)}{p_end} +{p 12 12} {opth by(varname)} {cmd:bycolors(}{it:{help colorstyle}list}{cmd:)} {cmd:bysymbols(}{it:{help symbolstyle}list}{cmd:)} {cmd:bylpatterns(}{it:{help 
linepatternstyle}list}{cmd:)}{p_end} +{p 12 12} {opt nbins(nbinsopt)} {opt binspos(position)} {opt binsmethod(method)} {opt nbinsrot(#)} {opt samebinsby} {opt randcut(#)}{p_end} +{p 12 12} {cmd:pselect(}{it:{help numlist}}{cmd:)} {cmd:sselect(}{it:{help numlist}}{cmd:)}{p_end} +{p 12 12} {opt nsims(#)} {opt simsgrid(#)} {opt simsseed(seed)}{p_end} +{p 12 12} {opt dfcheck(n1 n2)} {opt masspoints(masspointsoption)}{p_end} +{p 12 12} {cmd:vce(}{it:{help vcetype}}{cmd:)} {opt asyvar(on/off)}{p_end} +{p 12 12} {opt level(level)} {opt usegtools(on/off)} {opt noplot} {opt savedata(filename)} {opt replace}{p_end} +{p 12 12} {opt plotxrange(min max)} {opt plotyrange(min max)} {it:{help twoway_options}} ]{p_end} + +{p 4 8} where {depvar} is the dependent variable, {it:indvar} is the independent variable for binning, and {it:othercovs} are other covariates to be controlled for.{p_end} + +{p 4 8} The degree of the piecewise polynomial p, the number of smoothness constraints s, and the derivative order v are integers +satisfying 0 <= s,v <= p, which can take different values in each case.{p_end} + +{p 4 8} {opt fweight}s, {opt aweight}s and {opt pweight}s are allowed; see {help weight}.{p_end} + +{marker description}{...} +{title:Description} + +{p 4 8} {cmd:binsreg} implements binscatter least squares estimation with robust inference procedure and plots, following the results in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":Cattaneo, Crump, Farrell and Feng (2022a)}. +Binscatter provides a flexible way to describe the mean relationship between two variables, after possibly adjusting for other covariates, +based on partitioning/binning of the independent variable of interest. +The main purpose of this command is to generate binned scatter plots with curve estimation with robust pointwise confidence intervals +and uniform confidence band. 
If the binning scheme is not set by the user, the companion command {help binsregselect:binsregselect} +is used to implement binscatter in a data-driven (optimal) way. +Hypothesis testing for parametric specifications of and shape restrictions on the regression function can be conducted via the +companion command {help binstest:binstest}. Hypothesis testing for pairwise group comparisons can be conducted via the +companion command {help binspwc: binspwc}. +{p_end} + +{p 4 8} A detailed introduction to this command is given in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Cattaneo, Crump, Farrell and Feng (2022b)}. +Companion R and Python packages with the same capabilities are available (see website below). +{p_end} + +{p 4 8} Companion commands: {help binstest:binstest} for hypothesis testing for parametric specifications +and shape restrictions, {help binspwc:binspwc} for hypothesis testing for pairwise group comparisons, +and {help binsregselect:binsregselect} for data-driven binning selection. +{p_end} + + +{p 4 8} Related Stata, R and Python packages are available in the following website:{p_end} + +{p 8 8} {browse "https://nppackages.github.io/":https://nppackages.github.io/}{p_end} + + +{marker options}{...} +{title:Options} + +{dlgtab:Estimand} + +{p 4 8} {opt deriv(v)} specifies the derivative order of the regression function for estimation and plotting. +The default is {cmd:deriv(0)}, which corresponds to the function itself. +{p_end} + +{p 4 8} {opt at(position)} specifies the values of {it:othercovs} at which the estimated function is evaluated for plotting. +The default is {cmd:at(mean)}, which corresponds to the mean of {it:othercovs}. Other options are: {cmd:at(median)} for the median of {it:othercovs}, +{cmd:at(0)} for zeros, and {cmd:at(filename)} for particular values of {it:othercovs} saved in another file. 
+ +{p 4 8} Note: When {cmd:at(mean)} or {cmd:at(median)} is specified, all factor variables in {it:othercovs} (if specified) are excluded from the evaluation (set as zero). +{p_end} + +{dlgtab:Reghdfe} + +{p 4 8} {opt absorb(absvars)} specifies categorical variables (or interactions) representing the fixed effects to be absorbed. This is equivalent to including an indicator/dummy variable for each category of each {it:absvar}. +When {cmd:absorb()} is specified, the community-contributed command {cmd:reghdfe} instead of the command {cmd:regress} is used. +{p_end} + +{p 4 8} {opt reghdfeopt(reghdfe_option)} options to be passed on to the command {cmd:reghdfe}. +{p_end} + +{p 4 8} {it:Important:} + +{p 6 9} 1. Fixed effects added via {cmd:absorb()} are included in the estimation procedure but excluded from the evaluation of the estimated function (set as zero), +regardless of the option specified within {cmd:at()}. +To plot the binscatter function for a particular category of interest, save the values of {it:othercovs} +at which the function is evaluated in another file, say, +{cmd:wval.dta}, specify the corresponding factor variables in {it:othercovs} directly, and add the option {cmd:at(wval)}. +{p_end} + +{p 6 9} 2. {cmd:absorb()} and {cmd:vce()} should not be specified within {cmd:reghdfeopt()}. +{p_end} + +{p 6 9} 3. Make sure the package {cmd:reghdfe} installed has a version number greater than or equal to 5.9.0 (03jun2020). +An older version may result in an error in Mata. +{p_end} + +{p 4 8} For more information about the community-contributed command {cmd:reghdfe}, please see {browse "http://scorreia.com/software/reghdfe/":http://scorreia.com/software/reghdfe/}. +{p_end} + +{dlgtab:Dots} + +{p 4 8} {opt dots(dotsopt)} sets the degree of polynomial and the number of smoothness for point estimation and plotting as "dots". +If {cmd:dots(p s)} is specified, a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints is used. 
+The default is {cmd:dots(0 0)}, which corresponds to piecewise constant (canonical binscatter). +If {cmd:dots(T)} is specified, the default {cmd:dots(0 0)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:dots(F)} is specified, the dots are not included in the plot. +{p_end} + +{p 4 8} {opt dotsgrid(dotsgridoption)} specifies the number and location of dots within each bin to be plotted. +Two options are available: {it:mean} and a {it:numeric} non-negative integer. +The option {opt dotsgrid(mean)} adds the sample average of {it:indvar} within each bin to the grid of evaluation points. +The option {opt dotsgrid(#)} adds {it:#} number of evenly-spaced points to the grid of evaluation points for each bin. +Both options can be used simultaneously: for example, {opt dotsgrid(mean 5)} generates six evaluation points within each bin containing +the sample mean of {it:indvar} within each bin and five evenly-spaced points. +Given this choice, the dots are point estimates evaluated over the selected grid within each bin. +The default is {opt dotsgrid(mean)}, which corresponds to one dot per bin evaluated at the sample +average of {it:indvar} within each bin (canonical binscatter). +{p_end} + +{p 4 8} {opt dotsplotopt(dotsoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the plotted dots. +{p_end} + +{dlgtab:Line} + +{p 4 8} {opt line(lineopt)} sets the degree of polynomial and the number of smoothness constraints +for plotting as a "line". If {cmd:line(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If {cmd:line(T)} is specified, {cmd:line(0 0)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). 
+If {cmd:line(F)} or {cmd:line()} is specified, the line is not included in the plot. +The default is {cmd:line()}. +{p_end} + +{p 4 8} {opt linegrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin +used for evaluation of the point estimate set by the {cmd:line(p s)} option. +The default is {cmd:linegrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for fitting/plotting the line. +{p_end} + +{p 4 8} {opt lineplotopt(lineoption)} standard graphs options to be passed on to the {help twoway:twoway} +command to modify the appearance of the plotted line. +{p_end} + +{dlgtab:Confidence Intervals} + +{p 4 8} {opt ci(ciopt)} specifies the degree of polynomial and the number of smoothness constraints +for constructing confidence intervals. If {cmd:ci(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If {cmd:ci(T)} is specified, {cmd:ci(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:ci(F)} or {cmd:ci()} is specified, the confidence intervals are not included in the plot. +The default is {cmd:ci()}. +{p_end} + +{p 4 8} {opt cigrid(cigridoption)} specifies the number and location of evaluation points in the grid used to +construct the confidence intervals set by the {opt ci(p s)} option. +Two options are available: {it:mean} and a {it:numeric} non-negative integer. +The option {opt cigrid(mean)} adds the sample average of {it:indvar} within each bin to the grid of evaluation points. +The option {opt cigrid(#)} adds {it:#} number of evenly-spaced points to the grid of evaluation points for each bin. +Both options can be used simultaneously: for example, {opt cigrid(mean 5)} generates six evaluation points +within each bin containing the sample mean of {it:indvar} within each bin and five evenly-spaced points. 
+The default is {opt cigrid(mean)}, which corresponds to one evaluation point set at the sample average of +{it:indvar} within each bin for confidence interval construction. +{p_end} + +{p 4 8} {opt ciplotopt(rcapoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the confidence intervals. +{p_end} + +{dlgtab:Confidence Band} + +{p 4 8} {opt cb(cbopt)} specifies the degree of polynomial and the number of smoothness constraints +for constructing the confidence band. If {cmd:cb(p s)} is specified, a piecewise polynomial of +degree {it:p} with {it:s} smoothness constraints is used. +If the option {cmd:cb(T)} is specified, {cmd:cb(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +If {cmd:cb(F)} or {cmd:cb()} is specified, the confidence band is not included in the plot. +The default is {cmd:cb()}. +{p_end} + +{p 4 8} {opt cbgrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin +used for evaluation of the point estimate set by the {cmd:cb(p s)} option. +The default is {cmd:cbgrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for confidence band construction. +{p_end} + +{p 4 8} {opt cbplotopt(rareaoption)} standard graphs options to be passed on to the {help twoway:twoway} command to modify the appearance of the confidence band. +{p_end} + +{dlgtab:Global Polynomial Regression} + +{p 4 8} {opt polyreg(p)} sets the degree {it:p} of a global polynomial regression model for plotting. +By default, this fit is not included in the plot unless explicitly specified. +Recommended specification is {cmd:polyreg(3)}, which adds a cubic polynomial fit of the regression function of interest to the binned scatter plot. 
+{p_end} + +{p 4 8} {opt polyreggrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin +used for evaluation of the point estimate set by the {cmd:polyreg(p)} option. +The default is {cmd:polyreggrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for confidence interval construction. +{p_end} + +{p 4 8} {opt polyregcigrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin +used for constructing confidence intervals based on polynomial regression set by the {cmd:polyreg(p)} option. +The default is {cmd:polyregcigrid(0)}, which corresponds to not plotting confidence intervals for the global polynomial regression approximation. +{p_end} + +{p 4 8} {opt polyregplotopt(lineoption)} standard graphs options to be passed on to the {help twoway:twoway} command +to modify the appearance of the global polynomial regression fit. +{p_end} + +{dlgtab:Subgroup Analysis} + +{p 4 8} {opt by(varname)} specifies the variable containing the group indicator to perform subgroup analysis; +both numeric and string variables are supported. +When {opt by(varname)} is specified, {cmdab:binsreg} implements estimation and inference for each subgroup separately, +but produces a common binned scatter plot. +By default, the binning structure is selected for each subgroup separately, but see the option {cmd:samebinsby} below +for imposing a common binning structure across subgroups. +{p_end} + +{p 4 8} {cmd:bycolors(}{it:{help colorstyle}list}{cmd:)} specifies an ordered list of colors for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{p 4 8} {cmd:bysymbols(}{it:{help symbolstyle}list}{cmd:)} specifies an ordered list of symbols for plotting each subgroup series defined by the option {opt by()}. 
+{p_end} + +{p 4 8} {cmd:bylpatterns(}{it:{help linepatternstyle}list}{cmd:)} specifies an ordered list of line patterns for plotting each subgroup series defined by the option {opt by()}. +{p_end} + +{dlgtab:Binning/Degree/Smoothness Selection} + +{p 4 8} {opt nbins(nbinsopt)} sets the number of bins for partitioning/binning of {it:indvar}. +If {cmd:nbins(T)} or {cmd:nbins()} (default) is specified, the number of bins is selected via the companion command {help binsregselect:binsregselect} +in a data-driven, optimal way whenever possible. If a {help numlist:numlist} with more than one number is specified, +the number of bins is selected within this list via the companion command {help binsregselect:binsregselect}. +{p_end} + +{p 4 8} {opt binspos(position)} specifies the position of binning knots. +The default is {cmd:binspos(qs)}, which corresponds to quantile-spaced binning (canonical binscatter). +Other options are: {cmd:es} for evenly-spaced binning, or a {help numlist} for manual specification of +the positions of inner knots (which must be within the range of {it:indvar}). +{p_end} + +{p 4 8} {opt binsmethod(method)} specifies the method for data-driven selection of the number of bins +via the companion command {help binsregselect:binsregselect}. +The default is {cmd:binsmethod(dpi)}, which corresponds to the IMSE-optimal direct plug-in rule. +The other option is: {cmd:rot} for rule of thumb implementation. +{p_end} + +{p 4 8} {opt nbinsrot(#)} specifies an initial number of bins value used to +construct the DPI number of bins selector. +If not specified, the data-driven ROT selector is used instead. +{p_end} + +{p 4 8} {opt samebinsby} forces a common partitioning/binning structure across all subgroups +specified by the option {cmd:by()}. +The knots positions are selected according to the option {cmd:binspos()} and using the full sample. 
+If {cmd:nbins()} is not specified, then the number of bins is selected via the companion command
+{help binsregselect:binsregselect} and using the full sample.
+{p_end}
+
+{p 4 8} {opt randcut(#)} specifies the upper bound on a uniformly distributed variable used to draw a subsample
+for bins/degree/smoothness selection.
+Observations for which {cmd:runiform()<=#} are used. # must be between 0 and 1.
+By default, max(5,000, 0.01n) observations are used if the sample size n>5,000.
+{p_end}
+
+{p 4 8} {opt pselect(numlist)} specifies a list of numbers within which the degree of polynomial {it:p} for
+point estimation is selected. Piecewise polynomials of the selected optimal degree {it:p}
+are used to construct dots or line if {cmd:dots(T)} or {cmd:line(T)} is specified,
+whereas piecewise polynomials of degree {it:p+1} are used to construct confidence intervals
+or confidence band if {cmd:ci(T)} or {cmd:cb(T)} is specified.
+{p_end}
+
+{p 4 8} {opt sselect(numlist)} specifies a list of numbers within which
+the number of smoothness constraints {it:s}
+for point estimation is selected. Piecewise polynomials with the selected optimal
+{it:s} smoothness constraints are used to construct dots or line
+if {cmd:dots(T)} or {cmd:line(T)} is specified,
+whereas piecewise polynomials with {it:s+1} constraints are used to construct
+confidence intervals or confidence band if {cmd:ci(T)} or {cmd:cb(T)} is specified.
+If not specified, for each value {it:p} supplied in the
+option {cmd:pselect()}, only the piecewise polynomial with the maximum smoothness is considered, i.e., {it:s=p}.
+{p_end}
+
+{p 4 8} Note: To implement the degree or smoothness selection, in addition to {cmd:pselect()}
+or {cmd:sselect()}, {cmd:nbins(#)} must be specified.
+{p_end}
+
+{dlgtab:Simulation}
+
+{p 4 8} {opt nsims(#)} specifies the number of random draws for constructing confidence bands.
+The default is {cmd:nsims(500)}, which corresponds to 500 draws from a standard Gaussian random vector of size [(p+1)*J - (J-1)*s].
+A large number of random draws is recommended to obtain the final results.
+{p_end}
+
+{p 4 8} {opt simsgrid(#)} specifies the number of evaluation points of an evenly-spaced grid
+within each bin used for evaluation of the supremum operation needed to construct confidence bands.
+The default is {cmd:simsgrid(20)}, which corresponds to 20 evenly-spaced evaluation points within
+each bin for approximating the supremum (or infimum) operator.
+A large number of evaluation points is recommended to obtain the final results.
+{p_end}
+
+{p 4 8} {opt simsseed(#)} sets the seed for simulations.
+{p_end}
+
+{dlgtab:Mass Points and Degrees of Freedom}
+
+{p 4 8} {opt dfcheck(n1 n2)} sets cutoff values for minimum effective sample size checks,
+which take into account the number of unique values of {it:indvar} (i.e., adjusting for the number of mass points),
+number of clusters, and degrees of freedom of the different statistical models considered.
+The default is {cmd:dfcheck(20 30)}. See Cattaneo, Crump, Farrell and Feng (2022b) for more details.
+{p_end}
+
+{p 4 8} {opt masspoints(masspointsoption)} specifies how mass points in {it:indvar} are handled.
+By default, all mass point and degrees of freedom checks are implemented.
+Available options:
+{p_end}
+{p 8 8} {opt masspoints(noadjust)} omits mass point checks and the corresponding effective sample size adjustments.{p_end}
+{p 8 8} {opt masspoints(nolocalcheck)} omits within-bin mass point and degrees of freedom checks.{p_end}
+{p 8 8} {opt masspoints(off)} sets {opt masspoints(noadjust)} and {opt masspoints(nolocalcheck)} simultaneously.{p_end}
+{p 8 8} {opt masspoints(veryfew)} forces the command to proceed as if {it:indvar} has only a small number of mass points (i.e., distinct values).
+In other words, forces the command to proceed as if the mass point and degrees of freedom checks were failed.{p_end} + +{dlgtab:Standard Error} + +{p 4 8} {cmd:vce(}{it:{help vcetype}}{cmd:)} specifies the {it:vcetype} for variance estimation used by the command {help regress##options:regress} +(or {cmd:reghdfe} if {cmd:absorb()} is specified.). The default is {cmd:vce(robust)}. +{p_end} + +{p 4 8} {opt asyvar(on/off)} specifies the method used to compute standard errors. +If {cmd:asyvar(on)} is specified, the standard error of the nonparametric component is used and the uncertainty +related to other control variables {it:othercovs} is omitted. +Default is {cmd:asyvar(off)}, that is, the uncertainty related to {it:othercovs} is taken into account. +{p_end} + +{dlgtab:Other Options} + +{p 4 8} {opt level(#)} sets the nominal confidence level for confidence interval and confidence band estimation. Default is {cmd:level(95)}. +{p_end} + +{p 4 8}{opt usegtools(on/off)} forces the use of several commands in the community-distributed Stata package {cmd:gtools} +to speed the computation up, if {it:on} is specified. +Default is {cmd:usegtools(off)}. +{p_end} + +{p 4 8} For more information about the package {cmd:gtools}, please see {browse "https://gtools.readthedocs.io/en/latest/index.html":https://gtools.readthedocs.io/en/latest/index.html}. +{p_end} + +{p 4 8} {opt noplot} omits binscatter plotting. +{p_end} + +{p 4 8} {opt savedata(filename)} specifies a filename for saving all data underlying the binscatter plot (and more). +{p_end} + +{p 4 8} {opt replace} overwrites the existing file when saving the graph data. +{p_end} + +{p 4 8} {opt plotxrange(min max)} specifies the range of the x-axis for plotting. Observations outside the range are dropped in the plot. +{p_end} + +{p 4 8} {opt plotyrange(min max)} specifies the range of the y-axis for plotting. Observations outside the range are dropped in the plot. 
+{p_end} + +{p 4 8} {it:{help twoway_options}} any unrecognized options are appended to the end of the twoway command generating the binned scatter plot. +{p_end} + + +{marker examples}{...} +{title:Examples} + +{p 4 8} Setup{p_end} +{p 8 8} . {stata sysuse auto}{p_end} + +{p 4 8} Run a binscatter regression and report the plot{p_end} +{p 8 8} . {stata binsreg mpg weight foreign}{p_end} + +{p 4 8} Add confidence intervals and confidence band{p_end} +{p 8 8} . {stata binsreg mpg weight foreign, ci(3 3) cb(3 3) nbins(13)}{p_end} + +{p 4 8} Run binscatter regression by group{p_end} +{p 8 8} . {stata binsreg mpg weight, by(foreign)}{p_end} + +{marker stored_results}{...} +{title:Stored results} + +{synoptset 17 tabbed}{...} +{p2col 5 17 21 2: Scalars}{p_end} +{synopt:{cmd:e(N)}}number of observations{p_end} +{synopt:{cmd:e(level)}}confidence level{p_end} +{synopt:{cmd:e(dots_p)}}degree of polynomial for dots{p_end} +{synopt:{cmd:e(dots_s)}}smoothness of polynomial for dots{p_end} +{synopt:{cmd:e(line_p)}}degree of polynomial for line{p_end} +{synopt:{cmd:e(line_s)}}smoothness of polynomial for line{p_end} +{synopt:{cmd:e(ci_p)}}degree of polynomial for confidence interval{p_end} +{synopt:{cmd:e(ci_s)}}smoothness of polynomial for confidence interval{p_end} +{synopt:{cmd:e(cb_p)}}degree of polynomial for confidence band{p_end} +{synopt:{cmd:e(cb_s)}}smoothness of polynomial for confidence band{p_end} +{p2col 5 17 21 2: Matrices}{p_end} +{synopt:{cmd:e(N_by)}}number of observations for each group{p_end} +{synopt:{cmd:e(Ndist_by)}}number of distinct values for each group{p_end} +{synopt:{cmd:e(Nclust_by)}}number of clusters for each group{p_end} +{synopt:{cmd:e(nbins_by)}}number of bins for each group{p_end} +{synopt:{cmd:e(cval_by)}}critical value for each group, used for confidence bands{p_end} +{synopt:{cmd:e(imse_var_rot)}}variance constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_bsq_rot)}}bias constant in IMSE, ROT selection{p_end} 
+{synopt:{cmd:e(imse_var_dpi)}}variance constant in IMSE, DPI selection{p_end}
+{synopt:{cmd:e(imse_bsq_dpi)}}bias constant in IMSE, DPI selection{p_end}
+
+{marker references}{...}
+{title:References}
+
+{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022a.
+{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":On Binscatter}.
+{it:arXiv:1902.09608}.
+{p_end}
+
+{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022b.
+{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Binscatter Regressions}.
+{it:arXiv:1902.09615}.
+{p_end}
+
+
+{marker authors}{...}
+{title:Authors}
+
+{p 4 8} Matias D. Cattaneo, Princeton University, Princeton, NJ.
+{browse "mailto:cattaneo@princeton.edu":cattaneo@princeton.edu}.
+{p_end}
+
+{p 4 8} Richard K. Crump, Federal Reserve Bank of New York, New York, NY.
+{browse "mailto:richard.crump@ny.frb.org":richard.crump@ny.frb.org}.
+{p_end}
+
+{p 4 8} Max H. Farrell, University of Chicago, Chicago, IL.
+{browse "mailto:max.farrell@chicagobooth.edu":max.farrell@chicagobooth.edu}.
+{p_end}
+
+{p 4 8} Yingjie Feng, Tsinghua University, Beijing, China.
+{browse "mailto:fengyingjiepku@gmail.com":fengyingjiepku@gmail.com}.
+{p_end} + diff --git a/110/replication_package/replication/ado/plus/b/binsreg_checkdrop.mo b/110/replication_package/replication/ado/plus/b/binsreg_checkdrop.mo new file mode 100644 index 0000000000000000000000000000000000000000..1d2be2c26440e9924941f52e80369f582858c381 Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_checkdrop.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsreg_cquantile.mo b/110/replication_package/replication/ado/plus/b/binsreg_cquantile.mo new file mode 100644 index 0000000000000000000000000000000000000000..28472c9ae8e9b74f462292d542ff06f95df8992c Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_cquantile.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsreg_grids.mo b/110/replication_package/replication/ado/plus/b/binsreg_grids.mo new file mode 100644 index 0000000000000000000000000000000000000000..cc11d0be1e893a9c6fc7dffb23c05c33eb412f99 Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_grids.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsreg_irecode.ado b/110/replication_package/replication/ado/plus/b/binsreg_irecode.ado new file mode 100644 index 0000000000000000000000000000000000000000..ad3a39fe7d3e870a587c7e73a972effada6a494b --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binsreg_irecode.ado @@ -0,0 +1,70 @@ +*! 
version 1.2 09-OCT-2022 + +* Generalized irecode function + +program define binsreg_irecode + version 13 + + syntax varlist(max=1 numeric) [if] [in], knotmat(name) BINid(varname numeric) /// + [usegtools nbins(string) pos(string) knotliston(string)] + + /* knot is a FULL knot matrix with boundaries */ + /* used internally, no error checks */ + + marksample touse + + if ("`usegtools'"==""|"`pos'"!="QS") { + confirm variable `binid' + confirm matrix `knotmat' + + local n=rowsof(`knotmat') + if (`n'==2) qui replace `binid'=1 if `touse' + else if (`n'==3) qui replace `binid'=1+irecode(`varlist', `knotmat'[2,1]) if `touse' + else { + local J = `n' - 2 + local knots = `knotmat'[2,1] + local assignedbins = 1 + local knotstsize : strlen local knots + local knotsnextsize = 0 + + forvalues j = 2/`J' { + local knotsnext = `knotmat'[`j'+1,1] + local knotstsize = `knotstsize' + `knotsnextsize' + local knotsnextsize : strlen local knotsnext + + if (`knotstsize' + `knotsnextsize' < `c(macrolen)') & (`j' - `assignedbins' < 248){ + local knots "`knots',`knotsnext'" + if `j' == `J' { + qui replace `binid' = `assignedbins' + irecode(`varlist',`knots') if `touse' & `binid'==. + } + } + else { + qui replace `binid' = `assignedbins' + irecode(`varlist',`knots') if `binid'==. & `touse' + if `j'<`J' { + qui replace `binid' = . 
if `varlist' > `knotmat'[`j'+1,1] & `touse' + local knots = `knotmat'[`j'+1,1] + local assignedbins = `j' + local knotstsize = 0 + } + else { + qui replace `binid' = `n'-1 if `varlist' > `knotmat'[`j'+1,1] & `touse' + } + } + } + } + } + else { + capture confirm variable `binid' + if (!_rc) drop `binid' + + if (`nbins'==1) qui replace `binid'=1 if `touse' + else { + tempvar cat + if ("`knotliston'"=="T") fasterxtile `binid'=`varlist', nq(`nbins') + else fasterxtile `binid'=`varlist' if `touse', nq(`nbins') + *qui replace `binid'=`cat' if `touse' + } + } + + +end diff --git a/110/replication_package/replication/ado/plus/b/binsreg_pctile.ado b/110/replication_package/replication/ado/plus/b/binsreg_pctile.ado new file mode 100644 index 0000000000000000000000000000000000000000..cc7b02659c63c8d295dfeabcbab4cb405cb6f70d --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binsreg_pctile.ado @@ -0,0 +1,80 @@ +*! version 1.2 09-OCT-2022 + +* Generalized pctile function + +program define binsreg_pctile, rclass + version 13 + + syntax varlist(max=1 numeric) [if] [in] [fw aw pw] [, nq(integer 2) usegtools] + + /* nq must be >=2, # of bins */ + /* used internally, no error checks */ + + marksample touse + + if ("`weight'"!="") local wt [`weight'`exp'] + + local nk = `nq'-1 /* # of quantiles */ + tempname A + + mat `A'=J(`nk',1,.) 
+ + if `nq' <= 1001 { + if ("`usegtools'"=="") _pctile `varlist' if `touse' `wt', nq(`nq') + else gquantiles `varlist' if `touse' `wt', _pctile nq(`nq') + forvalues j = 1/`nk' { + mat `A'[`j',1] = r(r`j') + } + } + else { + local plist = "" + local plistlen = 0 + local entryint = 1 + local plistsize = 0 + local plistnextsize = 0 + + forvalues j = 1/`nk' { + local plistnext = 100*`j'/`nq' + local plistsize = `plistsize' + `plistnextsize' + local plistnextsize : strlen local plistnext + if (`plistsize'+`plistnextsize' < `c(macrolen)') & (`plistlen' <= 999) { + if "`plist'"=="" { + local plist "`plistnext'" + } + else local plist "`plist',`plistnext'" + + local ++plistlen + + if `j' == `nk' { + if ("`usegtools'"=="") _pctile `varlist' if `touse' `wt', p(`plist') + else gquantiles `varlist' if `touse' `wt', _pctile p(`plist') + forvalues k = 1/`plistlen' { + mat `A'[`entryint'+`k'-1,1] = r(r`k') + } + } + } + else { + if ("`usegtools'"=="") _pctile `varlist' if `touse' `wt', p(`plist') + else gquantiles `varlist' if `touse' `wt', _pctile p(`plist') + forvalues k = 1/`plistlen' { + mat `A'[`entryint'+`k'-1,1] = r(r`k') + } + if (`j'<`nk') { + local plist "`plistnext'" + local entryint = `entryint' + `plistlen' + local plistlen = 1 + local plistsize = 0 + } + else { + if ("`usegtools'"=="") _pctile `varlist' if `touse' `wt', p(`plistnext') + else gquantiles `varlist' if `touse' `wt', _pctile p(`plistnext') + mat `A'[`nk',1]=r(r1) + } + } + } + } + + return clear + return matrix Q=`A' + +end diff --git a/110/replication_package/replication/ado/plus/b/binsreg_pred.mo b/110/replication_package/replication/ado/plus/b/binsreg_pred.mo new file mode 100644 index 0000000000000000000000000000000000000000..e508a1292e871aabb51323acb8152df167c92f92 Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_pred.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsreg_pval.mo 
b/110/replication_package/replication/ado/plus/b/binsreg_pval.mo new file mode 100644 index 0000000000000000000000000000000000000000..8afe3cd5219b2b5f5766c724d4aac2c593a603ad Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_pval.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsreg_spdes.mo b/110/replication_package/replication/ado/plus/b/binsreg_spdes.mo new file mode 100644 index 0000000000000000000000000000000000000000..a244d6cc92dcd68a1aba516f57ee2ad6373395b5 Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_spdes.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsreg_st_spdes.mo b/110/replication_package/replication/ado/plus/b/binsreg_st_spdes.mo new file mode 100644 index 0000000000000000000000000000000000000000..5e8dd4f5537fa376b67a5b34a4d1d807e801f9e9 Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_st_spdes.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsreg_stat.mo b/110/replication_package/replication/ado/plus/b/binsreg_stat.mo new file mode 100644 index 0000000000000000000000000000000000000000..07ae82feee611788747f3e3ab5604c7185ba1b22 Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_stat.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsreg_uniq.mo b/110/replication_package/replication/ado/plus/b/binsreg_uniq.mo new file mode 100644 index 0000000000000000000000000000000000000000..7bc1e964266103e2539782b0697d0b5189380a54 Binary files /dev/null and b/110/replication_package/replication/ado/plus/b/binsreg_uniq.mo differ diff --git a/110/replication_package/replication/ado/plus/b/binsregselect.ado b/110/replication_package/replication/ado/plus/b/binsregselect.ado new file mode 100644 index 0000000000000000000000000000000000000000..169e5f1642b19991ceb8ec5ba37877fc31fa1d89 --- /dev/null +++ 
b/110/replication_package/replication/ado/plus/b/binsregselect.ado @@ -0,0 +1,1365 @@ +*! version 1.2 09-Oct-2022 + +capture program drop binsregselect +program define binsregselect, eclass + version 13 + + syntax varlist(min=2 numeric ts fv) [if] [in] [fw aw pw] [, deriv(integer 0) /// + absorb(string asis) reghdfeopt(string asis) /// + bins(numlist integer max=2 >=0) pselect(numlist integer >=0) sselect(numlist integer >=0) /// + binspos(string) nbins(string) /// + binsmethod(string) nbinsrot(string) /// + simsgrid(integer 20) savegrid(string asis) replace /// + dfcheck(numlist integer max=2 >=0) masspoints(string) usegtools(string) /// + vce(passthru) useeffn(string) randcut(numlist max=1 >=0 <=1) /// + norotnorm numdist(string) numclust(string)] + /* last line only for internal use */ + + set more off + + ************************************** + ******** Regularization constant **** + ************************************** + local qrot=2 + local rot_lb=1 + local den_alp=0.975 + + ************************************** + * Create weight local + if ("`weight'"!="") { + local wt [`weight'`exp'] + local wtype=substr("`weight'",1,1) + } + + ********************** + ** Extract options *** + ********************** + * default vce + if ("`vce'"=="") local vce "vce(robust)" + + * binning + * indictors: selectJ (F means select p) + local selectJ "" + *if ("`nbins'"=="F") local nbins "" + if ("`nbins'"=="T"|"`nbins'"=="") local nbins=0 /* default: select J */ + local len_nbins=0 + if ("`nbins'"!="") { + numlist "`nbins'", integer range(>=0) sort + local nbins=r(numlist) + local len_nbins: word count `nbins' + } + + if ("`nbins'"=="0"|`len_nbins'>1|"`bins'"!="") local selectJ "T" + + * analyze bin- and order-related options + local len_p=0 + local len_s=0 + + if ("`pselect'"!="") { + numlist "`pselect'", integer range(>=`deriv') sort + local plist=r(numlist) + } + + if ("`sselect'"!="") { + numlist "`sselect'", integer range(>=0) sort + local slist=r(numlist) + } + + if 
("`bins'"!="") { + tokenize `bins' /* overwrite pselect and sselect */ + local plist "`1'" + local slist "`2'" + if ("`plist'"=="") local plist=`deriv' + if ("`slist'"=="") local slist=`plist' + } + + local len_p: word count `plist' + local len_s: word count `slist' + + + if ((`len_p'==1&`len_s'==0)|(`len_p'==0&`len_s'==1)|(`len_p'==1&`len_s'==1)) { + local selectJ "T" + } + + if ("`selectJ'"=="T") { + if (`len_p'>1|`len_s'>1) { + di as error "Only one p and one s are allowed." + exit + } + if ("`plist'"=="") local plist=`deriv' + if ("`slist'"=="") local slist=`plist' + } + + local len_p: word count `plist' + local len_s: word count `slist' + + if ((`len_p'>1|`len_s'>1) & "`selectJ'"!="T") { + local selectJ "F" /* select p and s */ + } + + if ("`selectJ'"=="") { + di as error "Degree, smoothness, or # of bins are not correctly specified." + exit + } + + * find all compatible pairs of p and s + tempname deg_mat + if ("`selectJ'"=="F") { + if (`len_p'>0 & `len_s'==0) { + mat `deg_mat'=J(`len_p', 2, .) + forval i=1/`len_p' { + local el : word `i' of `plist' + mat `deg_mat'[`i',1]=`el' + mat `deg_mat'[`i',2]=`el' + } + } + if (`len_p'==0 & `len_s'>0) { + mat `deg_mat'=J(`len_s', 2, .) + forval i=1/`len_s' { + local el : word `i' of `slist' + mat `deg_mat'[`i',1]=`el' + mat `deg_mat'[`i',2]=`el' + } + } + if (`len_p'>0 & `len_s'>0) { + mat `deg_mat'=J(`=`len_p'*`len_s'',2,.) 
+ local ncom=0 + forval i=1/`len_p' { + local el_p : word `i' of `plist' + forval j=1/`len_s' { + local el_s : word `j' of `slist' + if (`el_p'>=`el_s') { + mat `deg_mat'[`=`ncom'+1',1]=`el_p' + mat `deg_mat'[`=`ncom'+1',2]=`el_s' + local ++ncom + } + } + } + if (`ncom'>0) mat `deg_mat'=`deg_mat'[1..`ncom', 1..2] + else { + di as error "degree and smoothness incompatible" + exit + } + } + } + else { + mat `deg_mat'=(`plist', `slist') + } + + * take submatrix with p>=deriv + local ncom=0 + local index "" + tempname m_deg /* degree matrix to be used */ + forval i=1/`=rowsof(`deg_mat')' { + if (`deg_mat'[`i',1]>=`deriv') { + local ++ncom + mat `m_deg'=(nullmat(`m_deg') \ `deg_mat'[`i',1..2]) + } + } + if (`ncom'==0) { + di as error "Degree and smoothness incorrectly specified." + exit + } + + if ("`binspos'"=="") local binspos "QS" + if ("`binspos'"=="es") local binspos "ES" + if ("`binspos'"=="qs") local binspos "QS" + if ("`binsmethod'"=="") local binsmethod "DPI" + if ("`binsmethod'"=="rot") local binsmethod "ROT" + if ("`binsmethod'"=="dpi") local binsmethod "DPI" + if ("`dfcheck'"=="") local dfcheck 20 30 + + * mass check? + if ("`masspoints'"=="") { + local massadj "T" + local localcheck "T" + } + else if ("`masspoints'"=="off") { + local massadj "F" + local localcheck "F" + } + else if ("`masspoints'"=="noadjust") { + local massadj "F" + local localcheck "T" + } + else if ("`masspoints'"=="nolocalcheck") { + local massadj "T" + local localcheck "F" + } + else if ("`masspoints'"=="veryfew") { + di as text in gr "Warning: masspoints(veryfew) not allowed for bin selection." + local localcheck "F" + local rot_fewobs "T" + local dpi_fewobs "T" + } + + tokenize `dfcheck' + local dfcheck_n1 "`1'" + local dfcheck_n2 "`2'" + + * use gtools commands instead? + if ("`usegtools'"=="off") local usegtools "" + if ("`usegtools'"=="on") local usegtools usegtools + if ("`usegtools'"!="") { + capture which gtools + if (_rc) { + di as error "Gtools package not installed." 
+ exit + } + local localcheck "F" + * use gstats tab instead of tabstat/collapse + * use gquantiles instead of _pctile + * use gunique instead of binsreg_uniq + * use fasterxtile instead of irecode (within binsreg_irecode) + * shut down local checks & do not sort + } + + * cluster var? + local vcetemp: subinstr local vce "vce(" "", all + local vcetemp: subinstr local vcetemp ")" "", all + tokenize "`vcetemp'", parse(", ") + if ("`1'"=="cl"|"`1'"=="clu"|"`1'"=="clus"|"`1'"=="clust"|"`1'"=="cluste"|"`1'"=="cluster") { + if ("`3'"==""|"`3'"==",") local clusterON "T" + local clustervar `2' /* only keep the 1st cluster var */ + } + + * use reghdfe? + if ("`absorb'"!="") { + capture which reghdfe + if (_rc) { + di as error "reghdfe not installed." + exit + } + } + + ***************************************************** + * Error checks + if (`deriv'<0) { + di as error "deriv() incorrectly specified." + exit + } + if (`simsgrid'<0) { + di as error "simsgrid() incorrectly specified." + exit + } + if (`"`savegrid'"'!=`""'&"`replace'"=="") { + confirm new file `"`savegrid'.dta"' + } + if ("`nbinsrot'"!="") { + confirm integer n `nbinsrot' + } + + * Mark sample + preserve + + * Parse varlist into y_var, x_var and w_var + tokenize `varlist' + fvrevar `1', tsonly + local y_var "`r(varlist)'" + fvrevar `2', tsonly + local x_var "`r(varlist)'" + fvrevar `2', list + local x_varname "`r(varlist)'" + + macro shift 2 + local w_var "`*'" + fvrevar `w_var', list + local w_varname "`r(varlist)'" + fvrevar `w_var', tsonly + local w_var "`r(varlist)'" /* so time series operator respected */ + + marksample touse /* now renew the marker to account for missing values */ + qui keep if `touse' + local eN=_N + local samplesize=_N + + if ("`usegtools'"==""&("`masspoints'"!="off"|"`binspos'"=="QS")) { + if ("`:sortedby'"!="`x_var'") sort `x_var', stable + } + + * Normalize support + tempvar z_var + if ("`wtype'"=="f") qui sum `x_var' `wt', meanonly + else qui sum `x_var', meanonly + + local 
N=r(N) /* sample size, with wt */ + local xmin=r(min) + local xmax=r(max) + + tempname xvec zvec Xm binedges + + * Extract effective sample size + local Ndist=. + if ("`massadj'"=="T") { + if ("`numdist'"!=""&"`numdist'"!=".") { + local Ndist=`numdist' + } + else { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(st_data(.,"`x_var'"), ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' + local Ndist=r(unique) + } + } + local eN=min(`eN', `Ndist') + } + + local Nclust=. + if ("`clusterON'"=="T") { + if ("`numclust'"!=""&"`numclust'"!=".") { + local Nclust=`numclust' + } + else { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(st_data(.,"`clustervar'"))))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + } + local eN=min(`eN', `Nclust') + } + + * Take a subsample? + if ("`randcut'"!="") { + mata: `xvec'=st_data(.,"`x_var'") + qui keep if runiform()<=`randcut' + local eN_sub=_N + + local Ndist_sub=. + if ("`massadj'"=="T") { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(st_data(.,"`x_var'"), ., 1, "Ndist_sub") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' + local Ndist_sub=r(unique) + } + local eN_sub=min(`eN_sub', `Ndist_sub') + } + + local Nclust_sub=. 
+ if ("`clusterON'"=="T") { + if ("`usegtools'"=="") { + mata: st_local("Nclust_sub", strofreal(rows(uniqrows(st_data(.,"`clustervar'"))))) + } + else { + qui gunique `clustervar' + local Nclust_sub=r(unique) + } + local eN_sub=min(`eN_sub', `Nclust_sub') + } + } + else { + local eN_sub=`eN' + local Ndist_sub=`Ndist' + local Nclust_sub=`Nclust' + } + + sum `x_var', meanonly + gen `z_var'=(`x_var'-`=r(min)')/(`=r(max)'-`=r(min)') + mata: `zvec'=st_data(., "`z_var'") /* normalized x, subsample */ + + + * Define matrices here to save results + tempname mat_imse_bsq_rot mat_imse_var_rot mat_imse_bsq_dpi mat_imse_var_dpi /// + mat_J_rot_unreg mat_J_rot_reg mat_J_rot_uniq mat_J_dpi mat_J_dpi_uniq + mat `mat_imse_bsq_rot'=J(`ncom',1,.) + mat `mat_imse_var_rot'=J(`ncom',1,.) + mat `mat_imse_bsq_dpi'=J(`ncom',1,.) + mat `mat_imse_var_dpi'=J(`ncom',1,.) + mat `mat_J_rot_unreg'=J(`ncom',1,.) + mat `mat_J_rot_reg'=J(`ncom',1,.) + mat `mat_J_rot_uniq'=J(`ncom',1,.) + mat `mat_J_dpi'=J(`ncom',1,.) + mat `mat_J_dpi_uniq'=J(`ncom',1,.) + + + ****** START loop here ************** + forval num=1/`ncom' { + + * extract p and s from the matrix + local p=`m_deg'[`num', 1] + local s=`m_deg'[`num', 2] + + * prepare locals for reporting + local imse_bsq_rot=. + local imse_var_rot=. + local imse_bsq_dpi=. + local imse_var_dpi=. + + *************************** + ******* ROT choice ******** + *************************** + tempname vcons bcons coef + tempvar resid1 resid2 /* only used by reghdfe */ + local J_rot_reg=. + local J_rot_unreg=. + if ("`nbinsrot'"!="") local J_rot_reg=`nbinsrot' + + * Initial checking of sample size (for ROT) + if (`J_rot_reg'==.&"`rot_fewobs'"=="") { + if (`eN_sub'<=`dfcheck_n1'+`p'+1+`qrot') { + local rot_fewobs "T" + di as text in gr "Warning: Too small effective sample size for bin selection." + } + } + + if ("`rot_fewobs'"!="T"&`J_rot_reg'==.) 
{ + * Power series + local series_rot "" + forvalues i=1/`=`p'+`qrot'' { + tempvar z_var_`i' + qui gen `z_var_`i''=`z_var'^`i' + local series_rot `series_rot' `z_var_`i'' + } + + * Variance Component + if ("`absorb'"=="") capture reg `y_var' `series_rot' `w_var' `wt' + else capture reghdfe `y_var' `series_rot' `w_var' `wt', absorb(`absorb') resid(`resid1') + + if (_rc==0) { + mat `coef'=e(b) + mat `coef'=`coef'[1,`=`p'+1'..`=`p'+`qrot''] + } + else { + error _rc + exit _rc + } + + tempvar pred_y y_var_2 pred_y2 s2 + if ("`absorb'"=="") predict `pred_y', xb + else predict `pred_y', xbd + + qui gen `y_var_2'=`y_var'^2 // move it outside + if ("`absorb'"=="") { + capture reg `y_var_2' `series_rot' `w_var' `wt' + if (_rc) { + error _rc + exit _rc + } + predict `pred_y2', xb + } + else { + capture reghdfe `y_var_2' `series_rot' `w_var' `wt', absorb(`absorb') resid(`resid2') + if (_rc) { + error _rc + exit _rc + } + + predict `pred_y2', xbd + } + + qui gen `s2'=`pred_y2'-`pred_y'^2 /* sigma^2(x) var */ + + * Normal density + if ("`rotnorm'"=="") { + if ("`wtype'"!="p") qui sum `z_var' `wt' + else qui sum `z_var' [aw`exp'] + local zbar=r(mean) + local zsd=r(sd) + + tempvar fz + * trim density from below + local cutval=normalden(invnormal(`den_alp')*`zsd', 0, `zsd') + qui gen `fz'=max(normalden(`z_var', `zbar', `zsd'), `cutval') + if ("`binspos'"=="ES") qui replace `s2'=`s2'/`fz' + else qui replace `s2'=`s2'*(`fz'^(2*`deriv')) + } + + if ("`wt'"!="") qui sum `s2' [aw`exp'], meanonly + else qui sum `s2', meanonly + local sig2=r(mean) + mata: imse_v_cons(`p', `deriv', "`vcons'") + local imse_v=`sig2'*`vcons' /* variance constant */ + + * Bias component + * gen data for derivative + tempvar pred_deriv + mata: `Xm'=J(rows(`zvec'),0,.) 
+ + forval i=`=`p'+1'/`=`p'+`qrot'' { + mata:`Xm'=(`Xm',`zvec':^(`i'-`p'-1)*factorial(`i')/factorial(`i'-`p'-1)) + } + + mata: `Xm'=`Xm'*st_matrix("`coef'")'; /// + st_store(.,st_addvar("float", "`pred_deriv'"), `Xm':^2) + + mata: mata drop `Xm' + + if ("`rotnorm'"=="") { + if ("`binspos'"=="QS") { + qui replace `pred_deriv'=`pred_deriv'/(`fz'^(2*`p'+2-2*`deriv')) + } + } + if ("`wt'"!="") qui sum `pred_deriv' [aw`exp'], meanonly + else qui sum `pred_deriv', meanonly + local mean_deriv=r(mean) + + mata: imse_b_cons(`p', `deriv', "`bcons'") + local imse_b=`mean_deriv'*`bcons' /* bias constant */ + + * ROT J + local J_rot_unreg=ceil((`imse_b'*2*(`p'+1-`deriv')/ /// + (`imse_v'*(1+2*`deriv')))^(1/(2*`p'+2+1))* /// + `eN_sub'^(1/(2*`p'+2+1))) + local J_rot_reg=max(`J_rot_unreg', /// + ceil((2*(`p'+1-`deriv')/(1+2*`deriv')*`rot_lb'*`eN_sub')^(1/(2*`p'+2+1)))) + + local imse_bsq_rot=`imse_b' + local imse_var_rot=`imse_v' + + mat `mat_imse_bsq_rot'[`num',1]=`imse_bsq_rot' + mat `mat_imse_var_rot'[`num',1]=`imse_var_rot' + } + + ** Repeated knots? *************** + local J_rot_uniq=`J_rot_reg' + + if (("`binsmethod'"=="DPI"|"`localcheck'"=="T")&"`masspoints'"!="veryfew") { + tempvar zcat + qui gen `zcat'=. 
in 1 + * Prepare bins + tempname kmat + + if "`binspos'"=="ES" { + local stepsize=1/`J_rot_reg' + forvalues i=1/`=`J_rot_reg'+1' { + mat `kmat'=(nullmat(`kmat') \ `=0+`stepsize'*(`i'-1)') + } + } + else { + if (`J_rot_reg'==1) { + mat `kmat'=(0 \ 1) + } + else { + binsreg_pctile `z_var' `wt', nq(`J_rot_reg') `usegtools' + mat `kmat'=(0 \ r(Q) \ 1) + } + } + + mata: st_matrix("`kmat'", (0 \ uniqrows(st_matrix("`kmat'")[|2 \ `=`J_rot_reg'+1'|]))) + local J_rot_uniq=rowsof(`kmat')-1 + if ("`binsmethod'"=="DPI"&"`dpi_fewobs'"=="") { + binsreg_irecode `z_var', knotmat(`kmat') bin(`zcat') /// + `usegtools' nbins(`J_rot_uniq') pos(`binspos') knotliston(T) + } + } + + ********************************* + ********** DPI Choice *********** + ********************************* + local J_dpi=. + * Check if DPI can be implemented + if ("`J_rot_uniq'"!="."&"`binsmethod'"=="DPI"&"`masspoints'"!="veryfew") { + * Compare with degree of freedom + if ((`p'-`s'+1)*(`J_rot_uniq'-1)+`p'+2+`dfcheck_n2'>=`eN_sub') { + di as text in gr "Warning: Too small effective sample size for DPI selection." + local dpi_fewobs "T" + } + + * Check local effective size + if ("`localcheck'"=="T"&"`dpi_fewobs'"!="T") { + mata: st_local("Ncat", strofreal(rows(uniqrows(st_data(.,"`zcat'"))))) + if (`J_rot_uniq'==`Ncat') { + mata: `binedges'=binsreg_uniq(`zvec', st_data(.,"`zcat'"), `J_rot_uniq', "uniqmin") + mata: mata drop `binedges' + } + else { + local uniqmin=0 + di as text in gr "Warning: There are empty bins." + } + + if (`uniqmin'<`p'+1) { + local dpi_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for DPI selection." 
+ } + } + } + else local dpi_fewobs "T" + + if ("`binsmethod'"=="DPI"&"`dpi_fewobs'"!="T") { + * Update vce condition + if ("`massadj'"=="T") { + if ("`absorb'"=="") { + if ("`clusterON'"=="") { + local vce "vce(cluster `z_var')" + } + else { + if (`Nclust_sub'>`Ndist_sub') { + local vce "vce(cluster `z_var')" + di as text in gr "Warning: # of mass points < # of clusters. vce option overridden." + } + } + } + else { + if ("`clustervar'"=="") local vce "vce(cluster `z_var')" + } + } + + ************************************** + * Start computation + tempvar derivfit derivse biasterm biasterm_v projbias + qui gen `derivfit'=. in 1 + qui gen `derivse'=. in 1 + qui gen `biasterm'=. in 1 /* save bias */ + if (`deriv'>0) qui gen `biasterm_v'=. in 1 /* error of approx deriv */ + qui gen `projbias'=. in 1 /* save proj of bias */ + + ************************************** + * predict leading bias + mata: bias("`z_var'", "`zcat'", "`kmat'", `p', 0, "`biasterm'") + if (`deriv'>0) { + mata: bias("`z_var'", "`zcat'", "`kmat'", `p', `deriv', "`biasterm_v'") + } + + * Increase order from p to p+1 + * Expand basis + local nseries=(`p'-`s'+1)*(`J_rot_uniq'-1)+`p'+2 + local series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local series `series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + mata: binsreg_st_spdes(`zvec', "`series'", "`kmat'", st_data(.,"`zcat'"), `=`p'+1', 0, `=`s'+1') + + if ("`absorb'"=="") capture reg `y_var' `series' `w_var' `wt', nocon + else capture reghdfe `y_var' `series' `w_var' `wt', absorb(`absorb') `reghdfeopt' + + * store results + tempname temp_b temp_V + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + } + else { + error _rc + exit _rc + } + + * Predict (p+1)th derivative + mata: `Xm'=binsreg_spdes(`zvec', "`kmat'", st_data(.,"`zcat'"), `=`p'+1', `=`p'+1', `=`s'+1'); /// + st_store(.,"`derivfit'", (binsreg_pred(`Xm', (st_matrix("`temp_b'")[|1 \ `nseries'|])', /// + st_matrix("`temp_V'")[|1,1 \ `nseries',`nseries'|], "xb"))[,1]) + mata: mata drop `Xm' + + qui replace `biasterm'=`derivfit'*`biasterm' + if (`deriv'>0) qui replace `biasterm_v'=`derivfit'*`biasterm_v' + drop `series' + + * Then get back degree-p spline, run OLS + local nseries=(`p'-`s'+1)*(`J_rot_uniq'-1)+`p'+1 + local series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local series `series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + mata: binsreg_st_spdes(`zvec', "`series'", "`kmat'", st_data(.,"`zcat'"), `p', 0, `s') + capture reg `biasterm' `series' `wt', nocon /* project bias on X of degree p */ + tempname bias_b bias_V + if (_rc==0) { + matrix `bias_b'=e(b) + matrix `bias_V'=e(V) + } + else { + error _rc + exit _rc + } + + mata: `Xm'=binsreg_spdes(`zvec', "`kmat'", st_data(.,"`zcat'"), `p', `deriv', `s'); /// + st_store(.,"`projbias'", binsreg_pred(`Xm', st_matrix("`bias_b'")', st_matrix("`bias_V'"), "xb")[,1]) + + if (`deriv'==0) { + qui replace `biasterm'=(`biasterm'-`projbias')^2 + } + else { + qui replace `biasterm'=(`biasterm_v'-`projbias')^2 /* still save in biasterm if deriv>0 */ + } + + if ("`wt'"!="") qui sum `biasterm' [aw`exp'], meanonly + else qui sum `biasterm', meanonly + local m_bias=r(mean) + local imse_b=`m_bias'*`J_rot_uniq'^(2*(`p'+1-`deriv')) + + * for variance purpose + if ("`absorb'"=="") capture reg `y_var' `series' `w_var' `wt', nocon `vce' + else capture reghdfe `y_var' `series' `w_var' `wt', absorb(`absorb') `vce' `reghdfeopt' + + * store results + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + tempname vcov + mata: `vcov'=st_matrix("`temp_V'") + if ("`absorb'"=="") mata: `vcov'=`vcov'[|1,1 \ `nseries',`nseries'|] + else { + mata: `vcov'=(`vcov'[|1,1 \ `nseries', `nseries'|], `vcov'[|1,cols(`vcov') \ `nseries', cols(`vcov')|] \ /// + `vcov'[|cols(`vcov'), 1 \ cols(`vcov'), `nseries'|], `vcov'[cols(`vcov'), cols(`vcov')]); /// + `Xm'=(`Xm', J(rows(`Xm'),1,1)) + } + } + else { + error _rc + exit _rc + } + + mata: st_store(., "`derivse'", (binsreg_pred(`Xm', ., `vcov', "se")[,2]):^2) + mata: mata drop `vcov' + + if ("`wt'"!="") qui sum `derivse' [aw`exp'], meanonly + else qui sum `derivse', meanonly + local m_se=r(mean) + local imse_v=`m_se'/(`J_rot_uniq'^(1+2*`deriv')) + mata: mata drop `Xm' + + * DPI J + local J_dpi=ceil((`imse_b'*2*(`p'+1-`deriv')/ /// + (`imse_v'*(1+2*`deriv')))^(1/(2*`p'+2+1))) + + local imse_bsq_dpi=`imse_b' + local 
imse_var_dpi=`imse_v'*`eN_sub' + + mat `mat_imse_bsq_dpi'[`num',1]=`imse_bsq_dpi' + mat `mat_imse_var_dpi'[`num',1]=`imse_var_dpi' + + } + local J_dpi_uniq=`J_dpi' + + + + ************************************************ + * update J if useeffn or subsample specified + if ("`useeffn'"!=""|"`randcut'"!="") { + if ("`useeffn'"!="") local scaling=(`useeffn'/`eN')^(1/(2*`p'+2+1)) + if ("`randcut'"!="") local scaling=(`eN'/`eN_sub')^(1/(2*`p'+2+1)) + + if (`J_rot_unreg'!=.) local J_rot_unreg=ceil(`J_rot_unreg'*`scaling') + if (`J_rot_reg'!=.) local J_rot_reg=ceil(`J_rot_reg'*`scaling') + if (`J_rot_uniq'!=.) local J_rot_uniq=ceil(`J_rot_uniq'*`scaling') + if (`J_dpi'!=.) local J_dpi=ceil(`J_dpi'*`scaling') + if (`J_dpi_uniq'!=.) local J_dpi_uniq=ceil(`J_dpi_uniq'*`scaling') + } + + mat `mat_J_rot_unreg'[`num',1]=`J_rot_unreg' + mat `mat_J_rot_reg'[`num',1]=`J_rot_reg' + mat `mat_J_rot_uniq'[`num',1]=`J_rot_uniq' + mat `mat_J_dpi'[`num',1]=`J_dpi' + mat `mat_J_dpi_uniq'[`num',1]=`J_dpi_uniq' + + } + + ****** END loop ******* + tempname ord_rot_unreg ord_rot_reg ord_rot_uniq ord_dpi ord_dpi_uniq /// + ind_rot_unreg ind_rot_reg ind_rot_uniq ind_dpi ind_dpi_uniq + local imse_var_dpi_upd=. + local imse_bsq_dpi_upd=. 
+ if ("`selectJ'"=="F") { + * output a row vector of p and s + foreach name in "rot_unreg" "rot_reg" "rot_uniq" "dpi" "dpi_uniq" { + mata: findmindex("`mat_J_`name''", "`ind_`name''", `nbins', `ncom') + mat `ord_`name''=`m_deg'[`ind_`name'',1..2] + local J_`name'=`nbins' + } + if (`nbins'!=`=`mat_J_dpi'[`ind_dpi',1]') { + qui bins_imse `y_var' `z_var' `w_var' `wt', deriv(`deriv') /// + p(`=`ord_dpi'[1,1]') s(`=`ord_dpi'[1,2]') nbins(`J_dpi') eN_sub(`eN_sub') /// + binspos(`binspos') `vce' `usegtools' /// + zvec(`zvec') absorb(`absorb') reghdfeopt(`reghdfeopt') + local imse_var_dpi_upd=e(imse_var) + local imse_bsq_dpi_upd=e(imse_bsq) + } + } + else { + if (`len_nbins'>1) { + tempname m_nbins + forval i=1/`len_nbins' { + local el: word `i' of `nbins' + mat `m_nbins'=(nullmat(`m_nbins') \ `el') + } + * output a scalar + foreach name in "rot_unreg" "rot_reg" "rot_uniq" "dpi" "dpi_uniq" { + mata: findmindex("`m_nbins'", "`ind_`name''", `=`mat_J_`name''[1,1]', `len_nbins') + local J_`name'=`m_nbins'[`ind_`name'',1] + } + } + foreach name in "rot_unreg" "rot_reg" "rot_uniq" "dpi" "dpi_uniq" { + mat `ord_`name''=`m_deg'[1,1..2] + } + } + mata: mata drop `zvec' + + * Reconstruct knot list + tempname xkmat + + if ("`binsmethod'"=="ROT"&"`rot_fewobs'"!="T") { + local Jselected=`J_rot_uniq' + local pselected=`ord_rot_uniq'[1,1] + local sselected=`ord_rot_uniq'[1,2] + } + else if ("`binsmethod'"=="DPI"&"`dpi_fewobs'"!="T") { + local Jselected=`J_dpi' + local pselected=`ord_dpi'[1,1] + local sselected=`ord_dpi'[1,2] + } + else { + local Jselectfail "T" + } + + if ("`Jselectfail'"!="T"&"`useeffn'"=="") { + if ("`binspos'"=="ES") { + local stepsize=1/`Jselected' + forvalues i=1/`=`Jselected'+1' { + mat `xkmat'=(nullmat(`xkmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else { + if (`Jselected'>1) { + if ("`randcut'"!="") { + qui set obs `samplesize' + mata: st_store(., "`x_var'", `xvec') + mata: mata drop `xvec' + } + binsreg_pctile `x_var' `wt', nq(`Jselected') + mat 
`xkmat'=(`xmin'\ r(Q) \ `xmax') + } + else mat `xkmat'=(`xmin' \ `xmax') + } + + * Renew if needed + mata: st_matrix("`xkmat'", (`xmin' \ uniqrows(st_matrix("`xkmat'")[|2 \ `=`Jselected'+1'|]))) + if (`Jselected'!=rowsof(`xkmat')-1) { + local Jselected=rowsof(`xkmat')-1 + } + if ("`binsmethod'"=="DPI") { + local J_dpi_uniq=`Jselected' + } + } + else mat `xkmat'=. + + + if ("`binsmethod'"=="DPI") local method "IMSE-optimal plug-in choice" + else local method "IMSE-optimal rule-of-thumb choice" + if ("`selectJ'"=="T") local method "`method' (select # of bins)" + else local method "`method' (select degree and smoothness)" + + + if ("`binspos'"=="ES") { + local placement "Evenly-spaced" + } + else { + local placement "Quantile-spaced" + } + + * Save data? + if (`"`savegrid'"'!=`""') { + if ("`Jselectfail'"!="T"&"`useeffn'"=="") { + clear + local obs=`simsgrid'*`Jselected'+`Jselected'-1 + qui set obs `obs' + + qui gen `x_varname'=. in 1 + label var `x_varname' "Eval. point" + qui gen binsreg_isknot=. in 1 + label var binsreg_isknot "Is the eval. point an inner knot" + qui gen binsreg_bin=. in 1 + label var binsreg_bin "indicator of bin" + mata: st_store(., (1,2,3), binsreg_grids("`xkmat'", `simsgrid')) + foreach var of local w_varname { + qui gen `var'=0 + } + + qui save `"`savegrid'"', `replace' + } + else { + di as text in gr "Warning: Grid not saved. Selection fails or useeffn() is specified." 
+ } + } + + * Display + di "" + di in smcl in gr "Bin selection for binscatter estimates" + di in smcl in gr "Method: `method'" + di in smcl in gr "Position: `placement'" + if (`"`savegrid'"'!=`""') { + di in smcl in gr `"Output file: `savegrid'.dta"' + } + di "" + di in smcl in gr "{hline 28}{c TT}{hline 10}" + di in smcl in gr "{ralign 27:# of observations}" _col(28) " {c |} " _col(30) as result %7.0f `N' + di in smcl in gr "{ralign 27:# of distince values}" _col(28) " {c |} " _col(30) as result %7.0f `Ndist' + di in smcl in gr "{ralign 27:# of clusters}" _col(28) " {c |} " _col(30) as result %7.0f `Nclust' + if ("`useeffn'"=="") { + di in smcl in gr "{ralign 27:eff. sample size}" _col(28) " {c |} " _col(30) as result %7.0f `eN' + } + else { + di in smcl in gr "{ralign 27:eff. sample size}" _col(28) " {c |} " _col(30) as result %7.0f `useeffn' + } + + foreach name in "rot_unreg" "rot_reg" "rot_uniq" "dpi" "dpi_uniq" { + local df_`name'=. + if (`J_`name''!=.) local df_`name'=(`ord_`name''[1,1]-`ord_`name''[1,2]+1)*(`J_`name''-1)+`ord_`name''[1,1]+1 + } + + if ("`selectJ'"=="F") { + local imse_bsq_rot=`mat_imse_bsq_rot'[`ind_rot_unreg',1] + local imse_var_rot=`mat_imse_var_rot'[`ind_rot_unreg',1] + if ("`imse_var_dpi_upd'"!=".") { + local imse_bsq_dpi=`imse_bsq_dpi_upd' + local imse_var_dpi=`imse_var_dpi_upd' + } + else { + local imse_bsq_dpi=`mat_imse_bsq_dpi'[`ind_dpi',1] + local imse_var_dpi=`mat_imse_var_dpi'[`ind_dpi',1] + } + di in smcl in gr "{hline 28}{c +}{hline 10}" + di in smcl in gr "{ralign 27:# of bins}" _col(28) " {c |} " _col(30) as result %7.0f `nbins' + di in smcl in gr "{hline 28}{c BT}{hline 10}" + di "" + di in smcl in gr "{hline 14}{c TT}{hline 8}{c TT}{hline 7}{c TT}{hline 7}{c TT}{hline 15}{c TT}{hline 14}" + di in smcl in gr "{rcenter 13: method}" _col(13) " {c |} " "{center 7: p}" /// + _col(22) "{c |}" "{rcenter 7: s}" /// + _col(31) "{c |}" "{center 7: df}" /// + _col(40) "{c |}" "{center 14: imse, bias^2}" /// + _col(56) "{c |}" 
"{center 14: imse, var.}" + di in smcl in gr "{hline 14}{c +}{hline 8}{c +}{hline 7}{c +}{hline 7}{c +}{hline 15}{c +}{hline 14}" + di in smcl in gr "{rcenter 13: ROT-POLY}" _col(13) " {c |} " as result %4.0f `ord_rot_unreg'[1,1] /// + _col(23) in gr " {c |} " as result %4.0f `ord_rot_unreg'[1,2] /// + _col(32) in gr "{c |}" as result %5.0f `df_rot_unreg' /// + _col(40) in gr "{c |} " as result %7.3f `imse_bsq_rot' /// + _col(56) in gr "{c |} " as result %7.3f `imse_var_rot' + di in smcl in gr "{rcenter 13: ROT-REGUL}" _col(13) " {c |} " as result %4.0f `ord_rot_reg'[1,1] /// + _col(23) in gr " {c |} " as result %4.0f `ord_rot_reg'[1,2] /// + _col(32) in gr "{c |}" as result %5.0f `df_rot_reg' /// + _col(40) in gr "{c |} " as result %7.3f . /// + _col(56) in gr "{c |} " as result %7.3f . + di in smcl in gr "{rcenter 13: ROT-UKNOT}" _col(13) " {c |} " as result %4.0f `ord_rot_uniq'[1,1] /// + _col(23) in gr " {c |} " as result %4.0f `ord_rot_uniq'[1,2] /// + _col(32) in gr "{c |}" as result %5.0f `df_rot_uniq' /// + _col(40) in gr "{c |} " as result %7.3f . /// + _col(56) in gr "{c |} " as result %7.3f . + di in smcl in gr "{rcenter 13: DPI}" _col(13) " {c |} " as result %4.0f `ord_dpi'[1,1] /// + _col(23) in gr " {c |} " as result %4.0f `ord_dpi'[1,2] /// + _col(32) in gr "{c |}" as result %5.0f `df_dpi' /// + _col(40) in gr "{c |} " as result %7.3f `imse_bsq_dpi' /// + _col(56) in gr "{c |} " as result %7.3f `imse_var_dpi' + di in smcl in gr "{rcenter 13: DPI-UKNOT}" _col(13) " {c |} " as result %4.0f `ord_dpi_uniq'[1,1] /// + _col(23) in gr " {c |} " as result %4.0f `ord_dpi_uniq'[1,2] /// + _col(32) in gr "{c |}" as result %5.0f `df_dpi_uniq' /// + _col(40) in gr "{c |} " as result %7.3f . /// + _col(56) in gr "{c |} " as result %7.3f . + di in smcl in gr "{hline 14}{c BT}{hline 8}{c BT}{hline 7}{c +}{hline 7}{c BT}{hline 15}{c BT}{hline 14}" + di in smcl in gr "p: degree of polynomial. s: # of smoothness constraints. df: degrees of freedom." 
+ } + else { + local imse_bsq_rot=`mat_imse_bsq_rot'[1,1] + local imse_var_rot=`mat_imse_var_rot'[1,1] + local imse_bsq_dpi=`mat_imse_bsq_dpi'[1,1] + local imse_var_dpi=`mat_imse_var_dpi'[1,1] + di in smcl in gr "{hline 28}{c +}{hline 10}" + di in smcl in gr "{ralign 27:Degree of polynomial}" _col(28) " {c |} " _col(30) as result %7.0f `m_deg'[1,1] + di in smcl in gr "{ralign 27:# of smoothness constraint}" _col(28) " {c |} " _col(30) as result %7.0f `m_deg'[1,2] + di in smcl in gr "{hline 28}{c BT}{hline 10}" + di "" + di in smcl in gr "{hline 14}{c TT}{hline 12}{c TT}{hline 10}{c TT}{hline 14}{c TT}{hline 14}" + di in smcl in gr "{rcenter 13: method}" _col(13) " {c |} " "{center 11: # of bins}" _col(26) "{c |}" "{rcenter 10: df}" _col(39) "{c |}" "{center 14: imse, bias^2}" _col(54) "{c |}" "{center 14: imse, var.}" + di in smcl in gr "{hline 14}{c +}{hline 12}{c +}{hline 10}{c +}{hline 14}{c +}{hline 14}" + di in smcl in gr "{rcenter 13: ROT-POLY}" _col(13) " {c |} " as result %7.0f `J_rot_unreg' _col(28) in gr "{c |}" as result %7.0f `df_rot_unreg' /// + _col(39) in gr "{c |} " as result %7.3f `imse_bsq_rot' _col(54) in gr "{c |} " as result %7.3f `imse_var_rot' + di in smcl in gr "{rcenter 13: ROT-REGUL}" _col(13) " {c |} " as result %7.0f `J_rot_reg' _col(28) in gr "{c |}" as result %7.0f `df_rot_reg' /// + _col(39) in gr "{c |} " as result %7.3f . _col(54) in gr "{c |} " as result %7.3f . + di in smcl in gr "{rcenter 13: ROT-UKNOT}" _col(13) " {c |} " as result %7.0f `J_rot_uniq' _col(28) in gr "{c |}" as result %7.0f `df_rot_uniq' /// + _col(39) in gr "{c |} " as result %7.3f . _col(54) in gr "{c |} " as result %7.3f . 
+ di in smcl in gr "{rcenter 13: DPI}" _col(13) " {c |} " as result %7.0f `J_dpi' _col(28) in gr "{c |}" as result %7.0f `df_dpi' /// + _col(39) in gr "{c |} " as result %7.3f `imse_bsq_dpi' _col(54) in gr "{c |} " as result %7.3f `imse_var_dpi' + di in smcl in gr "{rcenter 13: DPI-UKNOT}" _col(13) " {c |} " as result %7.0f `J_dpi_uniq' _col(28) in gr "{c |}" as result %7.0f `df_dpi_uniq' /// + _col(39) in gr "{c |} " as result %7.3f . _col(54) in gr "{c |} " as result %7.3f . + di in smcl in gr "{hline 14}{c BT}{hline 12}{c BT}{hline 10}{c BT}{hline 14}{c BT}{hline 14}" + di in smcl in gr "df: degrees of freedom." + } + * return + * notes: J_rot_uniq is obtained possibly based on the subsample; J_dpi_uniq is ALWAYS obtained based on the full sample + ereturn clear + ereturn scalar N=`N' + ereturn scalar Ndist=`Ndist' + ereturn scalar Nclust=`Nclust' + ereturn scalar deriv=`deriv' + ereturn scalar imse_bsq_rot=`imse_bsq_rot' + ereturn scalar imse_var_rot=`imse_var_rot' + ereturn scalar imse_bsq_dpi=`imse_bsq_dpi' + ereturn scalar imse_var_dpi=`imse_var_dpi' + + ereturn scalar nbinsrot_poly=`J_rot_unreg' + ereturn scalar nbinsrot_regul=`J_rot_reg' + ereturn scalar nbinsrot_uknot=`J_rot_uniq' + ereturn scalar nbinsdpi=`J_dpi' + ereturn scalar nbinsdpi_uknot=`J_dpi_uniq' + + ereturn scalar prot_poly=`ord_rot_unreg'[1,1] + ereturn scalar prot_regul=`ord_rot_reg'[1,1] + ereturn scalar prot_uknot=`ord_rot_uniq'[1,1] + ereturn scalar pdpi=`ord_dpi'[1,1] + ereturn scalar pdpi_uknot=`ord_dpi_uniq'[1,1] + + ereturn scalar srot_poly=`ord_rot_unreg'[1,2] + ereturn scalar srot_regul=`ord_rot_reg'[1,2] + ereturn scalar srot_uknot=`ord_rot_uniq'[1,2] + ereturn scalar sdpi=`ord_dpi'[1,2] + ereturn scalar sdpi_uknot=`ord_dpi_uniq'[1,2] + + ereturn matrix knot=`xkmat' + tempname m_p m_s + mat `m_p'=`m_deg'[1..`ncom',1] + mat `m_s'=`m_deg'[1..`ncom',2] + ereturn matrix m_p=`m_p' + ereturn matrix m_s=`m_s' + ereturn matrix m_nbinsrot_poly=`mat_J_rot_unreg' + ereturn matrix 
m_nbinsrot_regul=`mat_J_rot_reg' + ereturn matrix m_nbinsrot_uknot=`mat_J_rot_uniq' + ereturn matrix m_nbinsdpi=`mat_J_dpi' + ereturn matrix m_nbinsdpi_uknot=`mat_J_dpi_uniq' + + ereturn matrix m_imse_bsq_dpi=`mat_imse_bsq_dpi' + ereturn matrix m_imse_var_dpi=`mat_imse_var_dpi' + ereturn matrix m_imse_bsq_rot=`mat_imse_bsq_rot' + ereturn matrix m_imse_var_rot=`mat_imse_var_rot' + + + +end + +* Helper command +program define bins_imse, eclass + + version 13 + syntax varlist(min=2 numeric ts fv) [if] [in] [fw aw pw] [, deriv(integer 0) /// + p(integer 0) s(integer 0) nbins(integer 0) eN_sub(integer 0) /// + binspos(string) vce(passthru) usegtools /// + zvec(name) absorb(string asis) reghdfeopt(string asis)] + + preserve + marksample touse + qui keep if `touse' + + if ("`weight'"!="") local wt [`weight'`exp'] + + tokenize `varlist' + local y_var `1' + local z_var `2' + macro shift 2 + local w_var "`*'" + + tempvar zcat + qui gen `zcat'=. in 1 + * Prepare bins + tempname kmat + + if "`binspos'"=="ES" { + local stepsize=1/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `kmat'=(nullmat(`kmat') \ `=0+`stepsize'*(`i'-1)') + } + } + else { + if (`nbins'==1) { + mat `kmat'=(0 \ 1) + } + else { + binsreg_pctile `z_var' `wt', nq(`nbins') `usegtools' + mat `kmat'=(0 \ r(Q) \ 1) + } + } + + binsreg_irecode `z_var', knotmat(`kmat') bin(`zcat') /// + `usegtools' nbins(`nbins') pos(`binspos') knotliston(T) + + + * Start computation + tempvar derivfit derivse biasterm biasterm_v projbias + qui gen `derivfit'=. in 1 + qui gen `derivse'=. in 1 + qui gen `biasterm'=. in 1 /* save bias */ + if (`deriv'>0) qui gen `biasterm_v'=. in 1 /* error of approx deriv */ + qui gen `projbias'=. 
in 1 /* save proj of bias */ + + ************************************** + * predict leading bias + mata: bias("`z_var'", "`zcat'", "`kmat'", `p', 0, "`biasterm'") + if (`deriv'>0) { + mata: bias("`z_var'", "`zcat'", "`kmat'", `p', `deriv', "`biasterm_v'") + } + + * Increase order from p to p+1 + * Expand basis + local nseries=(`p'-`s'+1)*(`nbins'-1)+`p'+2 + local series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local series `series' `sp`i'' + qui gen `sp`i''=. in 1 + } + + mata: binsreg_st_spdes(`zvec', "`series'", "`kmat'", st_data(.,"`zcat'"), `=`p'+1', 0, `=`s'+1') + + if ("`absorb'"=="") capture reg `y_var' `series' `w_var' `wt', nocon + else capture reghdfe `y_var' `series' `w_var' `wt', absorb(`absorb') `reghdfeopt' + + * store results + tempname temp_b temp_V + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + } + else { + error _rc + exit _rc + } + + tempname Xm + * Predict (p+1)th derivative + mata: `Xm'=binsreg_spdes(`zvec', "`kmat'", st_data(.,"`zcat'"), `=`p'+1', `=`p'+1', `=`s'+1'); /// + st_store(.,"`derivfit'", (binsreg_pred(`Xm', (st_matrix("`temp_b'")[|1 \ `nseries'|])', /// + st_matrix("`temp_V'")[|1,1 \ `nseries',`nseries'|], "xb"))[,1]) + mata: mata drop `Xm' + + qui replace `biasterm'=`derivfit'*`biasterm' + if (`deriv'>0) qui replace `biasterm_v'=`derivfit'*`biasterm_v' + drop `series' + + * Then get back degree-p spline, run OLS + local nseries=(`p'-`s'+1)*(`nbins'-1)+`p'+1 + local series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local series `series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + mata: binsreg_st_spdes(`zvec', "`series'", "`kmat'", st_data(.,"`zcat'"), `p', 0, `s') + capture reg `biasterm' `series' `wt', nocon /* project bias on X of degree p */ + tempname bias_b bias_V + if (_rc==0) { + matrix `bias_b'=e(b) + matrix `bias_V'=e(V) + } + else { + error _rc + exit _rc + } + + mata: `Xm'=binsreg_spdes(`zvec', "`kmat'", st_data(.,"`zcat'"), `p', `deriv', `s'); /// + st_store(.,"`projbias'", binsreg_pred(`Xm', st_matrix("`bias_b'")', st_matrix("`bias_V'"), "xb")[,1]) + + if (`deriv'==0) { + qui replace `biasterm'=(`biasterm'-`projbias')^2 + } + else { + qui replace `biasterm'=(`biasterm_v'-`projbias')^2 /* still save in biasterm if deriv>0 */ + } + + if ("`wt'"!="") qui sum `biasterm' [aw`exp'], meanonly + else qui sum `biasterm', meanonly + local m_bias=r(mean) + local imse_b=`m_bias'*`nbins'^(2*(`p'+1-`deriv')) + + * for variance purpose + if ("`absorb'"=="") capture reg `y_var' `series' `w_var' `wt', nocon `vce' + else capture reghdfe `y_var' `series' `w_var' `wt', absorb(`absorb') `vce' `reghdfeopt' + + * store results + if (_rc==0) { + matrix `temp_b'=e(b) + matrix `temp_V'=e(V) + tempname vcov + mata: `vcov'=st_matrix("`temp_V'") + if ("`absorb'"=="") mata: `vcov'=`vcov'[|1,1 \ `nseries',`nseries'|] + else { + mata: `vcov'=(`vcov'[|1,1 \ `nseries', `nseries'|], `vcov'[|1,cols(`vcov') \ `nseries', cols(`vcov')|] \ /// + `vcov'[|cols(`vcov'), 1 \ cols(`vcov'), `nseries'|], `vcov'[cols(`vcov'), cols(`vcov')]); /// + `Xm'=(`Xm', J(rows(`Xm'),1,1)) + } + } + else { + error _rc + exit _rc + } + + mata: st_store(., "`derivse'", (binsreg_pred(`Xm', ., `vcov', "se")[,2]):^2) + mata: mata drop `vcov' + + if ("`wt'"!="") qui sum `derivse' [aw`exp'], meanonly + else qui sum `derivse', meanonly + local m_se=r(mean) + local imse_v=`m_se'/(`nbins'^(1+2*`deriv')) + mata: mata drop `Xm' + + * DPI J + local J_dpi=ceil((`imse_b'*2*(`p'+1-`deriv')/ /// + (`imse_v'*(1+2*`deriv')))^(1/(2*`p'+2+1))) + + local imse_bsq_dpi=`imse_b' + local 
imse_var_dpi=`imse_v'*`eN_sub' + + ereturn clear + ereturn scalar imse_bsq=`imse_bsq_dpi' + ereturn scalar imse_var=`imse_var_dpi' + +end + + +version 13 +mata: + // Constant in variance + void imse_v_cons(real scalar degree, real scalar deriv, string scalar vcons) + { + real scalar v_cons, m + real matrix V, Vderiv + + m=degree+1 + if (deriv==0) { + v_cons=m + } + else { + V=J(m, m, .) + Vderiv=J(m, m, 0) + + for (i=1; i<=m; i++){ + for (j=1; j<=i; j++) { + V[i,j]=1/(i+j-1) + if (i>deriv & j>deriv) { + Vderiv[i,j]=1/(i+j-1-2*deriv)* /* + */ (factorial(i-1)/factorial(i-1-deriv))* /* + */ (factorial(j-1)/factorial(j-1-deriv)) + } + } + } + V=makesymmetric(V) + Vderiv=makesymmetric(Vderiv) + v_cons=trace(invsym(V)*Vderiv) + } + + // return results + st_numscalar(vcons,v_cons) + } + + // Constant in bias + void imse_b_cons(real scalar degree, real scalar deriv, string scalar bcons, | real scalar s) + { + real scalar b_cons, m, bernum + m=degree+1 + if (args()<4) { + b_cons=1/(2*(m-deriv)+1)/factorial(m-deriv)^2/comb(2*(m-deriv), m-deriv)^2 + } + else { + if (degree==0) { + bernum=1/6 + } + else if (degree==1) { + bernum=1/30 + } + else if (degree==2) { + bernum=1/42 + } + else if (degree==3) { + bernum=1/30 + } + else if (degree==4) { + bernum=5/66 + } + else if (degree==5) { + bernum=691/2730 + } + else if (degree==6) { + bernum=7/6 + } + else { + _error("p>6 not allowed.") + } + b_cons=1/factorial(2*(m-deriv))*bernum + } + + // return results + st_numscalar(bcons, b_cons) + } + + // Bernoulli polynomial + real vector bernpoly(real vector x, real scalar degree) + { + n=rows(x) + if (degree==0) { + bernx=J(n,1,1) + } + else if (degree==1) { + bernx=x:-0.5 + } + else if (degree==2) { + bernx=x:^2-x:+1/6 + } + else if (degree==3) { + bernx=x:^3-1.5*x:^2+0.5*x + } + else if (degree==4) { + bernx=x:^4-2*x:^3+x:^2:-1/30 + } + else if (degree==5) { + bernx=x:^5-2.5*x:^4+5/3*x:^3-1/6*x + } + else if (degree==6) { + bernx=x:^6-3*x:^5+2.5*x:^4-0.5*x:^2:+1/42 + } + else { + 
_error("p is too large.") + } + return(bernx) + } + + // Leading bias for splines + void bias(string scalar Var, string scalar Xcat, string scalar knotname, /// + real scalar degree, real scalar deriv, /// + string scalar biasname, | string scalar select) + { + if (args()<7) { + X=st_data(., (Var)) + xcat=st_data(., (Xcat)) + st_view(bias=., ., (biasname)) + } + else { + X=st_data(., (Var), select) + xcat=st_data(., (Xcat), select) + st_view(bias=.,.,(biasname), select) + } + knot=st_matrix(knotname) + h=knot[|2 \ length(knot)|]-knot[|1 \ (length(knot)-1)|] + h=h[xcat] + if (rows(h)==1) { + h=h' + } + tl=knot[|1 \ (length(knot)-1)|] + tl=tl[xcat] + if (rows(tl)==1) { + tl=tl' + } + bern=bernpoly((X-tl):/h, degree+1-deriv)/factorial(degree+1-deriv):*(h:^(degree+1-deriv)) + bias[.,.]=bern + } + + // find the minimum + void findmindex(string scalar matname, string scalar outname, /// + real scalar J, real scalar nr) + { + real matrix A + + A=sort((abs(st_matrix(matname):-J), (1::nr)), 1) + st_numscalar(outname, A[1,2]) + } + +end + diff --git a/110/replication_package/replication/ado/plus/b/binsregselect.sthlp b/110/replication_package/replication/ado/plus/b/binsregselect.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..fa1b1df284e368b2e03473e30f5ef1b9271216e6 --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binsregselect.sthlp @@ -0,0 +1,248 @@ +{smcl} +{* *! 
version 1.2 09-OCT-2022}{...} +{viewerjumpto "Syntax" "binsregselect##syntax"}{...} +{viewerjumpto "Description" "binsregselect##description"}{...} +{viewerjumpto "Options" "binsregselect##options"}{...} +{viewerjumpto "Examples" "binsregselect##examples"}{...} +{viewerjumpto "Stored results" "binsregselect##stored_results"}{...} +{viewerjumpto "References" "binsregselect##references"}{...} +{viewerjumpto "Authors" "binsregselect##authors"}{...} +{cmd:help binsregselect} +{hline} + +{title:Title} + +{p 4 8}{hi:binsregselect} {hline 2} Data-driven IMSE-Optimal Partitioning/Binning Selection for Binscatter.{p_end} + + +{marker syntax}{...} +{title:Syntax} + +{p 4 18} {cmdab:binsregselect} {depvar} {it:indvar} [{it:othercovs}] {ifin} {weight} [{cmd:,} {opt deriv(v)}{p_end} +{p 18 18} {opt absorb(absvars)} {opt reghdfeopt(reghdfe_option)}{p_end} +{p 18 18} {opt bins(p s)} {opt binspos(position)} {opt binsmethod(method)} {opt nbinsrot(#)} {opt nbins(nbinsopt)}{p_end} +{p 18 18} {cmd:pselect(}{it:{help numlist}}{cmd:)} {cmd:sselect(}{it:{help numlist}}{cmd:)}{p_end} +{p 18 18} {opt simsgrid(#)} {opt savegrid(filename)} {opt replace}{p_end} +{p 18 18} {opt dfcheck(n1 n2)} {opt masspoints(masspointsoption)}{p_end} +{p 18 18} {cmd:vce(}{it:{help vcetype}}{cmd:)} {opt usegtools(on/off)} {opt useeffn(#)} {opt randcut(#)} ]{p_end} + +{p 4 8} where {depvar} is the dependent variable, {it:indvar} is the independent variable for binning, and {it:othercovs} are other covariates to be controlled for.{p_end} + +{p 4 8} The degree of the piecewise polynomial p, the number of smoothness constraints s, and the derivative order v are integers +satisfying 0 <= s,v <= p, which can take different values in each case.{p_end} + +{p 4 8} {opt fweight}s, {opt aweight}s and {opt pweight}s are allowed; see {help weight}.{p_end} + +{marker description}{...} +{title:Description} + +{p 4 8} {cmd:binsregselect} implements data-driven procedures for selecting the number of bins for binscatter 
estimation. +The selected number is optimal in minimizing the (asymptotic) integrated mean squared error (IMSE). +{p_end} + + +{marker options}{...} +{title:Options} + +{dlgtab:Estimand} + +{p 4 8} {opt deriv(v)} specifies the derivative order of the regression function for estimation, testing and plotting. +The default is {cmd:deriv(0)}, which corresponds to the function itself.{p_end} + +{dlgtab:Reghdfe} + +{p 4 8} {opt absorb(absvars)} specifies categorical variables (or interactions) representing the fixed effects to be absorbed. +This is equivalent to including an indicator/dummy variable for each category of each {it:absvar}. When {cmd:absorb()} is specified, +the community-contributed command {cmd:reghdfe} instead of the command {cmd:regress} is used. +{p_end} + +{p 4 8} {opt reghdfeopt(reghdfe_option)} options to be passed on to {cmd:reghdfe}. Important: {cmd:absorb()} and {cmd:vce()} should not be specified within this option. +{p_end} + +{p 4 8} For more information about the community-contributed command {cmd:reghdfe}, please see {browse "http://scorreia.com/software/reghdfe/":http://scorreia.com/software/reghdfe/}. +{p_end} + +{dlgtab:Binning/Degree/Smoothness Selection} + +{p 4 8} {opt bins(p s)} sets a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints for +data-driven (IMSE-optimal) selection of the partitioning/binning scheme. +The default is {cmd:bins(0 0)}, which corresponds to piecewise constant (canonical binscatter). +{p_end} + +{p 4 8} {opt binspos(position)} specifies the position of binning knots. +The default is {cmd:binspos(qs)}, which corresponds to quantile-spaced binning (canonical binscatter). +The other option is {cmd:es} for evenly-spaced binning. +{p_end} + +{p 4 8} {opt binsmethod(method)} specifies the method for data-driven selection of the number of bins. +The default is {cmd:binsmethod(dpi)}, which corresponds to the IMSE-optimal direct plug-in rule. +The other option is {cmd:rot} for rule of thumb implementation. 
+{p_end} + +{p 4 8} {opt nbinsrot(#)} specifies an initial number of bins value used to construct the DPI number of bins selector. +If not specified, the data-driven ROT selector is used instead. +{p_end} + +{p 4 8} {opt nbins(nbinsopt)} sets the number of bins for degree/smoothness selection. +If {cmd:nbins(T)} is specified, the command selects the number of bins instead, +given the specified degree and smoothness. +If a {help numlist:numlist} with more than one number is specified, +the command selects the number of bins within this list. +{p_end} + +{p 4 8} {opt pselect(numlist)} specifies a list of numbers within which the degree of polynomial {it:p} for +point estimation is selected. +{p_end} + +{p 4 8} {opt sselect(numlist)} specifies a list of numbers within which the number of smoothness constraints {it:s} +for point estimation is selected. If not specified, for each value {it:p} supplied in the +option {cmd:pselect()}, only the piecewise polynomial with the maximum smoothness is considered, i.e., {it:s=p}. +{p_end} + +{p 4 8} Note: To implement the degree or smoothness selection, in addition to {cmd:pselect()} +or {cmd:sselect()}, {cmd:nbins(#)} must be specified. +{p_end} + +{dlgtab:Evaluation Points Grid Generation} + +{p 4 8} {opt simsgrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used +for evaluation of the supremum (infimum or Lp metric) operation needed to construct confidence bands and hypothesis testing procedures. +The default is {cmd:simsgrid(20)}, which corresponds to 20 evenly-spaced evaluation points within each bin for +approximating the supremum (or infimum) operator. +{p_end} + +{p 4 8} {opt savegrid(filename)} specifies a filename for storing the simulation grid of evaluation points. 
+It contains the following variables: +{it:indvar}, which is a sequence of evaluation points used in approximation; +all control variables in {it:othercovs}, which take values of zero for prediction purpose; +{it:binsreg_isknot}, indicating whether the evaluation point is an inner knot; +and {it:binsreg_bin}, indicating which bin the evaluation point belongs to. +{p_end} + +{p 4 8} {opt replace} overwrites the existing file when saving the grid. +{p_end} + +{dlgtab:Mass Points and Degrees of Freedom} + +{p 4 8} {opt dfcheck(n1 n2)} sets cutoff values for minimum effective sample size checks, +which take into account the number of unique values of {it:indvar} (i.e., adjusting for the number of mass points), +number of clusters, and degrees of freedom of the different statistical models considered. +The default is {cmd:dfcheck(20 30)}. See Cattaneo, Crump, Farrell and Feng (2022b) for more details. +{p_end} + +{p 4 8} {opt masspoints(masspointsoption)} specifies how mass points in {it:indvar} are handled. +By default, all mass point and degrees of freedom checks are implemented. +Available options: +{p_end} +{p 8 8} {opt masspoints(noadjust)} omits mass point checks and the corresponding effective sample size adjustments.{p_end} +{p 8 8} {opt masspoints(nolocalcheck)} omits within-bin mass point and degrees of freedom checks.{p_end} +{p 8 8} {opt masspoints(off)} sets {opt masspoints(noadjust)} and {opt masspoints(nolocalcheck)} simultaneously.{p_end} +{p 8 8} {opt masspoints(veryfew)} forces the command to proceed as if {it:indvar} has only a small number of mass points (i.e., distinct values). +In other words, forces the command to proceed as if the mass point and degrees of freedom checks had failed.{p_end} + +{dlgtab:Other Options} + +{p 4 8} {cmd:vce(}{it:{help vcetype}}{cmd:)} specifies the {it:vcetype} for variance estimation +used by the command {help regress##options:regress} (or {cmd:reghdfe} if {cmd:absorb()} is specified). 
+The default is {cmd:vce(robust)}. +{p_end} + +{p 4 8}{opt usegtools(on/off)} forces the use of several commands in the community-distributed Stata package {cmd:gtools} +to speed the computation up, if {it:on} is specified. +Default is {cmd:usegtools(off)}. +{p_end} + +{p 4 8} For more information about the package {cmd:gtools}, please see {browse "https://gtools.readthedocs.io/en/latest/index.html":https://gtools.readthedocs.io/en/latest/index.html}. +{p_end} + +{p 4 8} {opt useeffn(#)} specifies the effective sample size {it:#} to be used when computing the (IMSE-optimal) number of bins. +This option is useful for extrapolating the optimal number of bins to larger (or smaller) datasets than the one used to compute it. +{p_end} + +{p 4 8} {opt randcut(#)} specifies the upper bound on a uniformly distributed variable used to draw a subsample for bins selection. +Observations for which {cmd:runiform()<=#} are used. # must be between 0 and 1. +{p_end} + +{marker examples}{...} +{title:Examples} + +{p 4 8} Setup{p_end} +{p 8 8} . {stata sysuse auto}{p_end} + +{p 4 8} Select IMSE-optimal number of bins using DPI-procedure{p_end} +{p 8 8} . 
{stata binsregselect mpg weight foreign}{p_end} + + +{marker stored_results}{...} +{title:Stored results} + +{synoptset 20 tabbed}{...} +{p2col 5 20 24 2: Scalars}{p_end} +{synopt:{cmd:e(N)}}number of observations{p_end} +{synopt:{cmd:e(Ndist)}}number of distinct values{p_end} +{synopt:{cmd:e(Nclust)}}number of clusters{p_end} +{synopt:{cmd:e(deriv)}}order of derivative{p_end} +{synopt:{cmd:e(imse_bsq_rot)}}bias constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_var_rot)}}variance constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_bsq_dpi)}}bias constant in IMSE, DPI selection{p_end} +{synopt:{cmd:e(imse_var_dpi)}}variance constant in IMSE, DPI selection{p_end} +{synopt:{cmd:e(nbinsrot_poly)}}ROT number of bins, unregularized{p_end} +{synopt:{cmd:e(nbinsrot_regul)}}ROT number of bins, regularized or user-specified{p_end} +{synopt:{cmd:e(nbinsrot_uknot)}}ROT number of bins, unique knots{p_end} +{synopt:{cmd:e(nbinsdpi)}}DPI number of bins{p_end} +{synopt:{cmd:e(nbinsdpi_uknot)}}DPI number of bins, unique knots{p_end} +{synopt:{cmd:e(prot_poly)}}ROT degree of polynomial, unregularized{p_end} +{synopt:{cmd:e(prot_regul)}}ROT degree of polynomial, regularized or user-specified{p_end} +{synopt:{cmd:e(prot_uknot)}}ROT degree of polynomial, unique knots{p_end} +{synopt:{cmd:e(pdpi)}}DPI degree of polynomial{p_end} +{synopt:{cmd:e(pdpi_uknot)}}DPI degree of polynomial, unique knots{p_end} +{synopt:{cmd:e(srot_poly)}}ROT number of smoothness constraints, unregularized{p_end} +{synopt:{cmd:e(srot_regul)}}ROT number of smoothness constraints, regularized or user-specified{p_end} +{synopt:{cmd:e(srot_uknot)}}ROT number of smoothness constraints, unique knots{p_end} +{synopt:{cmd:e(sdpi)}}DPI number of smoothness constraints{p_end} +{synopt:{cmd:e(sdpi_uknot)}}DPI number of smoothness constraints, unique knots{p_end} +{p2col 5 20 24 2: Matrices}{p_end} +{synopt:{cmd:e(knot)}}numlist of knots{p_end} +{synopt:{cmd:e(m_p)}}vector of degrees of polynomial{p_end} 
+{synopt:{cmd:e(m_s)}}vector of number of smoothness constraints{p_end} +{synopt:{cmd:e(m_nbinsrot_poly)}}ROT number of bins, unregularized, for each pair of degree and smoothness{p_end} +{synopt:{cmd:e(m_nbinsrot_regul)}}ROT number of bins, regularized or user-specified, for each pair of degree and smoothness{p_end} +{synopt:{cmd:e(m_nbinsrot_uknot)}}ROT number of bins, unique knots, for each pair of degree and smoothness{p_end} +{synopt:{cmd:e(m_nbinsdpi)}}DPI number of bins, for each pair of degree and smoothness{p_end} +{synopt:{cmd:e(m_nbinsdpi_uknot)}}DPI number of bins, unique knots, for each pair of degree and smoothness{p_end} +{synopt:{cmd:e(m_imse_bsq_rot)}}bias constant in IMSE, ROT selection, for each pair of degree and smoothness{p_end} +{synopt:{cmd:e(m_imse_var_rot)}}variance constant in IMSE, ROT selection, for each pair of degree and smoothness{p_end} +{synopt:{cmd:e(m_imse_bsq_dpi)}}bias constant in IMSE, DPI selection, for each pair of degree and smoothness{p_end} +{synopt:{cmd:e(m_imse_var_dpi)}}variance constant in IMSE, DPI selection, for each pair of degree and smoothness{p_end} + +{marker references}{...} +{title:References} + +{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022a. +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":On Binscatter}. +{it:arXiv:1902.09608}. +{p_end} + +{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022b. +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Binscatter Regressions}. +{it:arXiv:1902.09615}. +{p_end} + + +{marker authors}{...} +{title:Authors} + +{p 4 8} Matias D. Cattaneo, Princeton University, Princeton, NJ. +{browse "mailto:cattaneo@princeton.edu":cattaneo@princeton.edu}. +{p_end} + +{p 4 8} Richard K. Crump, Federal Reserve Bank of New York, New York, NY. +{browse "mailto:richard.crump@ny.frb.org":richard.crump@ny.frb.org}. +{p_end} + +{p 4 8} Max H. 
Farrell, University of Chicago, Chicago, IL. +{browse "mailto:max.farrell@chicagobooth.edu":max.farrell@chicagobooth.edu}. +{p_end} + +{p 4 8} Yingjie Feng, Tsinghua University, Beijing, China. +{browse "mailto:fengyingjiepku@gmail.com":fengyingjiepku@gmail.com}. +{p_end} + diff --git a/110/replication_package/replication/ado/plus/b/binstest.ado b/110/replication_package/replication/ado/plus/b/binstest.ado new file mode 100644 index 0000000000000000000000000000000000000000..a7b9687bb982a0f70242d4763b0876c10cfca4cb --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binstest.ado @@ -0,0 +1,1455 @@ +*! version 1.2 09-Oct-2022 + +capture program drop binstest +program define binstest, eclass + version 13 + + syntax varlist(min=2 numeric fv ts) [if] [in] [fw aw pw] [, deriv(integer 0) at(string asis) nolink /// + estmethod(string) estmethodopt(string asis) absorb(string asis) reghdfeopt(string asis) /// + testmodel(string) /// + testmodelparfit(string asis) testmodelpoly(string) /// + testshape(string) /// + testshapel(numlist) testshaper(numlist) testshape2(numlist) /// + lp(string) /// + bins(numlist integer max=2 >=0) nbins(string) /// + pselect(numlist integer >=0) sselect(numlist integer >=0) /// + binspos(string) binsmethod(string) nbinsrot(string) randcut(numlist max=1 >=0 <=1) /// + nsims(integer 500) simsgrid(integer 20) simsseed(numlist integer max=1 >=0) /// + dfcheck(numlist integer max=2 >=0) masspoints(string) usegtools(string) /// + vce(passthru) asyvar(string) /// + numdist(string) numclust(string)] + /* last line only for internal use */ + + * Regularization constant (for checking only) + local qrot=2 + + ************************************** + * Create weight local + if ("`weight'"!="") { + local wt [`weight'`exp'] + local wtype=substr("`weight'",1,1) + } + + * Extract options + * which model? 
+ if ("`absorb'"!="") { + if ("`estmethod'"!="") { + if ("`estmethod'"!="reghdfe") { + di as error "absorb() can only be combined with estmethod(reghdfe)." + exit + } + } + else local estmethod "reghdfe" + } + if ("`estmethod'"=="") local estmethod "reg" + tokenize `estmethod' + local estmethod `1' + if ("`estmethod'"=="reg") { + local estcmd "reg" + } + else if ("`estmethod'"=="qreg") { + local estcmd "qreg" + local quantile `2' + if ("`quantile'"=="") local quantile=0.5 + } + else if ("`estmethod'"=="logit") { + local estcmd "logit" + } + else if ("`estmethod'"=="probit") { + local estcmd "probit" + } + else if ("`estmethod'"=="reghdfe") { + local estcmd "reghdfe" + } + + * report the results for the cond. mean model? + if ("`link'"!="") local transform "F" + else local transform "T" + + * default vce + if ("`vce'"=="") local vce "vce(robust)" + * vce for bin selection + if ("`estmethod'"=="qreg") { + if ("`vce'"=="vce(iid)") local vce_select "vce(ols)" + else local vce_select "vce(robust)" + } + else if ("`estmethod'"=="logit"|"`estmethod'"=="probit") { + if ("`vce'"=="oim"|"`vce'"=="opg") local vce_select "vce(ols)" + else local vce_select "`vce'" + } + else if ("`estmethod'"=="reg"|"`estmethod'"=="reghdfe") { + local vce_select "`vce'" + } + + * use bootstrap cmd? cluster specified? + local vcetemp: subinstr local vce "vce(" "", all + local vcetemp: subinstr local vcetemp ")" "", all + tokenize "`vcetemp'", parse(", ") + if ("`1'"=="boot" | "`1'"=="bootstrap") { + local boot "on" + local repstemp `3' + if ("`repstemp'"=="") local repstemp reps(20) + local repstemp: subinstr local repstemp "reps(" "", all + local reps: subinstr local repstemp ")" "", all + if ("`estmethod'"=="qreg") { + local estcmd "bsqreg" + if ("`weight'"!="") { + di as error "Weights not allowed for bootstrapping." 
+ exit + } + } + } + else if ("`1'"=="cl"|"`1'"=="clu"|"`1'"=="clus"|"`1'"=="clust"| /// + "`1'"=="cluste"|"`1'"=="cluster") { + if ("`3'"==""|"`3'"==",") local clusterON "T" /* cluster is specified */ + local clustervar `2' + local boot "off" + } + else { + local boot "off" + } + + if ("`asyvar'"=="") local asyvar "off" + + if ("`binspos'"=="es") local binspos "ES" + if ("`binspos'"=="qs") local binspos "QS" + if ("`binspos'"=="") local binspos "QS" + if ("`binsmethod'"=="rot") local binsmethod "ROT" + if ("`binsmethod'"=="dpi") local binsmethod "DPI" + if ("`binsmethod'"=="") local binsmethod "DPI" + + + * analyze options related to J, p and s + if ("`testshape'"!="T"&"`testshape'"!="F"&"`testshape'"!="") { + numlist "`testshape'", integer max(2) range(>=0) + local testshape=r(numlist) + } + if ("`testmodel'"!="T"&"`testmodel'"!="F"&"`testmodel'"!="") { + numlist "`testmodel'", integer max(2) range(>=0) + local testmodel=r(numlist) + } + + if ("`testshape'"=="F") local testshape "" + if ("`testmodel'"=="F") local testmodel "" + + local selection "" + + * analyze nbins + if ("`nbins'"=="T") local nbins=0 + local len_nbins=0 + if ("`nbins'"!=""&"`nbins'"!="F") { + numlist "`nbins'", integer sort + local nbins=r(numlist) + local len_nbins: word count `nbins' + } + * shut down selection if knot is specified by users + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + if ("`nbins'"!=""|"`pselect'"!=""|"`sselect'"!="") { + di as error "nbins(), pselect() or sselect() incorrectly specified." 
+ exit + } + } + + + * analyze numlist in pselect and sselect + local len_p=0 + local len_s=0 + + if ("`pselect'"!="") { + numlist "`pselect'", integer range(>=`deriv') sort + local plist=r(numlist) + } + + if ("`sselect'"!="") { + numlist "`sselect'", integer range(>=0) sort + local slist=r(numlist) + } + + local len_p: word count `plist' + local len_s: word count `slist' + + if (`len_p'==1&`len_s'==0) { + local slist `plist' + local len_s=1 + } + if (`len_p'==0&`len_s'==1) { + local plist `slist' + local len_p=1 + } + + tokenize `bins' + local binsp "`1'" + local binss "`2'" + if ("`binsp'"=="") local binsp=. + if ("`binss'"=="") local binss `binsp' + if ("`bins'"!="") { + if ("`nbins'"!=""&"`nbins'"!="T"&"`nbins'"!="0"&`len_nbins'<=1) { + di as error "nbins() or bins() is incorrectly specified." + exit + } + } + + * 1st case: select J + if (("`bins'"!=""|"`nbins'"=="0"|`len_nbins'>1|"`nbins'"=="T"|"`nbins'"=="")&("`binspos'"=="ES"|"`binspos'"=="QS")) { + local selection "J" + } + + if ("`selection'"=="J") { + if (`len_p'>1|`len_s'>1) { + if ("`nbins'"=="") { + di as error "nbins() must be specified for degree/smoothness selection." + exit + } + else { + di as error "Only one p and one s are allowed to select # of bins." 
+ exit + } + } + if ("`plist'"=="") local plist=`deriv' + if ("`slist'"=="") local slist=`plist' + if ("`bins'"=="") { + local binsp `plist' + local binss `slist' + } + local len_p=1 + local len_s=1 + if ("`testshape'"=="T"|"`testshape'"=="") local testshape `=`binsp'+1' `=`binss'+1' + if ("`testmodel'"=="T"|"`testmodel'"=="") local testmodel `=`binsp'+1' `=`binss'+1' + } + + * 2nd case: select P (the special case with nbins() pselect() will be modified in the next step) + if ("`selection'"!="J" & ("`testshape'"==""|"`testshape'"=="T"|"`testmodel'"==""|"`testmodel'"=="T")) { + local pselectOK "T" + } + + if ("`pselectOK'"=="T" & `len_nbins'==1 & (`len_p'>1|`len_s'>1)) { + local selection "P" + *if ("`plist'"=="") { + * numlist "`=max(`deriv', 0)'/4" + * local plist=r(numlist) + *} + } + + * 3rd case: user-specified J and p + *if ("`testshape'"!="T"&"`testmodel'"!="T") local userOK "T" + if ((`len_p'<=1&`len_s'<=1) & "`selection'"!="J") { + local selection "NA" + if ("`testshape'"=="") { + if ("`bins'"!="") local testshape `=`binsp'+1' `=`binss'+1' + else { + if (`len_p'==1&`len_s'==1) local testshape `=`plist'+1' `=`slist'+1' + else local testshape `=`deriv'+1' `=`deriv'+1' + } + } + if ("`testmodel'"=="") { + if ("`bins'"!="") local testmodel `=`binsp'+1' `=`binss'+1' + else { + if (`len_p'==1&`len_s'==1) local testmodel `=`plist'+1' `=`slist'+1' + else local testmodel `=`deriv'+1' `=`deriv'+1' + } + } + } + + * exclude all other cases + if ("`selection'"=="") { + di as error "Degree, smoothness, or # of bins are not correctly specified." + exit + } + + * Option for testing shape + tokenize `testshape' + local tsha_p "`1'" + local tsha_s "`2'" + if ("`tsha_p'"==""|"`tsha_p'"=="T") local tsha_p=. 
+ if ("`tsha_s'"=="") local tsha_s `tsha_p' + + local val_L `testshapel' + local nL: word count `val_L' + local val_R `testshaper' + local nR: word count `val_R' + local val_T `testshape2' + local nT: word count `val_T' + local ntestshape=`nL'+`nR'+`nT' /* number of tests (for shape) */ + + * Option for testing model + if ("`testmodelpoly'"!="") { + confirm integer n `testmodelpoly' + local testpolyp=`testmodelpoly' + } + tokenize `testmodel' + local tmod_p "`1'" + local tmod_s "`2'" + if ("`tmod_p'"==""|"`tmod_p'"=="T") local tmod_p=. + if ("`tmod_s'"=="") local tmod_s `tmod_p' + + + + * Add warnings about degrees for estimation and inference + if ("`selection'"=="J") { + if ("`tsha_p'"!=".") { + if (`tsha_p'<=`binsp') { + local tsha_p=`binsp'+1 + local tsha_s=`tsha_p' + di as text "Warning: Degree for testshape() has been changed. It must be greater than the degree for bin selection." + } + } + if ("`tmod_p'"!=".") { + if (`tmod_p'<=`binsp') { + local tmod_p=`binsp'+1 + local tmod_s=`tmod_p' + di as text "Warning: Degree for testmodel() has been changed. It must be greater than the degree for bin selection." + } + } + } + if ("`selection'"=="NA") { + di as text "Warning: Testing procedures are valid when nbins() is much larger than the IMSE-optimal choice." + } + + * mass check? + if ("`masspoints'"=="") { + local massadj "T" + local localcheck "T" + } + else if ("`masspoints'"=="off") { + local massadj "F" + local localcheck "F" + } + else if ("`masspoints'"=="noadjust") { + local massadj "F" + local localcheck "T" + } + else if ("`masspoints'"=="nolocalcheck") { + local massadj "T" + local localcheck "F" + } + else if ("`masspoints'"=="veryfew") { + di as error "veryfew() not allowed for testing." + exit + } + + * extract dfcheck + if ("`dfcheck'"=="") local dfcheck 20 30 + tokenize `dfcheck' + local dfcheck_n1 "`1'" + local dfcheck_n2 "`2'" + + * evaluate at w from another dataset? 
+ if (`"`at'"'!=`""'&`"`at'"'!=`"mean"'&`"`at'"'!=`"median"'&`"`at'"'!=`"0"') local atwout "user" + + * default for lp metric + if ("`lp'"=="") local lp "inf" + + * use gtools commands instead? + if ("`usegtools'"=="off") local usegtools "" + if ("`usegtools'"=="on") local usegtools usegtools + if ("`usegtools'"!="") { + capture which gtools + if (_rc) { + di as error "Gtools package not installed." + exit + } + local localcheck "F" + local sel_gtools "on" + } + else local sel_gtools "off" + + * use reghdfe? + if ("`absorb'"!="") { + capture which reghdfe + if (_rc) { + di as error "reghdfe not installed." + exit + } + } + + * Error check + if (`"`testmodelparfit'"'==`""'&`ntestshape'==0&"`testmodelpoly'"=="") { + di as error "No tests specified." + exit + } + if (`tsha_p'<`tsha_s'|`tmod_p'<`tmod_s'|`binsp'<`binss') { + di as error "p cannot be smaller than s." + exit + } + if ("`tsha_p'"!="."&"`binsp'"!=".") { + if (`tsha_p'<=`binsp') { + di as text in gr "Warning: p for testing <= p for bins() not suggested." + } + } + if ("`tmod_p'"!="."&"`binsp'"!=".") { + if (`tmod_p'<=`binsp') { + di as text in gr "Warning: p for testing <= p for bins() not suggested." + } + } + if (`tsha_p'<`deriv'|`tmod_p'<`deriv') { + di as error "p for test cannot be smaller than deriv." + exit + } + if ("`testmodelpoly'"!="") { + if (`testpolyp'<`deriv') { + di as error "Degree of polynomial model cannot be smaller than deriv." + exit + } + } + if (`nsims'<2000|`simsgrid'<50) { + di as text "Note: A larger number random draws/evaluation points is recommended to obtain the final results." 
+ } + + * Mark sample + preserve + + * Parse varlist into y_var, x_var and w_var + tokenize `varlist' + fvrevar `1', tsonly + local y_var "`r(varlist)'" + fvrevar `2', tsonly + local x_var "`r(varlist)'" + + macro shift 2 + local w_var "`*'" + + * read eval point for w from another file + if ("`atwout'"=="user") { + append using `at' + } + + fvrevar `w_var', tsonly + local w_var "`r(varlist)'" + local nwvar: word count `w_var' + + * Save the last obs in a vector and then drop it + tempname wuser /* a vector used to keep eval for w */ + if ("`atwout'"=="user") { + mata: st_matrix("`wuser'", st_data(`=_N', "`w_var'")) + qui drop in `=_N' + } + + * Get positions of factor vars + local indexlist "" + local i = 1 + foreach v in `w_var' { + if strpos("`v'", ".") == 0 { + local indexlist `indexlist' `i' + } + local ++i + } + + * add a default for at + if (`"`at'"'==""&`nwvar'>0) { + local at "mean" + } + + marksample touse /* now renew the mark to account for missing values */ + qui keep if `touse' + local eN=_N + local nsize=_N /* # of rows in the original dataset */ + + if ("`usegtools'"==""&("`masspoints'"!="off"|"`binspos'"=="QS")) { + if ("`:sortedby'"!="`x_var'") sort `x_var', stable + } + + if ("`wtype'"=="f") qui sum `x_var' `wt', meanonly + else qui sum `x_var', meanonly + + local xmin=r(min) + local xmax=r(max) + local N=r(N) /* sample size, with wt */ + + tempname xvec binedges + mata: `xvec'=st_data(., "`x_var'") + * effective sample size + local Ndist=. + if ("`massadj'"=="T") { + if ("`numdist'"!=""&"`numdist'"!=".") { + local Ndist=`numdist' + } + else { + if ("`usegtools'"=="") { + mata: `binedges'=binsreg_uniq(`xvec', ., 1, "Ndist") + mata: mata drop `binedges' + } + else { + qui gunique `x_var' + local Ndist=r(unique) + } + } + local eN=min(`eN', `Ndist') + } + + local Nclust=. 
+ if ("`clusterON'"=="T") { + if ("`numclust'"!=""&"`numclust'"!=".") { + local Nclust=`numclust' + } + else { + if ("`usegtools'"=="") { + mata: st_local("Nclust", strofreal(rows(uniqrows(st_data(.,"`clustervar'"))))) + } + else { + qui gunique `clustervar' + local Nclust=r(unique) + } + } + if ("`estmethod'"=="qreg") { + local vce "vce(robust)" + di as text in gr "Warning: vce(cluster) not allowed. vce(robust) used instead." + } + local eN=min(`eN', `Nclust') + } + + ********************************** + ********** Bins ****************** + ********************************** + * determine # of bins + if ("`binspos'"!="ES"&"`binspos'"!="QS") { + capture numlist "`binspos'", ascending + if (_rc==0) { + local knotlist `binspos' + local nbins: word count `knotlist' + local first: word 1 of `knotlist' + local last: word `nbins' of `knotlist' + if (`first'<`xmin'|`last'>`xmax') { + di as error "Inner knots specified out of allowed range." + exit + } + else { + local nbins=`nbins'+1 + local binspos "user" + } + } + else { + di as error "Numeric list incorrectly specified in binspos()." + exit + } + } + + * if binsmethod is specified + local imse_bsq_rot=. + local imse_var_rot=. + local imse_bsq_dpi=. + local imse_var_dpi=. + if ("`selection'"!="NA") { + * Check effective sample size + if ("`binsp'"==".") local binspcheck=6 + else local binspcheck=`binsp' + if ("`nbinsrot'"==""&(`eN'<=`dfcheck_n1'+`binspcheck'+1+`qrot')) { + * ROT unavailable, exit + di as error "Too few observations for bin selection." + exit + } + else { + local randcut1k `randcut' + if ("`randcut'"=="" & `N'>5000) { + local randcut1k=max(5000/`N', 0.01) + di as text in gr "Warning: To speed up computation, bin/degree selection uses a subsample of roughly max(5000, 0.01n) observations if n>5000. To use the full sample, set randcut(1)." 
+ } + + if ("`selection'"=="J") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') bins(`binsp' `binss') nbins(`nbins') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce_select' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(nbinsrot_regul)==.) { + di as error "bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local nbins=e(nbinsrot_regul) + local imse_bsq_rot=e(imse_bsq_rot) + local imse_var_rot=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local nbins=e(nbinsdpi) + local imse_bsq_dpi=e(imse_bsq_dpi) + local imse_var_dpi=e(imse_var_dpi) + if (`nbins'==.) { + local nbins=e(nbinsrot_regul) + local imse_bsq_rot=e(imse_bsq_rot) + local imse_var_rot=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + } + else if ("`selection'"=="P") { + qui binsregselect `y_var' `x_var' `w_var' `wt', deriv(`deriv') nbins(`nbins') /// + absorb(`absorb') reghdfeopt(`reghdfeopt') /// + pselect(`plist') sselect(`slist') /// + binsmethod(`binsmethod') binspos(`binspos') nbinsrot(`nbinsrot') /// + `vce_select' masspoints(`masspoints') dfcheck(`dfcheck_n1' `dfcheck_n2') /// + numdist(`Ndist') numclust(`Nclust') randcut(`randcut1k') usegtools(`sel_gtools') + if (e(prot_regul)==.) { + di as error "Bin selection fails." + exit + } + if ("`binsmethod'"=="ROT") { + local binsp=e(prot_regul) + local binss=e(srot_regul) + local imse_bsq_rot=e(imse_bsq_rot) + local imse_var_rot=e(imse_var_rot) + } + else if ("`binsmethod'"=="DPI") { + local binsp=e(pdpi) + local binss=e(sdpi) + local imse_bsq_dpi=e(imse_bsq_dpi) + local imse_var_dpi=e(imse_var_dpi) + if (`binsp'==.) 
{ + local binsp=e(prot_regul) + local binss=e(srot_regul) + local imse_bsq_rot=e(imse_bsq_rot) + local imse_var_rot=e(imse_var_rot) + di as text in gr "Warning: DPI selection fails. ROT choice used." + } + } + if ("`testshape'"=="T"|"`testshape'"=="") { + local tsha_p=`binsp'+1 + local tsha_s=`binss'+1 + } + else { + if (`tsha_p'<=`binsp') { + local tsha_p=`binsp'+1 + local tsha_s=`tsha_p' + di as text "Warning: Degree for testshape() has been changed. It must be greater than the IMSE-optimal degree." + } + } + if ("`testmodel'"=="T"|"`testmodel'"=="") { + local tmod_p=`binsp'+1 + local tmod_s=`binss'+1 + } + else { + if (`tmod_p'<=`binsp') { + local tmod_p=`binsp'+1 + local tmod_s=`tmod_p' + di as text "Warning: Degree for testmodel() has been changed. It must be greater than the IMSE-optimal degree." + } + } + } + } + } + + ******************************************************* + * Check if eff. sample size is large enough for testing + if ((`nbins'-1)*(`tsha_p'-`tsha_s'+1)+`tsha_p'+1+`dfcheck_n2'>=`eN') { + local tsha_fewobs "T" + di as text in gr "Warning: Too small effective sample size for testing shape." + } + if ((`nbins'-1)*(`tmod_p'-`tmod_s'+1)+`tmod_p'+1+`dfcheck_n2'>=`eN') { + local tmod_fewobs "T" + di as text "Warning: Too small effective sample size for testing models." + } + ******************************************************** + + * Generate category variable for data and save knot in matrix + tempname kmat + tempvar xcat + qui gen `xcat'=. 
in 1 + + if ("`binspos'"=="ES") { + local stepsize=(`xmax'-`xmin')/`nbins' + forvalues i=1/`=`nbins'+1' { + mat `kmat'=(nullmat(`kmat') \ `=`xmin'+`stepsize'*(`i'-1)') + } + } + else if ("`knotlist'"!="") { + foreach el of local knotlist { + mat `kmat'=(nullmat(`kmat') \ `el') + } + mat `kmat'=(`xmin' \ `kmat' \ `xmax') + } + else { + if (`nbins'==1) mat `kmat'=(`xmin' \ `xmax') + else { + binsreg_pctile `x_var' `wt', nq(`nbins') `usegtools' + mat `kmat'=(`xmin' \ r(Q) \ `xmax') + } + } + + mata: st_matrix("`kmat'", (`xmin' \ uniqrows(st_matrix("`kmat'")[|2 \ `=`nbins'+1'|]))) + + binsreg_irecode `x_var', knotmat(`kmat') bin(`xcat') /// + `usegtools' nbins(`nbins') pos(`binspos') knotliston(T) + + if (`nbins'!=rowsof(`kmat')-1) { + di as text in gr "Warning: Repeated knots. Some bins dropped." + local nbins=rowsof(`kmat')-1 + } + + * Check for empty bins + if ("`localcheck'"=="T") { + mata: st_local("Ncat", strofreal(rows(uniqrows(st_data(.,"`xcat'"))))) + if (`nbins'==`Ncat') { + mata: `binedges'=binsreg_uniq(`xvec', st_data(.,"`xcat'"), `nbins', "uniqmin") + mata: mata drop `binedges' + } + else { + local uniqmin=0 + di as text in gr "Warning: There are empty bins. Specify a smaller number in nbins()." + } + + if (`ntestshape'!=0) { + if (`uniqmin'<`tsha_p'+1) { + local tsha_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for testing." + } + } + if (`"`testmodelparfit'"'!=`""'|"`testmodelpoly'"!="") { + if (`uniqmin'<`tmod_p'+1) { + local tmod_fewobs "T" + di as text in gr "Warning: Some bins have too few distinct x-values for testing." + } + } + } + + ******************************************************** + * Set seed + if ("`simsseed'"!="") set seed `simsseed' + local uni_last=`simsgrid'*`nbins'+`nbins'-1 + + tempname Xm Xm0 fit se fit0 uni_grid uni_basis tstat vcov /* objects in MATA */ + mata: `uni_grid'=binsreg_grids("`kmat'", `simsgrid') + mata: `Xm0'=.; `fit'=.; `fit0'=0; `se'=.; `vcov'=. 
+ + * adjust w vars + tempname wval + if (`nwvar'>0) { + if (`"`at'"'==`"mean"'|`"`at'"'==`"median"') { + matrix `wval'=J(1, `nwvar', 0) + tempname wvaltemp mataobj + mata: `mataobj'=. + foreach wpos in `indexlist' { + local wname: word `wpos' of `w_var' + if ("`usegtools'"=="") { + if ("`wtype'"!="") qui tabstat `wname' `conds' [aw`exp'], stat(`at') save + else qui tabstat `wname' `conds', stat(`at') save + mat `wvaltemp'=r(StatTotal) + } + else { + qui gstats tabstat `wname' `conds' `wt', stat(`at') matasave("`mataobj'") + mata: st_matrix("`wvaltemp'", `mataobj'.getOutputCol(1)) + } + mat `wval'[1,`wpos']=`wvaltemp'[1,1] + } + mata: mata drop `mataobj' + } + else if (`"`at'"'==`"0"') { + matrix `wval'=J(1,`nwvar',0) + } + else if ("`atwout'"=="user") { + matrix `wval'=`wuser' + } + } + + * define a w vector (possibly a constant) in MATA + tempname wvec wvec0 + mata: `wvec'=J(1,0,.); `wvec0'=J(1,0,.) + if (`nwvar'>0) { + mata: `wvec0'=st_matrix("`wval'") + if (`deriv'==0&"`asyvar'"=="off") mata: `wvec'=(`wvec', `wvec0') + else mata: `wvec'=(`wvec', J(1,`nwvar',0)) + } + if ("`estmethod'"=="qreg"|"`estmethod'"=="reghdfe") { + mata: `wvec0'=(`wvec0', 1) + if (`deriv'==0) mata: `wvec'=(`wvec', 1) + else mata: `wvec'=(`wvec', 0) + } + + ******************************* + ******* Testing Shape ********* + ******************************* + tempname stat_shape pval_shape /* test stat and p value */ + if (`ntestshape'!=0&"`tsha_fewobs'"!="T") { + * Regression + local nseries=(`tsha_p'-`tsha_s'+1)*(`nbins'-1)+`tsha_p'+1 + local tsha_series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local tsha_series `tsha_series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + tempname tsha_b tsha_V + mata: binsreg_st_spdes(`xvec', "`tsha_series'", "`kmat'", st_data(.,"`xcat'"), `tsha_p', 0, `tsha_s') + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") { + capture `estcmd' `y_var' `tsha_series' `w_var' `wt', nocon `vce' `estmethodopt' + } + else if ("`estmethod'"=="qreg") { + if ("`boot'"=="on") capture bsqreg `y_var' `tsha_series' `w_var', quantile(`quantile') reps(`reps') + else capture qreg `y_var' `tsha_series' `w_var' `wt', quantile(`quantile') `vce' `estmethodopt' + } + else { + capture `estcmd' `y_var' `tsha_series' `w_var' `wt', absorb(`absorb') `reghdfeopt' `vce' + } + + * store results + if (_rc==0) { + matrix `tsha_b'=e(b) + matrix `tsha_V'=e(V) + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") mata: binsreg_checkdrop("`tsha_b'", "`tsha_V'", `nseries') + else mata: binsreg_checkdrop("`tsha_b'", "`tsha_V'", `nseries', "T") + matrix `tsha_b'=`tsha_b'' + } + else { + error _rc + exit _rc + } + + * Predict + * fitted values & standard errors + mata: `uni_basis'=binsreg_spdes(`uni_grid'[,1], "`kmat'", `uni_grid'[,3], `tsha_p', `deriv', `tsha_s') + if (("`estmethod'"=="logit"|"`estmethod'"=="probit")&"`transform'"=="T") { + if (`deriv'==0) { + mata: `fit0'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec0')*st_matrix("`tsha_b'") + if ("`estmethod'"=="logit") { + mata: `fit'=logistic(`fit0'); /// + `se'=logisticden(`fit0'):* /// + binsreg_pred((`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'),.,st_matrix("`tsha_V'"),"se")[,2] + } + else { + mata: `fit'=normal(`fit0'); /// + `se'=normalden(`fit0'):* /// + binsreg_pred((`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'),.,st_matrix("`tsha_V'"),"se")[,2] + } + } + if (`deriv'==1) { + mata: `Xm0'=binsreg_spdes(`uni_grid'[,1], "`kmat'", `uni_grid'[,3], `tsha_p', 0, `tsha_s'); /// + `Xm0'=(`Xm0', J(rows(`Xm0'),1,1)#`wvec0'); /// + `fit0'=`Xm0'*st_matrix("`tsha_b'"); /// + `Xm'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec') + if ("`estmethod'"=="logit") { + mata: 
`fit'=binsreg_pred(`Xm',st_matrix("`tsha_b'"),.,"xb")[,1] + if ("`asyvar'"=="off") { + mata: `Xm'=logisticden(`fit0'):*(1:-2*logistic(`fit0')):*`fit':*`Xm0' + /// + logisticden(`fit0'):*`Xm'; /// + `se'=sqrt(rowsum((`Xm'*st_matrix("`tsha_V'")):*`Xm')) + } + else { + mata: `se'=logisticden(`fit0'):*(binsreg_pred(`Xm',.,st_matrix("`tsha_V'"),"se")[,2]) + } + mata: `fit'=logisticden(`fit0'):*`fit' + } + else { + mata: `fit'=binsreg_pred(`Xm',st_matrix("`tsha_b'"),.,"xb")[,1] + if ("`asyvar'"=="off") { + mata:`Xm'=(-`fit0'):*normalden(`fit0'):*`fit':*`Xm0' + /// + normalden(`fit0'):*`Xm'; /// + `se'=sqrt(rowsum((`Xm'*st_matrix("`tsha_V'")):*`Xm')) + } + else { + mata: `se'=normalden(`fit0'):*(binsreg_pred(`Xm',.,st_matrix("`tsha_V'"),"se")[,2]) + } + mata: `fit'=normalden(`fit0'):*`fit' + } + } + mata: `Xm'=(`fit', `se') + } + else { + mata: `Xm'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'); /// + `Xm'=binsreg_pred(`Xm', st_matrix("`tsha_b'"), st_matrix("`tsha_V'"), "all") + } + + * Test statistics + mata: `tstat'=J(`ntestshape',2,.) 
+ + forval i=1/`ntestshape' { + if (`i'<=`nL') { + local val: word `i' of `val_L' + mata: `tstat'[`i',.]=(max((`Xm'[,1]:-`val'):/`Xm'[,2]), 1) + } + else if (`i'<=`nL'+`nR') { + local val: word `=`i'-`nL'' of `val_R' + mata: `tstat'[`i',.]=(min((`Xm'[,1]:-`val'):/`Xm'[,2]), 2) + } + else { + local val: word `=`i'-`nL'-`nR'' of `val_T' + if ("`lp'"=="inf") { + mata: `tstat'[`i',.]=(max(abs((`Xm'[,1]:-`val'):/`Xm'[,2])), 3) + } + else { + mata: `tstat'[`i',.]=(mean(abs((`Xm'[,1]:-`val'):/`Xm'[,2]):^`lp')^(1/`lp'), 3) + } + } + } + mata: st_matrix("`stat_shape'", `tstat') + + * p value + if ("`estmethod'"=="qreg"|"`estmethod'"=="reghdfe") { + if (`deriv'==0) mata: `uni_basis'=(`uni_basis', J(rows(`uni_basis'),1,1)) + else mata: `uni_basis'=(`uni_basis', J(rows(`uni_basis'),1,0)) + mata: `vcov'=st_matrix("`tsha_V'"); /// + `vcov'= (`vcov'[|1,1 \ `nseries', `nseries'|], `vcov'[|1,cols(`vcov') \ `nseries', cols(`vcov')|] \ /// + `vcov'[|cols(`vcov'), 1 \ cols(`vcov'), `nseries'|], `vcov'[cols(`vcov'), cols(`vcov')]); /// + st_matrix("`vcov'", `vcov') + } + + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") { + mata: `Xm'=binsreg_pred(`uni_basis', ., st_matrix("`tsha_V'")[|1,1 \ `nseries',`nseries'|], "se"); /// + binsreg_pval(`uni_basis', `Xm'[,2], "`tsha_V'", "`stat_shape'", `nsims', `nseries', /// + ".", 0, "`pval_shape'", ".", "`lp'") + } + else { + mata: `Xm'=binsreg_pred(`uni_basis', ., `vcov', "se"); /// + binsreg_pval(`uni_basis', `Xm'[,2], "`vcov'", "`stat_shape'", `nsims', `=`nseries'+1', /// + ".", 0, "`pval_shape'", ".", "`lp'") + } + + drop `tsha_series' + mata: mata drop `Xm' `uni_basis' `tstat' + + if ("`testshapel'"!="") { + tempname stat_shapeL pval_shapeL + mat `stat_shapeL'=`stat_shape'[1..`nL',1] + mat `pval_shapeL'=`pval_shape'[1..`nL',1] + } + if ("`testshaper'"!="") { + tempname stat_shapeR pval_shapeR + mat `stat_shapeR'=`stat_shape'[`=`nL'+1'..`=`nL'+`nR'',1] + mat `pval_shapeR'=`pval_shape'[`=`nL'+1'..`=`nL'+`nR'',1] + } + if 
("`testshape2'"!="") { + tempname stat_shape2 pval_shape2 + mat `stat_shape2'=`stat_shape'[`=`nL'+`nR'+1'..`ntestshape',1] + mat `pval_shape2'=`pval_shape'[`=`nL'+`nR'+1'..`ntestshape',1] + } + } + else { + local tsha_p=. + local tsha_s=. + } + + ************************************* + ****** Testing Models *************** + ************************************* + tempname stat_poly pval_poly /* for testing poly reg */ + tempname stat_model pval_model /* for testing models */ + if ((`"`testmodelparfit'"'!=`""'|"`testmodelpoly'"!="")&"`tmod_fewobs'"!="T") { + *********************************************** + * Regression: for BOTH + local nseries=(`tmod_p'-`tmod_s'+1)*(`nbins'-1)+`tmod_p'+1 + + tempname tmod_b tmod_V + capture confirm matrix `tsha_b' `tsha_V' + if (_rc==0&`tmod_p'==`tsha_p'& `tmod_s'==`tsha_s') { + matrix `tmod_b'=`tsha_b' + matrix `tmod_V'=`tsha_V' + } + else { + local tmod_series "" + forvalues i=1/`nseries' { + tempvar sp`i' + local tmod_series `tmod_series' `sp`i'' + qui gen `sp`i''=. 
in 1 + } + + mata: binsreg_st_spdes(`xvec', "`tmod_series'", "`kmat'", st_data(.,"`xcat'"), `tmod_p', 0, `tmod_s') + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") { + capture `estcmd' `y_var' `tmod_series' `w_var' `wt', nocon `vce' `estmethodopt' + } + else if ("`estmethod'"=="qreg") { + if ("`boot'"=="on") capture bsqreg `y_var' `tmod_series' `w_var', quantile(`quantile') reps(`reps') + else capture qreg `y_var' `tmod_series' `w_var' `wt', quantile(`quantile') `vce' `estmethodopt' + } + else { + capture `estcmd' `y_var' `tmod_series' `w_var' `wt', absorb(`absorb') `reghdfeopt' `vce' + } + + * store results + if (_rc==0) { + matrix `tmod_b'=e(b) + matrix `tmod_V'=e(V) + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") mata: binsreg_checkdrop("`tmod_b'", "`tmod_V'", `nseries') + else mata: binsreg_checkdrop("`tmod_b'", "`tmod_V'", `nseries', "T") + matrix `tmod_b'=`tmod_b'' + } + else { + error _rc + exit _rc + } + + drop `tmod_series' + } + + + ******************************************************** + * If a test for poly reg is requested + if ("`testmodelpoly'"!="") { + * fitted values + mata: `uni_basis'=binsreg_spdes(`uni_grid'[,1], "`kmat'", `uni_grid'[,3], `tmod_p', `deriv', `tmod_s') + + if (("`estmethod'"=="logit"|"`estmethod'"=="probit")&"`transform'"=="T") { + if (`deriv'==0) { + mata: `fit0'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec0')*st_matrix("`tmod_b'") + if ("`estmethod'"=="logit") { + mata: `fit'=logistic(`fit0'); /// + `se'=logisticden(`fit0'):* /// + binsreg_pred((`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'),.,st_matrix("`tmod_V'"),"se")[,2] + } + else { + mata: `fit'=normal(`fit0'); /// + `se'=normalden(`fit0'):* /// + binsreg_pred((`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'),.,st_matrix("`tmod_V'"),"se")[,2] + } + } + if (`deriv'==1) { + mata: `Xm0'=binsreg_spdes(`uni_grid'[,1], "`kmat'", `uni_grid'[,3], `tmod_p', 0, `tmod_s'); /// + `Xm0'=(`Xm0', J(rows(`Xm0'),1,1)#`wvec0'); /// + `fit0'=`Xm0'*st_matrix("`tmod_b'"); /// + 
`Xm'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec') + if ("`estmethod'"=="logit") { + mata: `fit'=binsreg_pred(`Xm',st_matrix("`tmod_b'"),.,"xb")[,1] + if ("`asyvar'"=="off") { + mata: `Xm'=logisticden(`fit0'):*(1:-2*logistic(`fit0')):*`fit':*`Xm0' + /// + logisticden(`fit0'):*`Xm'; /// + `se'=sqrt(rowsum((`Xm'*st_matrix("`tmod_V'")):*`Xm')) + } + else { + mata: `se'=logisticden(`fit0'):*(binsreg_pred(`Xm',.,st_matrix("`tmod_V'"),"se")[,2]) + } + mata: `fit'=logisticden(`fit0'):*`fit' + } + else { + mata: `fit'=binsreg_pred(`Xm',st_matrix("`tmod_b'"),.,"xb")[,1] + if ("`asyvar'"=="off") { + mata:`Xm'=(-`fit0'):*normalden(`fit0'):*`fit':*`Xm0' + /// + normalden(`fit0'):*`Xm'; /// + `se'=sqrt(rowsum((`Xm'*st_matrix("`tmod_V'")):*`Xm')) + } + else { + mata: `se'=normalden(`fit0'):*(binsreg_pred(`Xm',.,st_matrix("`tmod_V'"),"se")[,2]) + } + mata: `fit'=normalden(`fit0'):*`fit' + } + } + mata: `Xm'=(`fit', `se') + } + else { + mata: `Xm'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'); /// + `Xm'=binsreg_pred(`Xm', st_matrix("`tmod_b'"), st_matrix("`tmod_V'"), "all") + } + + * Polynomial fit + tempvar poly_fit + local poly_series "" + *if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") local ini=0 + *else local ini=1 + forval i=1/`testpolyp' { + tempvar x_var_`i' + qui gen `x_var_`i''=`x_var'^`i' + local poly_series `poly_series' `x_var_`i'' + } + + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") { + capture `estcmd' `y_var' `poly_series' `w_var' `wt', `estmethodopt' + } + else if ("`estmethod'"=="qreg") { + capture qreg `y_var' `poly_series' `w_var' `wt', quantile(`quantile') `estmethodopt' + } + else { + capture `estcmd' `y_var' `poly_series' `w_var' `wt', absorb(`absorb') `reghdfeopt' + } + + * store results + tempname poly_b poly_adjw + if (_rc==0) { + matrix `poly_b'=e(b) + if (`nwvar'>0&`deriv'==0) matrix `poly_adjw'=`wval'*`poly_b'[1, `=`testpolyp'+1'..`=`testpolyp'+`nwvar'']' + else matrix `poly_adjw'=0 + + if (`deriv'==0) { + if (`testpolyp'>0) matrix 
`poly_b'=(`poly_b'[1, `=`testpolyp'+`nwvar'+1'], `poly_b'[1,1..`testpolyp']) + else matrix `poly_b'=`poly_b'[1, `=`testpolyp'+`nwvar'+1'] + } + else { + matrix `poly_b'=`poly_b'[1, `deriv'..`testpolyp'] + } + *if ("`estmethod'"=="qreg") matrix `poly_b'=(`poly_b'[1,colsof(`poly_b')], `poly_b'[1, 1..`testpolyp']) + *matrix `poly_b'=`poly_b'[1, `=`deriv'+1'..`=`testpolyp'+1'] + } + else { + error _rc + exit _rc + } + + * Data for derivative + tempname polym polym0 + mata: `polym'=J(`uni_last',0,.) + forval i=`deriv'/`testpolyp' { + mata: `polym'=(`polym', `uni_grid'[,1]:^(`i'-`deriv')*factorial(`i')/factorial(`i'-`deriv')) + } + + mata: `polym'=`polym'*st_matrix("`poly_b'")':+st_matrix("`poly_adjw'") + + if (("`estmethod'"=="logit"|"`estmethod'"=="probit")&"`transform'"=="T") { + mata: `polym0'=J(rows(`uni_grid'),0,.) + if (`deriv'==1) { + forval i=1/`testpolyp' { + mata: `polym0'=(`polym0', `uni_grid'[,1]:^`i') + } + if (`nwvar'>0) mata: `polym0'=(`polym0', J(rows(`polym0'),1,1)#st_matrix("`wval'")) + mata: `polym0'=(`polym0', J(rows(`polym0'),1,1)) + } + + if ("`estmethod'"=="logit") { + if (`deriv'==0) mata: `polym'=logistic(`polym') + if (`deriv'==1) mata: `polym'=logisticden(`polym0'*st_matrix("e(b)")'):*`polym' + } + else { + if (`deriv'==0) mata: `polym'=normal(`polym') + if (`deriv'==1) mata: `polym'=normalden(`polym0'*st_matrix("e(b)")'):*`polym' + } + mata: mata drop `polym0' + } + + if ("`lp'"=="inf") { + mata: st_matrix("`stat_poly'", (max(abs((`Xm'[,1]-`polym'):/`Xm'[,2])),3)) + } + else { + mata: st_matrix("`stat_poly'", (mean(abs((`Xm'[,1]-`polym'):/`Xm'[,2]):^`lp')^(1/`lp'),3)) + } + + * p value + if ("`estmethod'"=="qreg"|"`estmethod'"=="reghdfe") { + if (`deriv'==0) mata: `uni_basis'=(`uni_basis', J(rows(`uni_basis'),1,1)) + else mata: `uni_basis'=(`uni_basis', J(rows(`uni_basis'),1,0)) + mata: `vcov'=st_matrix("`tmod_V'"); /// + `vcov'= (`vcov'[|1,1 \ `nseries', `nseries'|], `vcov'[|1,cols(`vcov') \ `nseries', cols(`vcov')|] \ /// + 
`vcov'[|cols(`vcov'), 1 \ cols(`vcov'), `nseries'|], `vcov'[cols(`vcov'), cols(`vcov')]); /// + st_matrix("`vcov'", `vcov') + } + + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") { + mata: `Xm'=binsreg_pred(`uni_basis', ., st_matrix("`tmod_V'")[|1,1 \ `nseries',`nseries'|], "se"); /// + binsreg_pval(`uni_basis', `Xm'[,2], "`tmod_V'", "`stat_poly'", /// + `nsims', `nseries', ".", 0, "`pval_poly'", ".", "`lp'") + } + else { + mata: `Xm'=binsreg_pred(`uni_basis', ., `vcov', "se"); /// + binsreg_pval(`uni_basis', `Xm'[,2], "`vcov'", "`stat_poly'", /// + `nsims', `=`nseries'+1', ".", 0, "`pval_poly'", ".", "`lp'") + } + + mata: mata drop `Xm' `polym' `uni_basis' + } + + ****************************************************************** + * if the model is stored in another file + if (`"`testmodelparfit'"'!=`""') { + use `"`testmodelparfit'"', clear + qui ds binsreg_fit* + local varls=r(varlist) + local nfitval: word count `varls' + tempvar uni_xcat uni_fit uni_se + + qui gen `uni_fit'=. in 1 + qui gen `uni_se'=. in 1 + qui gen `uni_xcat'=. 
in 1 + binsreg_irecode `x_var', knotmat(`kmat') bin(`uni_xcat') /// + `usegtools' nbins(`nbins') pos(`binspos') knotliston(T) + + mata: `uni_basis'=binsreg_spdes(st_data(.,"`x_var'"), "`kmat'", st_data(.,"`uni_xcat'"), /// + `tmod_p', `deriv', `tmod_s') + + if (("`estmethod'"=="logit"|"`estmethod'"=="probit")&"`transform'"=="T") { + if (`deriv'==0) { + mata: `fit0'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec0')*st_matrix("`tmod_b'") + if ("`estmethod'"=="logit") { + mata: `fit'=logistic(`fit0'); /// + `se'=logisticden(`fit0'):* /// + binsreg_pred((`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'),.,st_matrix("`tmod_V'"),"se")[,2] + } + else { + mata: `fit'=normal(`fit0'); /// + `se'=normalden(`fit0'):* /// + binsreg_pred((`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'),.,st_matrix("`tmod_V'"),"se")[,2] + } + } + if (`deriv'==1) { + mata: `Xm0'=binsreg_spdes(st_data(.,"`x_var'"), "`kmat'", st_data(.,"`uni_xcat'"), `tmod_p', 0, `tmod_s'); /// + `Xm0'=(`Xm0', J(rows(`Xm0'),1,1)#`wvec0'); /// + `fit0'=`Xm0'*st_matrix("`tmod_b'"); /// + `Xm'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec') + if ("`estmethod'"=="logit") { + mata: `fit'=binsreg_pred(`Xm',st_matrix("`tmod_b'"),.,"xb")[,1] + if ("`asyvar'"=="off") { + mata: `Xm'=logisticden(`fit0'):*(1:-2*logistic(`fit0')):*`fit':*`Xm0' + /// + logisticden(`fit0'):*`Xm'; /// + `se'=sqrt(rowsum((`Xm'*st_matrix("`tmod_V'")):*`Xm')) + } + else { + mata: `se'=logisticden(`fit0'):*(binsreg_pred(`Xm',.,st_matrix("`tmod_V'"),"se")[,2]) + } + mata: `fit'=logisticden(`fit0'):*`fit' + } + else { + mata: `fit'=binsreg_pred(`Xm',st_matrix("`tmod_b'"),.,"xb")[,1] + if ("`asyvar'"=="off") { + mata:`Xm'=(-`fit0'):*normalden(`fit0'):*`fit':*`Xm0' + /// + normalden(`fit0'):*`Xm'; /// + `se'=sqrt(rowsum((`Xm'*st_matrix("`tmod_V'")):*`Xm')) + } + else { + mata: `se'=normalden(`fit0'):*(binsreg_pred(`Xm',.,st_matrix("`tmod_V'"),"se")[,2]) + } + mata: `fit'=normalden(`fit0'):*`fit' + } + } + mata: `Xm'=(`fit', `se') + } + else { + mata: 
`Xm'=(`uni_basis', J(rows(`uni_basis'),1,1)#`wvec'); /// + `Xm'=binsreg_pred(`Xm', st_matrix("`tmod_b'"), st_matrix("`tmod_V'"), "all") + } + + mata: `tstat'=J(`nfitval',2,.) + local counter=1 + if ("`lp'"=="inf") { + foreach var of local varls { + mata: `tstat'[`counter',]=(max(abs((`Xm'[,1]-st_data(.,"`var'")):/`Xm'[,2])), 3) + local ++counter + } + } + else { + foreach var of local varls { + mata: `tstat'[`counter',]=(mean(abs((`Xm'[,1]-st_data(.,"`var'")):/`Xm'[,2]):^`lp')^(1/`lp'), 3) + local ++counter + } + } + mata: st_matrix("`stat_model'", `tstat') + + * p values + if ("`estmethod'"=="qreg"|"`estmethod'"=="reghdfe") { + if (`deriv'==0) mata: `uni_basis'=(`uni_basis', J(rows(`uni_basis'),1,1)) + else mata: `uni_basis'=(`uni_basis', J(rows(`uni_basis'),1,0)) + mata: `vcov'=st_matrix("`tmod_V'"); /// + `vcov'= (`vcov'[|1,1 \ `nseries', `nseries'|], `vcov'[|1,cols(`vcov') \ `nseries', cols(`vcov')|] \ /// + `vcov'[|cols(`vcov'), 1 \ cols(`vcov'), `nseries'|], `vcov'[cols(`vcov'), cols(`vcov')]); /// + st_matrix("`vcov'", `vcov') + } + if ("`estmethod'"!="qreg"&"`estmethod'"!="reghdfe") { + mata: `Xm'=binsreg_pred(`uni_basis', ., st_matrix("`tmod_V'")[|1,1 \ `nseries',`nseries'|], "se"); /// + binsreg_pval(`uni_basis', `Xm'[,2], "`tmod_V'", "`stat_model'", `nsims', /// + `nseries', ".", 0, "`pval_model'", ".", "`lp'") + } + else { + mata: `Xm'=binsreg_pred(`uni_basis', ., `vcov', "se"); /// + binsreg_pval(`uni_basis', `Xm'[,2], "`vcov'", "`stat_model'", `nsims', /// + `=`nseries'+1', ".", 0, "`pval_model'", ".", "`lp'") + } + + mata: mata drop `Xm' `tstat' `uni_basis' + } + } + else { + local tmod_p=. + local tmod_s=. 
+ } + mata: mata drop `uni_grid' `xvec' `Xm0' `fit' `se' `fit0' `wvec' `wvec0' `vcov' + + ****** End of testing ***************************************** + + ****************************** + ******* Display ************** + ****************************** + if ("`knotlist'"!=""|"`selection'"=="NA") { + local binselectmethod "User-specified" + local placement "User-specified" + } + else { + if ("`binsmethod'"=="DPI") local binselectmethod "IMSE-optimal plug-in choice" + if ("`binsmethod'"=="ROT") local binselectmethod "IMSE-optimal rule-of-thumb choice" + if ("`selection'"=="J") local binselectmethod "`binselectmethod' (select # of bins)" + if ("`selection'"=="P") local binselectmethod "`binselectmethod' (select degree and smoothness)" + if ("`binspos'"=="ES") local placement "Evenly-spaced" + if ("`binspos'"=="QS") local placement "Quantile-spaced" + } + + di "" + di in smcl in gr "Hypothesis tests based on binscatter estimates" + di in smcl in gr "Estimation method: `estmethod'" + di in smcl in gr "Bin selection method: `binselectmethod'" + di in smcl in gr "Placement: `placement'" + di in smcl in gr "Derivative: `deriv'" + di "" + di in smcl in gr "{hline 30}{c TT}{hline 15}" + di in smcl in gr "{lalign 1:# of observations}" _col(30) " {c |} " _col(32) as result %7.0f `N' + di in smcl in gr "{lalign 1:# of distinct values}" _col(30) " {c |} " _col(32) as result %7.0f `Ndist' + di in smcl in gr "{lalign 1:# of clusters}" _col(30) " {c |} " _col(32) as result %7.0f `Nclust' + di in smcl in gr "{hline 30}{c +}{hline 15}" + di in smcl in gr "{lalign 1:Bin/Degree selection:}" _col(30) " {c |} " +* if ("`binselectmethod'"=="User-specified") { +* di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(39) as result %7.0f "." +* di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(39) as result %7.0f "." 
+* } +* else { + di in smcl in gr "{ralign 29:Degree of polynomial}" _col(30) " {c |} " _col(32) as result %7.0f `binsp' + di in smcl in gr "{ralign 29:# of smoothness constraints}" _col(30) " {c |} " _col(32) as result %7.0f `binss' +* } + di in smcl in gr "{ralign 29:# of bins}" _col(30) " {c |} " _col(32) as result %7.0f `nbins' + di in smcl in gr "{hline 30}{c BT}{hline 15}" + + if ("`tsha_fewobs'"!="T") { + if (`ntestshape'!=0) { + di "" + di in smcl in gr "Shape Restriction Tests:" + di in smcl in gr "Degree: `tsha_p'" _col(15) "# of smoothness constraints: `tsha_s'" + + } + if ("`testshapel'"!="") { + di "" + di in smcl in gr "{hline 19}{c TT}{hline 30}" + di in smcl in gr "H0: sup mu <=" _col(20) in gr /// + "{c |}" _col(22) "sup T" _col(40) "p value" + di in smcl in gr "{hline 19}{c +}{hline 30}" + forval i=1/`nL' { + local val: word `i' of `testshapel' + local stat=`stat_shapeL'[`i',1] + local pval=`pval_shapeL'[`i',1] + di in smcl in yellow "{rcenter 19:`val'}" _col(20) in gr "{c |}" /// + _col(22) as result %7.3f `stat' /// + _col(40) as result %7.3f `pval' + } + di in smcl in gr "{hline 19}{c BT}{hline 30}" + } + + if ("`testshaper'"!="") { + di "" + di in smcl in gr "{hline 19}{c TT}{hline 30}" + di in smcl in gr "H0: inf mu >=" _col(20) in gr /// + "{c |}" _col(22) "inf T" _col(40) "p value" + di in smcl in gr "{hline 19}{c +}{hline 30}" + forval i=1/`nR' { + local val: word `i' of `testshaper' + local stat=`stat_shapeR'[`i',1] + local pval=`pval_shapeR'[`i',1] + di in smcl in yellow "{rcenter 19:`val'}" _col(20) in gr "{c |}" /// + _col(22) as result %7.3f `stat' /// + _col(40) as result %7.3f `pval' + } + di in smcl in gr "{hline 19}{c BT}{hline 30}" + } + if ("`testshape2'"!="") { + di "" + di in smcl in gr "{hline 19}{c TT}{hline 30}" + if ("`lp'"=="inf") { + di in smcl in gr "H0: mu =" _col(20) in gr /// + "{c |}" _col(22) "sup |T|" _col(40) "p value" + } + else { + di in smcl in gr "H0: mu =" _col(20) in gr /// + "{c |}" _col(22) "L`lp' of T" 
_col(40) "p value" + } + di in smcl in gr "{hline 19}{c +}{hline 30}" + forval i=1/`nT' { + local val: word `i' of `testshape2' + local stat=`stat_shape2'[`i',1] + local pval=`pval_shape2'[`i',1] + di in smcl in yellow "{rcenter 19:`val'}" _col(20) in gr "{c |}" /// + _col(22) as result %7.3f `stat' /// + _col(40) as result %7.3f `pval' + } + di in smcl in gr "{hline 19}{c BT}{hline 30}" + } + } + + if ("`tmod_fewobs'"!="T") { + if ("`testmodelpoly'"!=""|`"`testmodelparfit'"'!=`""') { + di "" + di in smcl in gr "Model specification Tests:" + di in smcl in gr "Degree: `tmod_p'" _col(15) "# of smoothness constraints: `tmod_s'" + } + if ("`testmodelpoly'"!="") { + di "" + local stat=`stat_poly'[1,1] + local pval=`pval_poly'[1,1] + di in smcl in gr "{hline 19}{c TT}{hline 30}" + if ("`lp'"=="inf") { + di in smcl in gr "H0: mu =" _col(20) in gr /// + "{c |}" _col(22) "sup |T|" _col(40) "p value" + } + else { + di in smcl in gr "H0: mu =" _col(20) in gr /// + "{c |}" _col(22) "L`lp' of T" _col(40) "p value" + } + di in smcl in gr "{hline 19}{c +}{hline 30}" + di in smcl in gr "poly. 
degree " as result `testpolyp' _col(20) in gr "{c |}" /// + _col(22) as result %7.3f `stat' /// + _col(40) as result %7.3f `pval' + di in smcl in gr "{hline 19}{c BT}{hline 30}" + } + if (`"`testmodelparfit'"'!=`""') { + di "" + di in smcl in gr `"Input file: `testmodelparfit'.dta"' + di in smcl in gr "{hline 19}{c TT}{hline 30}" + if ("`lp'"=="inf") { + di in smcl in gr "H0: mu =" _col(20) in gr /// + "{c |}" _col(22) "sup |T|" _col(40) "p value" + } + else { + di in smcl in gr "H0: mu =" _col(20) in gr /// + "{c |}" _col(22) "L`lp' of T" _col(40) "p value" + } + di in smcl in gr "{hline 19}{c +}{hline 30}" + forval i=1/`nfitval' { + local val: word `i' of `varls' + local stat=`stat_model'[`i',1] + local pval=`pval_model'[`i',1] + di in smcl in yellow "{rcenter 19:`val'}" _col(20) in gr "{c |}" /// + _col(22) as result %7.3f `stat' /// + _col(40) as result %7.3f `pval' + } + di in smcl in gr "{hline 19}{c BT}{hline 30}" + } + } + + **************************** + ******* Return ************* + **************************** + ereturn clear + ereturn scalar N=`N' + ereturn scalar Ndist=`Ndist' + ereturn scalar Nclust=`Nclust' + ereturn scalar nbins=`nbins' + ereturn scalar p=`binsp' + ereturn scalar s=`binss' + ereturn scalar testshape_p=`tsha_p' + ereturn scalar testshape_s=`tsha_s' + ereturn scalar testmodel_p=`tmod_p' + ereturn scalar testmodel_s=`tmod_s' + + ereturn scalar imse_var_rot=`imse_var_rot' + ereturn scalar imse_bsq_rot=`imse_bsq_rot' + ereturn scalar imse_var_dpi=`imse_var_dpi' + ereturn scalar imse_bsq_dpi=`imse_bsq_dpi' + + if ("`tsha_fewobs'"!="T") { + if ("`testshapel'"!="") { + ereturn local testvalueL `testshapel' + ereturn matrix stat_shapeL=`stat_shapeL' + ereturn matrix pval_shapeL=`pval_shapeL' + } + if ("`testshaper'"!="") { + ereturn local testvalueR `testshaper' + ereturn matrix stat_shapeR=`stat_shapeR' + ereturn matrix pval_shapeR=`pval_shapeR' + } + if ("`testshape2'"!="") { + ereturn local testvalue2 `testshape2' + ereturn matrix 
stat_shape2=`stat_shape2' + ereturn matrix pval_shape2=`pval_shape2' + } + } + + if ("`tmod_fewobs'"!="T") { + if ("`testmodelpoly'"!="") { + ereturn scalar testpolyp=`testpolyp' + ereturn scalar stat_poly=`stat_poly'[1,1] + ereturn scalar pval_poly=`pval_poly'[1,1] + } + if (`"`testmodelparfit'"'!=`""') { + ereturn local testvarlist `varls' + ereturn matrix stat_model=`stat_model' + ereturn matrix pval_model=`pval_model' + } + } + +end + diff --git a/110/replication_package/replication/ado/plus/b/binstest.sthlp b/110/replication_package/replication/ado/plus/b/binstest.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..63287b601cfda46382e50c1d76e8ef925f2068a1 --- /dev/null +++ b/110/replication_package/replication/ado/plus/b/binstest.sthlp @@ -0,0 +1,356 @@ +{smcl} +{* *! version 1.2 09-OCT-2022}{...} +{viewerjumpto "Syntax" "binstest##syntax"}{...} +{viewerjumpto "Description" "binstest##description"}{...} +{viewerjumpto "Options" "binstest##options"}{...} +{viewerjumpto "Examples" "binstest##examples"}{...} +{viewerjumpto "Stored results" "binstest##stored_results"}{...} +{viewerjumpto "References" "binstest##references"}{...} +{viewerjumpto "Authors" "binstest##authors"}{...} +{cmd:help binstest} +{hline} + +{title:Title} + +{p 4 8}{hi:binstest} {hline 2} Data-Driven Nonparametric Shape Restriction and Parametric Model Specification Testing using Binscatter.{p_end} + + +{marker syntax}{...} +{title:Syntax} + +{p 4 13} {cmdab:binstest} {depvar} {it:indvar} [{it:othercovs}] {ifin} {weight} [ {cmd:,} {p_end} +{p 13 13} {opt estmethod(cmdname)} {opt deriv(v)} {opt at(position)} {opt nolink}{p_end} +{p 13 13} {opt absorb(absvars)} {opt reghdfeopt(reghdfe_option)}{p_end} +{p 13 13} {opt testmodel(testmodelopt)} {opt testmodelparfit(filename)} {opt testmodelpoly(p)}{p_end} +{p 13 13} {opt testshape(testshapeopt)} {opt testshapel(numlist)} {opt testshaper(numlist)} {opt testshape2(numlist)} {opt lp(metric)}{p_end} +{p 13 13} {opt bins(p s)} {opt 
nbins(nbinsopt)} {opt binspos(position)} {opt binsmethod(method)} {opt nbinsrot(#)} {opt randcut(#)}{p_end} +{p 13 13} {cmd:pselect(}{it:{help numlist}}{cmd:)} {cmd:sselect(}{it:{help numlist}}{cmd:)}{p_end} +{p 13 13} {opt nsims(#)} {opt simsgrid(#)} {opt simsseed(seed)}{p_end} +{p 13 13} {opt dfcheck(n1 n2)} {opt masspoints(masspointsoption)}{p_end} +{p 13 13} {cmd:vce(}{it:{help vcetype}}{cmd:)} {opt asyvar(on/off)} {opt estmethodopt(cmd_option)} {opt usegtools(on/off)} ]{p_end} + +{p 4 8} where {depvar} is the dependent variable, {it:indvar} is the independent variable for binning, and {it:othercovs} +are other covariates to be controlled for.{p_end} + +{p 4 8} The degree of the piecewise polynomial p, the number of smoothness constraints s, and the derivative order v are integers +satisfying 0 <= s,v <= p, which can take different values in each case.{p_end} + +{p 4 8} At least one test has to be specified via {opt testmodelparfit()}, {opt testmodelpoly()}, {opt testshapel()}, +{opt testshaper()} and/or {opt testshape2()}. +{p_end} + +{p 4 8} {opt fweight}s, {opt aweight}s and {opt pweight}s are allowed; see {help weight}.{p_end} + +{marker description}{...} +{title:Description} + +{p 4 8} {cmd:binstest} implements binscatter-based hypothesis testing procedures for parametric functional forms of +and nonparametric shape restrictions on the regression function estimators, following the results in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":Cattaneo, Crump, Farrell and Feng (2022a)}. +If the binning scheme is not set by the user, the companion command {help binsregselect:binsregselect} is used +to implement binscatter in a data-driven (optimal) way and inference procedures are based on robust bias correction. 
+Binned scatter plots based on different models can be constructed using the companion commands {help binsreg:binsreg}, +{help binsqreg: binsqreg}, {help binslogit:binslogit} and {help binsprobit:binsprobit}. +{p_end} + +{p 4 8} A detailed introduction to this command is given in +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Cattaneo, Crump, Farrell and Feng (2022b)}. +Companion R and Python packages with the same capabilities are available (see website below). +{p_end} + +{p 4 8} Companion commands: {help binsreg:binsreg} for binscatter regression with robust inference procedures and plots, +{help binsqreg:binsqreg} for binscatter quantile regression with robust inference procedures and plots, +{help binslogit:binslogit} for binscatter logit estimation with robust inference procedures and plots, +{help binsprobit:binsprobit} for binscatter probit estimation with robust inference procedures and plots, +and {help binsregselect:binsregselect} for data-driven (optimal) binning selection.{p_end} + +{p 4 8} Related Stata, R and Python packages are available in the following website:{p_end} + +{p 8 8} {browse "https://nppackages.github.io/":https://nppackages.github.io/}{p_end} + + +{marker options}{...} +{title:Options} + +{dlgtab:Estimand} + +{p 4 8} {opt estmethod(cmdname)} specifies the binscatter model. The default is {cmd:estmethod(reg)}, +which corresponds to the binscatter least squares regression. Other options are: {cmd:estmethod(qreg #)} +for binscatter quantile regression where # is the quantile to be estimated, {cmd:estmethod(logit)} for +binscatter logistic regression and {cmd:estmethod(probit)} for binscatter probit regression. +{p_end} + +{p 4 8} {opt deriv(v)} specifies the derivative order of the regression function for estimation, testing and plotting. +The default is {cmd:deriv(0)}, which corresponds to the function itself. 
+{p_end} + +{p 4 8} {opt at(position)} specifies the values of {it:othercovs} at which the estimated function is evaluated for plotting. +The default is {cmd:at(mean)}, which corresponds to the mean of {it:othercovs}. Other options are: {cmd:at(median)} for the median of {it:othercovs}, +{cmd:at(0)} for zeros, and {cmd:at(filename)} for particular values of {it:othercovs} saved in another file. +{p_end} + +{p 4 8} Note: When {cmd:at(mean)} or {cmd:at(median)} is specified, all factor variables in {it:othercovs} (if specified) +are excluded from the evaluation (set as zero). +{p_end} + +{p 4 8}{opt nolink} specifies that the function within the inverse link (logistic) function be reported instead of +the conditional probability function. This option is used only if logit or probit model is specified in {cmd:estmethod()}. +{p_end} + +{dlgtab:Reghdfe} + +{p 4 8} {opt absorb(absvars)} specifies categorical variables (or interactions) representing the fixed effects to be absorbed. +This is equivalent to including an indicator/dummy variable for each category of each {it:absvar}. +When {cmd:absorb()} is specified, the community-contributed command {cmd:reghdfe} instead of the command {cmd:regress} is used. +{p_end} + +{p 4 8} {opt reghdfeopt(reghdfe_option)} options to be passed on to the command {cmd:reghdfe}. +Important: {cmd:absorb()} and {cmd:vce()} should not be specified within this option. +{p_end} + +{p 4 8} For more information about the community-contributed command {cmd:reghdfe}, please see {browse "http://scorreia.com/software/reghdfe/":http://scorreia.com/software/reghdfe/}. + +{dlgtab:Parametric Model Specification Testing} + +{p 4 8} {opt testmodel(testmodelopt)} sets the degree of polynomial and the number of smoothness constraints for parametric model specification testing. +If {cmd:testmodel(p s)} is specified, a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints is used. 
+If {cmd:testmodel(T)} or {cmd:testmodel()} is specified, +{cmd:testmodel(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +The default is {cmd:testmodel()}. +{p_end} + +{p 4 8} {opt testmodelparfit(filename)} specifies a dataset which contains the evaluation grid and fitted values of the model(s) to be tested against. +The file must have a variable with the same name as {it:indvar}, which contains a series of evaluation points at which +the binscatter model and the parametric model of interest are compared with each other. +Each parametric model is represented by a variable named as {it:binsreg_fit*}, which must contain the fitted values at the corresponding evaluation points. +{p_end} + +{p 4 8} {opt testmodelpoly(p)} specifies the degree of a global polynomial model to be tested against. +{p_end} + +{dlgtab:Nonparametric Shape Restriction Testing} + +{p 4 8} {opt testshape(testshapeopt)} sets the degree of polynomial and the number of smoothness constraints +for nonparametric shape restriction testing. If {cmd:testshape(p s)} is specified, +a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints is used. +If {cmd:testshape(T)} or {cmd:testshape()} is specified, +{cmd:testshape(1 1)} is used unless the degree {it:p} and smoothness {it:s} selection +is requested via the option {cmd:pselect()} (see more details in the explanation of {cmd:pselect()}). +The default is {cmd:testshape()}. +{p_end} + +{p 4 8} {opt testshapel(numlist)} specifies a {help numlist} of null boundary values for hypothesis testing. +Each number {it:a} in the {it:numlist} corresponds to one boundary of a one-sided hypothesis test to the left of the form H0: {it:sup_x mu(x)<=a}. +{p_end} + +{p 4 8} {opt testshaper(numlist)} specifies a {help numlist} of null boundary values for hypothesis testing. 
+Each number {it:a} in the {it:numlist} corresponds to one boundary of a one-sided hypothesis test to the right of the form H0: {it:inf_x mu(x)>=a}. +{p_end} + +{p 4 8} {opt testshape2(numlist)} specifies a {help numlist} of null boundary values for hypothesis testing. +Each number {it:a} in the {it:numlist} corresponds to one boundary of a two-sided hypothesis test of the +form H0: {it:sup_x |mu(x)-a|=0}. +{p_end} + +{dlgtab:Metric for Hypothesis Testing} + +{p 4 8} {opt lp(metric)} specifies an Lp metric used for (two-sided) parametric model specification testing and/or shape restriction testing. +The default is {cmd:lp(inf)}, +which corresponds to the sup-norm. Other options are {cmd:lp(q)} for a positive integer {cmd:q}. +{p_end} + +{dlgtab:Binning/Degree/Smoothness Selection} + +{p 4 8} {opt bins(p s)} sets a piecewise polynomial of degree {it:p} with {it:s} smoothness constraints for +data-driven (IMSE-optimal) selection of the partitioning/binning scheme. +The default is {cmd:bins(0 0)}, which corresponds to the piecewise constant. + +{p 4 8} {opt nbins(nbinsopt)} sets the number of bins for partitioning/binning of {it:indvar}. +If {cmd:nbins(T)} or {cmd:nbins()} (default) is specified, the number of bins is selected via the companion command {help binsregselect:binsregselect} +in a data-driven, optimal way whenever possible. If a {help numlist:numlist} with more than one number is specified, +the number of bins is selected within this list via the companion command {help binsregselect:binsregselect}. +{p_end} + +{p 4 8} {opt binspos(position)} specifies the position of binning knots. +The default is {cmd:binspos(qs)}, which corresponds to quantile-spaced binning (canonical binscatter). +Other options are: {cmd:es} for evenly-spaced binning, or a {help numlist} for manual specification of the positions +of inner knots (which must be within the range of {it:indvar}). 
+{p_end} + +{p 4 8} {opt binsmethod(method)} specifies the method for data-driven selection of the number of bins via +the companion command {help binsregselect:binsregselect}. +The default is {cmd:binsmethod(dpi)}, which corresponds to the IMSE-optimal direct plug-in rule. +The other option is: {cmd:rot} for rule of thumb implementation. +{p_end} + +{p 4 8} {opt nbinsrot(#)} specifies an initial number of bins value used to construct the DPI number of bins selector. +If not specified, the data-driven ROT selector is used instead. +{p_end} + +{p 4 8} {opt randcut(#)} specifies the upper bound on a uniformly distributed variable used to draw a subsample +for bins/degree/smoothness selection. +Observations for which {cmd:runiform()<=#} are used. # must be between 0 and 1. +By default, max(5,000, 0.01n) observations are used if the sample size n>5,000. +{p_end} + +{p 4 8} {opt pselect(numlist)} specifies a list of numbers within which the degree of polynomial {it:p} for +point estimation is selected. If the selected optimal degree is {it:p}, then piecewise polynomials +of degree {it:p+1} are used to conduct testing +for nonparametric shape restrictions or parametric model specifications. +{p_end} + +{p 4 8} {opt sselect(numlist)} specifies a list of numbers within which the number of smoothness constraints {it:s} +for point estimation is selected. If the selected optimal smoothness is {it:s}, +then piecewise polynomials with {it:s+1} smoothness constraints are used to conduct testing +for nonparametric shape restrictions or parametric model specifications. +If not specified, for each value {it:p} supplied in the +option {cmd:pselect()}, only the piecewise polynomial with the maximum smoothness is considered, i.e., {it:s=p}. +{p_end} + +{p 4 8} Note: To implement the degree or smoothness selection, in addition to {cmd:pselect()} +or {cmd:sselect()}, {cmd:nbins(#)} must be specified. 
+{p_end} + +{dlgtab:Simulation} + +{p 4 8} {opt nsims(#)} specifies the number of random draws for hypothesis testing. +The default is {cmd:nsims(500)}, which corresponds to 500 draws from a standard Gaussian random vector of size [(p+1)*J - (J-1)*s]. +A large number of random draws is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsgrid(#)} specifies the number of evaluation points of an evenly-spaced grid within each bin used +for evaluation of the supremum (infimum or Lp metric) operation needed for hypothesis testing procedures. +The default is {cmd:simsgrid(20)}, which corresponds to 20 evenly-spaced evaluation points within +each bin for approximating the supremum (infimum or Lp metric) operator. +A large number of evaluation points is recommended to obtain the final results. +{p_end} + +{p 4 8} {opt simsseed(#)} sets the seed for simulations. +{p_end} + +{dlgtab:Mass Points and Degrees of Freedom} + +{p 4 8} {opt dfcheck(n1 n2)} sets cutoff values for minimum effective sample size checks, which take into account the number of unique values of {it:indvar} +(i.e., adjusting for the number of mass points), number of clusters, and degrees of freedom of the different statistical models considered. +The default is {cmd:dfcheck(20 30)}. See Cattaneo, Crump, Farrell and Feng (2022b) for more details. +{p_end} + +{p 4 8} {opt masspoints(masspointsoption)} specifies how mass points in {it:indvar} are handled. +By default, all mass point and degrees of freedom checks are implemented. 
+Available options: +{p_end} +{p 8 8} {opt masspoints(noadjust)} omits mass point checks and the corresponding effective sample size adjustments.{p_end} +{p 8 8} {opt masspoints(nolocalcheck)} omits within-bin mass point and degrees of freedom checks.{p_end} +{p 8 8} {opt masspoints(off)} sets {opt masspoints(noadjust)} and {opt masspoints(nolocalcheck)} simultaneously.{p_end} +{p 8 8} {opt masspoints(veryfew)} forces the command to proceed as if {it:indvar} has only a small number of mass points (i.e., distinct values). +In other words, forces the command to proceed as if the mass point and degrees of freedom checks were failed.{p_end} + +{dlgtab:Other Options} + +{p 4 8} {cmd:vce(}{it:{help vcetype}}{cmd:)} specifies the {it:vcetype} for variance estimation used by the commands {help regress##options:regress}, +{help logit##options:logit}, {help probit##options:probit}, +{help qreg##qreg_options:qreg} or {cmd:reghdfe}. The default is {cmd:vce(robust)}. +{p_end} + +{p 4 8} {opt asyvar(on/off)} specifies the method used to compute standard errors. +If {cmd:asyvar(on)} is specified, the standard error of the nonparametric component is used and the +uncertainty related to other control variables {it:othercovs} is omitted. Default is {cmd:asyvar(off)}, +that is, the uncertainty related to {it:othercovs} is taken into account. +{p_end} + +{p 4 8} {opt estmethodopt(cmd_option)} options to be passed on to the estimation command specified in {cmd:estmethod()}. +For example, options that control for the optimization process can be added here. +{p_end} + +{p 4 8}{opt usegtools(on/off)} forces the use of several commands in the community-contributed Stata package {cmd:gtools} +to speed the computation up, if {it:on} is specified. +Default is {cmd:usegtools(off)}. +{p_end} + +{p 4 8} For more information about the package {cmd:gtools}, please see {browse "https://gtools.readthedocs.io/en/latest/index.html":https://gtools.readthedocs.io/en/latest/index.html}. 
+{p_end} + +{marker examples}{...} +{title:Examples} + +{p 4 8} Setup{p_end} +{p 8 8} . {stata sysuse auto} + +{p 4 8} Test for linearity{p_end} +{p 8 8} . {stata binstest mpg weight foreign, testmodelpoly(1)}{p_end} + +{p 4 8} Test for monotonicity{p_end} +{p 8 8} . {stata binstest mpg weight foreign, deriv(1) bins(1 1) testshapel(0)}{p_end} + + +{marker stored_results}{...} +{title:Stored results} + +{synoptset 17 tabbed}{...} +{p2col 5 17 21 2: Scalars}{p_end} +{synopt:{cmd:e(N)}}number of observations{p_end} +{synopt:{cmd:e(Ndist)}}number of distinct values{p_end} +{synopt:{cmd:e(Nclust)}}number of clusters{p_end} +{synopt:{cmd:e(nbins)}}number of bins{p_end} +{synopt:{cmd:e(p)}}degree of polynomial for bin selection{p_end} +{synopt:{cmd:e(s)}}smoothness of polynomial for bin selection{p_end} +{synopt:{cmd:e(testshape_p)}}degree of polynomial for testing shape restrictions{p_end} +{synopt:{cmd:e(testshape_s)}}smoothness of polynomial for testing shape restrictions{p_end} +{synopt:{cmd:e(testmodel_p)}}degree of polynomial for testing model specifications{p_end} +{synopt:{cmd:e(testmodel_s)}}smoothness of polynomial for testing model specifications{p_end} +{synopt:{cmd:e(testpolyp)}}degree of polynomial regression model{p_end} +{synopt:{cmd:e(stat_poly)}}statistic for testing global polynomial model{p_end} +{synopt:{cmd:e(pval_poly)}}p value for testing global polynomial model{p_end} +{synopt:{cmd:e(imse_var_rot)}}variance constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_bsq_rot)}}bias constant in IMSE, ROT selection{p_end} +{synopt:{cmd:e(imse_var_dpi)}}variance constant in IMSE, DPI selection{p_end} +{synopt:{cmd:e(imse_bsq_dpi)}}bias constant in IMSE, DPI selection{p_end} +{p2col 5 17 21 2: Macros}{p_end} +{synopt:{cmd:e(testvarlist)}}varlist found in {cmd:testmodel()}{p_end} +{synopt:{cmd:e(testvalue2)}}values in {cmd:testshape2()}{p_end} +{synopt:{cmd:e(testvalueR)}}values in {cmd:testshaper()}{p_end} +{synopt:{cmd:e(testvalueL)}}values in 
{cmd:testshapel()}{p_end} +{p2col 5 17 21 2: Matrices}{p_end} +{synopt:{cmd:e(pval_model)}}p values for {cmd:testmodel()}{p_end} +{synopt:{cmd:e(stat_model)}}statistics for {cmd:testmodel()}{p_end} +{synopt:{cmd:e(pval_shape2)}}p values for {cmd:testshape2()}{p_end} +{synopt:{cmd:e(stat_shape2)}}statistics for {cmd:testshape2()}{p_end} +{synopt:{cmd:e(pval_shapeR)}}p values for {cmd:testshaper()}{p_end} +{synopt:{cmd:e(stat_shapeR)}}statistics for {cmd:testshaper()}{p_end} +{synopt:{cmd:e(pval_shapeL)}}p values for {cmd:testshapel()}{p_end} +{synopt:{cmd:e(stat_shapeL)}}statistics for {cmd:testshapel()}{p_end} + +{marker references}{...} +{title:References} + +{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022a. +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Binscatter.pdf":On Binscatter}. +{it:arXiv:1902.09608}. +{p_end} + +{p 4 8} Cattaneo, M. D., R. K. Crump, M. H. Farrell, and Y. Feng. 2022b. +{browse "https://nppackages.github.io/references/Cattaneo-Crump-Farrell-Feng_2022_Stata.pdf":Binscatter Regressions}. +{it:arXiv:1902.09615}. +{p_end} + + +{marker authors}{...} +{title:Authors} + +{p 4 8} Matias D. Cattaneo, Princeton University, Princeton, NJ. +{browse "mailto:cattaneo@princeton.edu":cattaneo@princeton.edu}. +{p_end} + +{p 4 8} Richard K. Crump, Federal Reserve Bank of New York, New York, NY. +{browse "mailto:richard.crump@ny.frb.org":richard.crump@ny.frb.org}. +{p_end} + +{p 4 8} Max H. Farrell, University of Chicago, Chicago, IL. +{browse "mailto:max.farrell@chicagobooth.edu":max.farrell@chicagobooth.edu}. +{p_end} + +{p 4 8} Yingjie Feng, Tsinghua University, Beijing, China. +{browse "mailto:fengyingjiepku@gmail.com":fengyingjiepku@gmail.com}. 
+{p_end} + diff --git a/110/replication_package/replication/ado/plus/backup.trk b/110/replication_package/replication/ado/plus/backup.trk new file mode 100644 index 0000000000000000000000000000000000000000..571bf0f90075b41209400cf69fdc171c912edb57 --- /dev/null +++ b/110/replication_package/replication/ado/plus/backup.trk @@ -0,0 +1,633 @@ +* 00000015 +*! version 1.0.0 +* Do not erase or edit this file +* It is used by Stata to track the ado and help +* files you have installed. + +S http://fmwww.bc.edu/repec/bocode/g +N geodist.pkg +D 5 Mar 2023 +U 1 +d 'GEODIST': module to compute geographical distances +d +d geodist calculates geographical distances by measuring the +d length of the shortest path between two points along the surface +d of a mathematical model of the earth. By default, geodist +d implements Vincenty's (1975) formula to calculate distances on a +d reference ellipsoid. geodist can also calculate great-circle +d distances using the haversine formula. +d +d KW: geodetic +d KW: geodesic,Vincenty,great-circle,ellipsoid,distance +d +d Requires: Stata version 9.2 +d +d Distribution-Date: 20190624 +d +d Author: Robert Picard +d Support: email robertpicard@@gmail.com +d +f g\geodist.ado +f g\geodist_run.ado +f g\geodist.hlp +e +S http://fmwww.bc.edu/repec/bocode/b +N binscatter.pkg +D 5 Mar 2023 +U 2 +d 'BINSCATTER': module to generate binned scatterplots +d +d binscatter generates binned scatterplots, and is optimized for +d speed in large datasets. Binned scatterplots provide a +d non-parametric way of visualizing the relationship between two +d variables. With a large number of observations, a scatterplot +d that plots every data point would become too crowded to interpret +d visually. binscatter groups the x-axis variable into equal-sized +d bins, computes the mean of the x-axis and y-axis variables within +d each bin, then creates a scatterplot of these data points. 
It +d provides built-in options to control for covariates before +d plotting the relationship. It will also plot fit lines based on +d the underlying data, and can automatically handle regression +d discontinuities. +d +d KW: scatterplot +d KW: data description +d KW: regression discontinuity +d +d Requires: Stata version 12.1 +d +d Distribution-Date: 20131124 +d +d Author: Michael Stepner +d Support: email michaelstepner@@gmail.com +d +f b\binscatter.ado +f b\binscatter.sthlp +e +S http://fmwww.bc.edu/repec/bocode/i +N ivreg2.pkg +D 5 Mar 2023 +U 3 +d 'IVREG2': module for extended instrumental variables/2SLS and GMM estimation +d +d ivreg2 provides extensions to Stata's official ivregress and +d newey. Its main capabilities: two-step feasible GMM estimation; +d continuously updated GMM estimation (CUE); LIML and k-class +d estimation; automatic output of the Hansen-Sargan or +d Anderson-Rubin statistic for overidentifying restrictions; C +d statistic test of exogeneity of subsets of instruments (orthog() +d option); kernel-based autocorrelation-consistent (AC) and +d heteroskedastic and autocorrelation-consistent (HAC) estimation, +d with user-specified choice of kernel; Cragg's "heteroskedastic +d OLS" (HOLS) estimator; default reporting of large-sample +d statistics (z and chi-squared rather than t and F); small option +d to report small-sample statistics; first-stage regression +d reported with F-test of excluded instruments and R-squared with +d included instruments "partialled-out"; enhanced Kleibergen-Paap +d and Cragg-Donald tests for weak instruments, redundancy of +d instruments, significance of endogenous regressors; two-way +d clustering of standard errors; Kiefer and Driscoll-Kraay +d standard errors. ivreg2 can also be used for ordinary least +d squares (OLS) estimation using the same command syntax as Stata's +d official regress and newey. New in this version: ivreg2 now +d supports factor variables. 
This is version 4.1.11 of ivreg2, +d updated from that published in Stata Journal, 5(4), requiring +d Stata 11.2 or better. Stata 8.2/9.2/10.2 users may use this +d routine, which will automatically call ivreg28, ivreg29, or +d ivreg210, respectively. These versions are now included in the +d ivreg2 package. Stata 7 users may use the Stata Journal version +d of ivreg2, accessible via net search ivreg2. +d +d KW: instrumental variables +d KW: Sargan test +d KW: robust estimation +d KW: orthogonality +d KW: GMM +d KW: Hansen's J +d KW: heteroskedastic OLS, +d +d Requires: Stata version 11.2 and ranktest from SSC +d +d Distribution-Date: 20220510 +d +d Author: Christopher F Baum, Boston College +d Support: email baum@@bc.edu +d +d Author: Mark E Schaffer, Heriot-Watt University +d Support: email m.e.schaffer@@hw.ac.uk +d +d Author: Steven Stillman, Free University of Bozen-Bolzano +d Support: email Steven.Stillman@@unibz.it +d +f i\ivreg2.ado +f i\ivreg2.sthlp +f i\ivreg2_p.ado +f l\livreg2.mlib +f i\ivreg210.ado +f i\ivreg210.sthlp +f i\ivreg210_p.ado +f i\ivreg29.ado +f i\ivreg29.hlp +f i\ivreg29_p.ado +f i\ivreg29_cue.ado +f i\ivreg28.ado +f i\ivreg28.hlp +f i\ivreg28_p.ado +f i\ivreg28_cue.ado +e +S http://fmwww.bc.edu/repec/bocode/r +N ranktest.pkg +D 5 Mar 2023 +U 4 +d 'RANKTEST': module to test the rank of a matrix +d +d ranktest implements various tests for the rank of a matrix. +d Tests of the rank of a matrix have many practical applications. +d For example, in econometrics the requirement for identification +d is the rank condition, which states that a particular matrix must +d be of full column rank. Another example from econometrics +d concerns cointegration in vector autoregressive (VAR) models; the +d Johansen trace test is a test of a rank of a particular matrix. +d The traditional test of the rank of a matrix for the standard +d (stationary) case is the Anderson (1951) canonical correlations +d test. 
If we denote one list of variables as Y and a second as Z, +d and we calculate the squared canonical correlations between Y and +d Z, the LM form of the Anderson test, where the null hypothesis is +d that the matrix of correlations or regression parameters B +d between Y and Z has rank(B)=r, is N times the sum of the r+1 +d largest squared canonical correlations. A large test statistic +d and rejection of the null indicates that the matrix has rank at +d least r+1. The Cragg-Donald (1993) statistic is a closely related +d Wald test for the rank of a matrix. The standard versions of +d these tests require the assumption that the covariance matrix has +d a Kronecker form; when this is not so, e.g., when disturbances +d are heteroskedastic or autocorrelated, the test statistics are no +d longer valid. ranktest implements various generalizations of +d these tests - Kleibergen-Paap, Cragg-Donald, and J-type 2-step +d GMM and CUE GMM tests - to the case of a non-Kronecker covariance +d matrix. The implementation in ranktest will calculate test +d statistics that are robust to various forms of +d heteroskedasticity, autocorrelation, and clustering. 
+d +d KW: matrix +d KW: rank +d KW: collinearity +d KW: cointegration +d +d Requires: Stata version 12 (version 9.2 for ranktest9, version 11 for ranktest11) +d +d Distribution-Date: 20200929 +d +d Author: Frank Kleibergen, Brown University +d Support: email Frank_Kleibergen@@brown.edu +d +d Author: Mark E Schaffer, Heriot-Watt University +d Support: email m.e.schaffer@@hw.ac.uk +d +d Author: Frank Windmeijer, University of Oxford +d Support: email frank.windmeijer@@stats.ox.ac.uk +d +f r\ranktest.ado +f r\ranktest9.ado +f r\ranktest11.ado +f r\ranktest.sthlp +f r\ranktest11.sthlp +f l\livreg2.mlib +e +S http://fmwww.bc.edu/repec/bocode/e +N estout.pkg +D 5 Mar 2023 +U 5 +d 'ESTOUT': module to make regression tables +d +d estout produces a table of regression results from one or +d several models for use with spreadsheets, LaTeX, HTML, or a +d word-processor table. eststo stores a quick copy of the active +d estimation results for later tabulation. esttab is a wrapper for +d estout. It displays a pretty looking publication-style regression +d table without much typing. estadd adds additional results to the +d e()-returns for one or several models previously fitted and +d stored. This package subsumes the previously circulated esto, +d esta, estadd, and estadd_plus. An earlier version of estout is +d available as estout1. 
+d +d KW: estimates +d KW: LaTeX +d KW: HTML +d KW: word processor +d KW: output +d +d Requires: Stata version 8.2 +d +d Distribution-Date: 20230212 +d +d Author: Ben Jann, University of Bern +d Support: email jann@@soz.unibe.ch +d +f _\_eststo.ado +f _\_eststo.hlp +f e\estadd.ado +f e\estadd.hlp +f e\estout.ado +f e\estout.hlp +f e\eststo.ado +f e\eststo.hlp +f e\estpost.ado +f e\estpost.hlp +f e\esttab.ado +f e\esttab.hlp +e +S http://fmwww.bc.edu/repec/bocode/e +N erepost.pkg +D 5 Mar 2023 +U 6 +d 'EREPOST': module to repost the estimation results +d +d erepost changes the b or V matrix of the current estimation +d results or changes the declared estimation sample. erepost is +d similar to official ereturn repost. However, erepost is allowed +d after estimation commands that do not post their results using +d -ereturn post- (e.g. logit) and erepost can be used outside of +d eclass programs. +d +d KW: estimation +d KW: results +d +d Requires: Stata version 8.2 +d +d Distribution-Date: 20150617 +d +d Author: Ben Jann, University of Bern +d Support: email jann@@soz.unibe.ch +d +f e\erepost.ado +f e\erepost.hlp +e +S http://fmwww.bc.edu/repec/bocode/c +N coefplot.pkg +D 5 Mar 2023 +U 7 +d 'COEFPLOT': module to plot regression coefficients and other results +d +d coefplot plots results from estimation commands or Stata +d matrices. Results from multiple models or matrices can be +d combined in a single graph. The default behavior of coefplot is +d to draw markers for coefficients and horizontal spikes for +d confidence intervals. However, coefplot can also produce various +d other types of graphs. 
+d +d KW: graphics +d KW: coefficients +d KW: estimation +d +d Requires: Stata version 11 +d +d Distribution-Date: 20230225 +d +d Author: Ben Jann, University of Bern +d Support: email jann@@soz.unibe.ch +d +f c\coefplot.ado +f c\coefplot.sthlp +e +S http://fmwww.bc.edu/repec/bocode/t +N tmpdir.pkg +D 5 Mar 2023 +U 8 +d 'TMPDIR': module to indicate the directory Stata is using for a temporary directory +d +d tmpdir is designed for programmers who want to know what +d directory Stata writes temp files to. This can be helpful when +d using the file command to write intermediary files for a +d program. Since spaces in directory names can cause problems for +d programs running in Windows operating system, tmpdir replaces +d directory / subdirectory names that contain spaces, or optionally +d any name longer than 8 characters, with the first 6 non-space +d characters plus "~1" or "~2" (up to "~4"). After that it gets a +d bit crazy with hexadecimal replacements. Tmpdir shells out to +d DOS and finds the short directory name that DOS has come up +d with, so there's no attempt to guess the short name. This makes +d it possible to use this program on many different versions of +d Windows and always get the short name right. Tmpdir works on +d all operating systems. +d +d KW: tempdir +d KW: directory +d KW: temporary +d +d Requires: Stata version 8.0 +d +d +d Author: Dan Blanchette, The Carolina Population Center, UNC-CH +d Support: email dan_blanchette@@unc.edu +d +d Distribution-Date: 20110226 +d +f t\tmpdir.ado +f t\tmpdir.hlp +f s\shortdir.ado +f s\shortdir.hlp +f c\confirmdir.ado +f c\confirmdir.hlp +e +S http://fmwww.bc.edu/repec/bocode/r +N reg2hdfe.pkg +D 5 Mar 2023 +U 9 +d 'REG2HDFE': module to estimate a Linear Regression Model with two High Dimensional Fixed Effects +d +d This command implements the algorithm of Guimaraes & Portugal +d for estimation of a linear regression model with two high +d dimensional fixed effects. 
The command is particularly suited for +d use with large data sets because you can "store" the transformed +d variables and reuse them in alternative specifications. The +d command is based on the algorithm presented in Carneiro, +d Guimaraes and Portugal (2009) and explained in more detail in +d Guimaraes and Portugal (2009). +d +d KW: regression +d KW: fixed effects +d KW: two-way fixed effects +d KW: Guimaraes & Portugal +d +d Requires: Stata version 9.1 +d +d Distribution-Date: 20150328 +d +d Author: Paulo Guimaraes, Division of Research, University of South Carolina +d Support: email guimaraes@@moore.sc.edu +d +f r\reg2hdfe.ado +f r\reg2hdfe.hlp +e +S https://raw.githubusercontent.com/rdpackages/rdrobust/master/stata +N rdrobust.pkg +D 5 Mar 2023 +U 10 +d STATA Package: RDROBUST +d +d Authors: Sebastian Calonico, Department of Health Policy and Management, Columbia University, sebastian.calonico@columbia.edu +d Matias D. Cattaneo, Operations Research and Financial Engineering, Princeton University, cattaneo@princeton.edu +d Max H. Farrell, Booth School of Business, University of Chicago, max.farrell@chicagobooth.edu +d Rocio Titiunik, Department of Politics, Princeton University, titiunik@princeton.edu +d +d Distribution-Date: 20221028 +d +d ----------------------- +d BACK COMPATIBILITY: +d ----------------------- +f r\rdbwselect.ado +f r\rdplot.ado +f r\rdplot.sthlp +f r\rdbwselect.sthlp +f r\rdrobust.ado +f r\rdrobust.sthlp +f r\rdrobust_bw.mo +f r\rdrobust_kweight.mo +f r\rdrobust_res.mo +f r\rdrobust_vce.mo +f r\rdrobust_collapse.mo +f r\rdrobust_median.mo +f r\rdrobust_groupid.mo +f r\rdbwselect_2014.ado +f r\rdbwselect_2014.sthlp +f r\rdbwselect_2014_kconst.ado +e +S https://raw.githubusercontent.com/nppackages/binsreg/master/stata +N binsreg.pkg +D 5 Mar 2023 +U 11 +d STATA Package: BINSREG PACKAGE +d +d Authors: Matias D. Cattaneo, Princeton University, cattaneo@princeton.edu +d Richard K. 
Crump, Federal Reserve Bank of New York, richard.crump@ny.frb.org +d Max H. Farrell, University of Chicago, max.farrell@chicagobooth.edu +d Yingjie Feng, Tsinghua University, fengyingjiepku@gmail.com +d +d Date: 09-OCT-2022 +d Distribution-Date: 20221009 +d +f b\binsreg.ado +f b\binsreg.sthlp +f b\binsqreg.ado +f b\binsqreg.sthlp +f b\binslogit.ado +f b\binslogit.sthlp +f b\binsprobit.ado +f b\binsprobit.sthlp +f b\binsregselect.ado +f b\binsregselect.sthlp +f b\binstest.ado +f b\binstest.sthlp +f b\binspwc.ado +f b\binspwc.sthlp +f b\binsreg_irecode.ado +f b\binsreg_pctile.ado +f b\binsreg_checkdrop.mo +f b\binsreg_grids.mo +f b\binsreg_pred.mo +f b\binsreg_pval.mo +f b\binsreg_spdes.mo +f b\binsreg_st_spdes.mo +f b\binsreg_uniq.mo +f b\binsreg_cquantile.mo +f b\binsreg_stat.mo +e +S https://raw.githubusercontent.com/sergiocorreia/ftools/master/src +N ftools.pkg +D 5 Mar 2023 +U 12 +d ftools. Alternatives to common Stata commands optimized for large datasets +d +d Author: Sergio Correia. Board of Governors of the Federal Reserve +d Support: sergio.correia@gmail.com +d +d ftools consists of a Mata file and several Stata commands: +d +d The Mata file creates identifiers (factors) from variables by using +d hash functions instead of sorting the data, so it runs in time O(N) +d and not in O(N log N). +d +d The Stata commands exploit this to avoid -sort- operations, +d at the cost of being slower for small datasets (mainly because of the +d cost involved in moving data from Stata to Mata). +d +d Implemented commands are fcollapse, fegen group, and fsort. +d Note that most of the capabilities of -levels- and -contract- +d are already supported by these commands. +d +d Possible commands include more -egen- functions and -merge- and +d -reshape- alternatives. 
+d +d KW: factor variables +d KW: levels +d KW: mata +d KW: collapse +d KW: contract +d KW: egen +d KW: merge +d KW: levelsof +d KW: sort +d KW: inlist +d +d Requires: Stata version 11.2 +d (Stata 12 or older also require the boottest package from ssc) +d +d Distribution-Date: 20220506 +d +f f\ftools.ado +f f\ftools.sthlp +f f\fcollapse.ado +f f\fcollapse.sthlp +f f\fsort.ado +f f\fsort.sthlp +f f\fisid.ado +f f\fisid.sthlp +f f\fegen.ado +f f\fegen.sthlp +f f\fegen_group.ado +f j\join.ado +f j\join.sthlp +f f\fmerge.ado +f f\fmerge.sthlp +f f\flevelsof.ado +f f\flevelsof.sthlp +f l\local_inlist.ado +f l\local_inlist.sthlp +f f\ftools.mata +f f\ftools_type_aliases.mata +f f\ftools_common.mata +f f\ftools_main.mata +f f\ftools_hash1.mata +f f\ftools_plugin.mata +f f\fcollapse_main.mata +f f\fcollapse_functions.mata +f m\ms_compile_mata.ado +f m\ms_get_version.ado +f m\ms_fvunab.ado +f m\ms_parse_absvars.ado +f m\ms_parse_varlist.ado +f m\ms_parse_vce.ado +f m\ms_expand_varlist.ado +f m\ms_add_comma.ado +f m\ms_fvstrip.ado +f m\ms_fvstrip.sthlp +f p\parallel_map.ado +f p\parallel_map.sthlp +f p\parallel_map_template.do.ado +f f\ftab.ado +e +S https://raw.githubusercontent.com/sergiocorreia/reghdfe/master/src +N reghdfe.pkg +D 5 Mar 2023 +U 13 +d REGHDFE: Linear models with multi-way fixed effects and multi-way clustering +d +d Authors: +d - Sergio Correia. Board of Governors of the Federal Reserve System +d - Noah Constantine. Board of Governors of the Federal Reserve System +d Support: +d - https://github.com/sergiocorreia/reghdfe/issues +d +d reghdfe fits a linear or instrumental-variable/GMM regression absorbing an arbitrary number of categorical factors and factorial interactions +d Optionally, it saves the estimated fixed effects. 
+d +d The estimator employed is described in Correia (2017): +d http://scorreia.com/research/hdfe.pdf +d +d For details (user guide, help, FAQ), see the website: +d http://scorreia.com/reghdfe/ +d +d KW: fixed effects +d KW: panel data +d KW: hdfe +d KW: areg +d KW: xtreg +d KW: MWFE +d KW: MWC +d KW: cluster +d +d Requires: Stata version 11.2 +d +d Required packages: +d ftools +d +d Distribution-Date: 20210216 +d +f r\reghdfe.ado +f r\reghdfe_estat.ado +f r\reghdfe_header.ado +f r\reghdfe_footnote.ado +f r\reghdfe_p.ado +f r\reghdfe.mata +f r\reghdfe.sthlp +f r\reghdfe_programming.sthlp +f e\estfe.ado +f r\reghdfe3.ado +f r\reghdfe3.sthlp +f r\reghdfe3_estat.ado +f r\reghdfe3_footnote.ado +f r\reghdfe3_p.ado +f r\reghdfe5.ado +f r\reghdfe5.mata +f r\reghdfe5.sthlp +f r\reghdfe5_estat.ado +f r\reghdfe5_footnote.ado +f r\reghdfe5_header.ado +f r\reghdfe5_p.ado +f r\reghdfe5_parse.ado +e +S https://raw.githubusercontent.com/sergiocorreia/ivreghdfe/master/src +N ivreghdfe.pkg +D 5 Mar 2023 +U 14 +d Instrumental Variables with High Dimensional Fixed Effects (ivreg2 with an absorb() option) +d +d KW: fixed effects +d KW: ivreg2 +d KW: reghdfe +d +d Requires: Stata version 11.2 +d +d Required packages: +d ftools +d reghdfe +d ivreg2 +d boottest (for Stata version 12 or earlier) +d +d Distribution-Date: 20211214 +f i\ivreghdfe.ado +f i\ivreghdfe.sthlp +e +S https://raw.githubusercontent.com/nppackages/lpdensity/master/stata +N lpdensity.pkg +D 5 Mar 2023 +U 15 +d STATA Package: LPDENSITY +d +d Authors: Matias D. 
Cattaneo, Department of Operations Research and Financial Engineering, Princeton University, cattaneo@princeton.edu +d Michael Jansson, Department of Economics, UC-Berkeley, mjansson@econ.berkeley.edu +d Xinwei Ma, Department of Economics, UCSD, x1ma@ucsd.edu +d +d Distribution-Date: 20230121 +d +f l\lpbwdensity.ado +f l\lpbwdensity.sthlp +f l\lpdensity.ado +f l\lpdensity.sthlp +f l\lpdensity_bwimse.mo +f l\lpdensity_bwirot.mo +f l\lpdensity_bwmse.mo +f l\lpdensity_bwrot.mo +f l\lpdensity_cgen.mo +f l\lpdensity_fn.mo +f l\lpdensity_ggen.mo +f l\lpdensity_normdgp.mo +f l\lpdensity_optfunc.mo +f l\lpdensity_quantile.mo +f l\lpdensity_rep.mo +f l\lpdensity_sgen.mo +f l\lpdensity_tgen.mo +f l\lpdensity_unique.mo +f l\lpdensity_whichmin.mo +e diff --git a/110/replication_package/replication/ado/plus/c/coefplot.ado b/110/replication_package/replication/ado/plus/c/coefplot.ado new file mode 100644 index 0000000000000000000000000000000000000000..90c6fcd537418c276621ad03c76c100fece49e7f --- /dev/null +++ b/110/replication_package/replication/ado/plus/c/coefplot.ado @@ -0,0 +1,3903 @@ +*! 
version 1.8.6 22feb2023 Ben Jann + +program coefplot + version 11 + nobreak { + capt mata: mata drop COEFPLOT_STRUCT + capt n break _coefplot `macval(0)' + local rc = _rc + capt mata: mata drop COEFPLOT_STRUCT + exit `rc' + } +end + +program _coefplot, rclass + // get subgraphs and parse global options + parse_subgraphs `macval(0)' // returns n_subgr, subgr_#, opts + parse_globalopts `macval(opts)' // returns expanded global opts and twopts, + // subgropts0, plotopts0, modelopts0, twplotopts0 + + // backup current estimates, initialize struct + tempname ecurrent + _est hold `ecurrent', restore estsystem nullok + mata: COEFPLOT_STRUCT = coefplot_struct_init() + + // dryrun across subgraphs and plots to collect options + local i 0 + local N_plots 0 + forv j = 1/`n_subgr' { + if "`recycle'"=="" local i 0 + local firstplot_`j' = `i'+1 + parse_plots `j' `macval(subgr_`j')' // returns n_plots_j, plot_j_#, opts + if ("`recycle'"=="" & `n_subgr'>1) { + combine_plotopts, `macval(opts)' _opts0(`macval(plotopts1prev)') + local plotopts1prev `macval(plotopts)' + local opts `macval(plotopts)' `macval(options)' + } + merge_subgropts, `macval(opts)' _opts0(`subgropts0' `plotopts0' `macval(modelopts0)') + // returns subgropts, plotopts1, modelopts1, twplotopts1 + parse_subgropts `j', `subgropts' + forv k = 1/`n_plots_`j'' { + local ++i + local twplotopts1_`i' `twplotopts1_`i'' `twplotopts1' + parse_models `j' `k' `macval(plot_`j'_`k')' + // returns n_models_j_k, model_j_k_#, opts + if `n_models_`j'_`k''==1 & `"`model_`j'_`k'_1'"'=="_skip" { + if `"`opts'"'!="" { + di as err "options not allowed with _skip" + exit 198 + } + continue + } + if (`i'>`N_plots') { // get p#() from twoplotopts0 + parse_get_popt_i `i', `macval(twplotopts0)' // returns twplotopts0, plotopts + if `"`macval(plotopts)'"'!="" { + merge_plotopts, `macval(plotopts)' // to isolate the modelopts + local popt_mopts_`i' `macval(modelopts2)' + merge_plotopts, `macval(opts)' /// + _opts0(`macval(modelopts2)' 
`plotopts' `_opts0' `options') + // returns plotopts, modelopts2, options, _opts0 + local opts `macval(modelopts2)' `plotopts' `_opts0' `options' + } + } + else { + if `"`macval(popt_mopts_`i')'"'!="" { // add modelopts from p#() + merge_plotopts, `macval(opts)' _opts0(`macval(popt_mopts_`i')') + // returns plotopts, modelopts2, options, _opts0 + local opts `macval(modelopts2)' `plotopts' `_opts0' `options' + } + } + if ("`recycle'"=="" & `n_subgr'>1) { + combine_plotopts, `macval(opts)' _opts0(`macval(plotopts2_`i')') + local plotopts2_`i' `macval(plotopts)' + local opts `macval(plotopts)' `macval(options)' + } + merge_plotopts, `macval(opts)' _opts0(`macval(plotopts1)' `macval(modelopts1)') + // returns plotopts, modelopts2, options, _opts0 + if `"`_opts0'"'!="" error 198 + local modelopts_`j'_`k' `macval(modelopts2)' + local twplotopts_`i' `twplotopts_`i'' `options' + local plotopts_`i' `plotopts' + } + local lastplot_`j' `i' + local N_plots = max(`N_plots', `i') + } + // expand plotopts + local customoffset 0 + forv i = 1/`N_plots' { + parse_plotopts `i', `plotopts_`i'' + if `"`offset_`i''"'!="" local customoffset 1 + } + // parse cismooth + forv i = 1/`N_plots' { + local cis_`i' = `"`cismooth_`i''"'!="" + if `cis_`i'' { + if `"`cismooth_`i''"'=="cismooth" local cismooth_`i' + parse_cismooth `i', `cismooth_`i'' // returns cis_levels_i, + // cis_n_i, cis_lwidth_i, cis_intens_i, cis_color_i, cis_pstyle_i + } + else local cis_n_`i' = 0 + } + + // parse models and collect results + local i 0 + forv j = 1/`n_subgr' { + if "`recycle'"=="" local i 0 + forv k = 1/`n_plots_`j'' { + local ++i + if `n_models_`j'_`k''==1 & `"`macval(model_`j'_`k'_1)'"'=="_skip" { + continue + } + forv l = 1/`n_models_`j'_`k'' { + parse_model `macval(model_`j'_`k'_`l')' // returns model, matrix, opts + if `"`matrix'"'=="" { + if `"`model'"'=="." { + _est unhold `ecurrent' + } + else { + qui est restore `model' + } + } + collect_coefs `"`model'"' /// + "`matrix'" /// matrix mode? 
+ "`atmode'" /// whether at() is used; will be replaced + `i' /// plot number + `j' /// subgraph number + "`cis_levels_`i''" /// + , `macval(opts)' _opts0(`macval(modelopts_`j'_`k')') + // returns equation, atmode, n_ci + // may reset mlabel_# and mlabvposition_# + local n_ci = `n_ci' - `cis_n_`i'' + if `"`matrix'"'=="" & `"`model'"'=="." { + _est hold `ecurrent', restore estsystem nullok + } + if "`n_ci_`i''"=="" local n_ci_`i' 0 + local n_ci_`i' = max(`n_ci_`i'', `n_ci') + mata: coefplot_add_label(COEFPLOT_STRUCT, "by", `j', "model", 0) + if `"`equation'"'!="" { + if `"`model'"'=="." local model `"`equation'"' + else local model `"`model'=`equation'"' + } + mata: coefplot_add_label(COEFPLOT_STRUCT, "plot", `i', "model", 0) + } + } + } + forv i = 1/`N_plots' { // expand ciopts + if `n_ci_`i''>0 { + parse_ciopts_nocilwincr `i', `ciopts_`i'' `cirecast_`i'' // returns nocilwincr_# + parse_ciopts `i' `n_ci_`i'' `ciopts_`i'' `cirecast_`i'' + } + } + mata: coefplot_set_r(COEFPLOT_STRUCT) // returns r, N_ci, N_aux, mlbllen + local mlbllen = max(1, min(c(maxstrvarlen), `mlbllen')) + if `r'==0 { + di as txt "(nothing to plot)" + exit + } + + // cleanup and and arrange + if `"`horizontal'`vertical'"'=="" { + if `atmode' local vertical vertical + else local horizontal horizontal + } + if `"`horizontal'"'!="" { + local xaxis y + local yaxis x + local offdir "-" + local reverse yscale(reverse) + local plotregion plotregion(margin(t=0 b=0)) + } + else { // vertical + local xaxis x + local yaxis y + local offdir "+" + local plotregion plotregion(margin(l=0 r=0)) + } + if `atmode' { + if "`bycoefs'"!="" { + di as err "at() and bycoefs not both allowed" + exit 198 + } + local grid `"`grid'`gridopts'"' + foreach opt in order coeflabels eqlabels relocate headings /// + groups grid { + if `"``opt''"'!="" { + di as err "at() and `opt'() not both allowed" + exit 198 + } + } + mata: coefplot_add_eq_and_grp(COEFPLOT_STRUCT) + local reverse + local plotregion + local meqs 0 + } + else { + 
if `"`eqstrict'"'!="" local meqs 1 + else { + mata: coefplot_multiple_eqs(COEFPLOT_STRUCT) // returns meqs + } + mata: coefplot_arrange(COEFPLOT_STRUCT) // updates local r + mata: coefplot_coeflbls(COEFPLOT_STRUCT) + coeflbls "`labels'" `"`clinteract'"' + if "`bycoefs'"!="" { + mata: coefplot_bycoefs(COEFPLOT_STRUCT) // returns n_subgr + local meqs 0 + } + mata: coefplot_catvals(COEFPLOT_STRUCT) + // modifies C.at; sets C.eq, C.grp; returns groups + mata: coefplot_headings(COEFPLOT_STRUCT) + // modifies C.at; returns hlbls + } + + // save results to variables + if `"`generate'"'=="" { + if (_N > `r') & "`nodrop'"=="" { + preserve + qui keep in 1/`r' // remove extra observations to speed up + } + else if (_N < `r') { + preserve + qui set obs `r' + } + } + else { + if (_N < `r') { + di as txt "need to create additional observations; " _c + di as txt "press break to abort" + more + set obs `r' + } + } + tempname by plot at mlbl mlpos b V se t df pval eq grp + qui gen `by' = . + qui gen `plot' = . + qui gen `at' = . + qui gen str`mlbllen' `mlbl' = "" + qui gen `mlpos' = . + qui gen `b' = . + qui gen `V' = . + qui gen `se' = . + qui gen `t' = . + qui gen `df' = . + qui gen `pval' = . + if `"`format'"'!="" { + format `format' `b' `V' `se' `t' `pval' + } + qui gen `eq' = . + qui gen `grp' = . + forv i = 1/`N_ci' { + tempname ll`i' ul`i' + qui gen `ll`i'' = . + qui gen `ul`i'' = . + if `"`format'"'!="" { + format `format' `ll`i'' `ul`i'' + } + } + forv i = 1/`N_aux' { + tempname aux`i' + qui gen `aux`i'' = . 
+ if `"`format'"'!="" { + format `format' `aux`i'' + } + } + if `"`generate'"'!="" { + preserve + local returnvars + local i 0 + foreach v in by plot at mlbl mlpos b V se t df pval { + local ++i + local varl: word `i' of /// + "subgraph ID" /// + "plot ID" /// + "plot position (category axis)" /// + "marker label" /// + "marker label position" /// + "coefficient" /// + "variance" /// + "standard error" /// + "t or z statistic" /// + "degrees of freedom" /// + "p-value" + if "`replace'"!="" { + capt confirm new variable `generate'`v', exact + if _rc { + drop `generate'`v' + } + } + rename ``v'' `generate'`v' + lab var `generate'`v' `"`varl'"' + local `v' `generate'`v' + local returnvars `returnvars' `generate'`v' + } + forv i = 1/`N_ci' { + foreach v in ll ul { + if "`v'"=="ll" local varl "CI`i': lower limit" + else local varl "CI`i': upper limit" + if "`replace'"!="" { + capt confirm new variable `generate'`v'`i', exact + if _rc { + drop `generate'`v'`i' + } + } + rename ``v'`i'' `generate'`v'`i' + lab var `generate'`v'`i' `"`varl'"' + local `v'`i' `generate'`v'`i' + local returnvars `returnvars' `generate'`v'`i' + } + } + forv i = 1/`N_aux' { + local varl "Auxiliary variable `i'" + if "`replace'"!="" { + capt confirm new variable `generate'aux`i', exact + if _rc { + drop `generate'aux`i' + } + } + rename `aux`i'' `generate'aux`i' + lab var `generate'aux`i' `"`varl'"' + local aux`i' `generate'aux`i' + local returnvars `returnvars' `generate'aux`i' + } + } + mata: coefplot_put(COEFPLOT_STRUCT) + mata: coefplot_apply_transform(COEFPLOT_STRUCT) + qui compress `at' `df' `plot' `by' `eq' `grp' `mlpos' // not really needed + // get labels + set_by_and_plot_labels `plot' `by' + if `"`plotlabels'"'!="" { + set_labels "`plot'" "`N_plots'" `"`plotlabels'"' + } + if `"`pltrunc'`plwrap'"'!="" { + truncwrap_vlabels "`plot'" "`N_plots'" "`pltrunc'" /// + "`plwrap'" "`plbreak'" + } + if "`bycoefs'"=="" { + if `"`bylabels'"'!="" { + set_labels "`by'" "`n_subgr'" `"`bylabels'"' + } 
+ if `"`bltrunc'`blwrap'"'!="" { + truncwrap_vlabels "`by'" "`n_subgr'" "`bltrunc'" /// + "`blwrap'" "`blbreak'" + } + } + if `atmode'==0 { + if "`grid'"=="" & "`xaxis'"=="y" { + if `N_plots'>1 & `"`offsets'"'=="" local grid between + else local grid within + } + get_axis_labels `at' `eq' `grp' "`grid'" `"`groups'"' + // => returns xlabels, xgrid, xrange, eqlabels, groups + if `meqs'==0 | "`noeqlabels'"!="" local eqlabels + if `"`cltrunc'`clwrap'"'!="" { + if "`bycoefs'"=="" { + truncwrap_labels xlabels "`cltrunc'" "`clwrap'" /// + "`clbreak'" `"`xlabels'"' + } + else { + truncwrap_vlabels "`by'" "`n_subgr'" "`cltrunc'" /// + "`clwrap'" "`clbreak'" + } + } + if "`bycoefs'"!="" { + if `"`bylabels'"'!="" { + reset_xlabels `"`bylabels'"' `"`xlabels'"' + } + if `"`bltrunc'`blwrap'"'!="" { + truncwrap_labels xlabels "`bltrunc'" "`blwrap'" /// + "`blbreak'" `"`xlabels'"' + } + } + if `"`clangle'"'=="" local clangle angle(horizontal) + local xlabel `xaxis'label(`xlabels', nogrid `clangle' `clopts') + local xrange `xaxis'scale(range(`xrange')) + if !inlist("`grid'", "", "none") { + local xtick `xaxis'tick(`xgrid', notick tlstyle(none) grid `gridopts') + // note: tlstyle(none) is required to prevent by() from drawing + // the ticks + } + else local xtick + if "`eqashead'"!="" { + merge_eqlabels_hlbls `"`eqlabels'"' `"`hlbls'"' + // => returns hlbls and clears eqlabels + } + if `"`eqtrunc'`eqwrap'"'!="" { + if `"`eqlabels'"'!="" { + truncwrap_labels eqlabels "`eqtrunc'" "`eqwrap'" /// + "`eqbreak'" `"`eqlabels'"' + } + } + if `"`gtrunc'`gwrap'"'!="" { + if `"`groups'"'!="" { + truncwrap_labels groups "`gtrunc'" "`gwrap'" /// + "`gbreak'" `"`groups'"' + } + } + if `"`htrunc'`hwrap'"'!="" { + if `"`hlbls'"'!="" { + truncwrap_labels hlbls "`htrunc'" "`hwrap'" /// + "`hbreak'" `"`hlbls'"' + } + } + } + + // compute offsets + if `customoffset' { + forv i = 1/`N_plots' { + if "`offset_`i''"!="" { + qui replace `at' = `at' `offdir' `offset_`i'' if `plot'==`i' + } + } + } + else if 
`atmode'==0 & `"`offsets'"'=="" & `N_plots'>1 { + capt mata: coefplot_at_unique(COEFPLOT_STRUCT) // error if not true + if _rc==1 exit _rc + if _rc { + if "`recycle'"=="" | `n_subgr'==1 { + qui replace `at' = `at' - 0.5 + `plot'/(`N_plots'+1) + } + else { + forv j=1/`n_subgr' { + qui replace `at' = `at' - 0.5 + /// + (`plot'-`firstplot_`j''+1) / /// + (`lastplot_`j''-`firstplot_`j''+2) if `by'==`j' + } + } + } + } + + // inject tempvars + forv i=1/`N_plots' { + foreach opt in ifopt weightopt mlabel mlabvposition { + if `"``opt'_`i''"'!="" { + mata: coefplot_inject_temvars("`opt'_`i'", `N_ci', `N_aux') + } + } + } + + // handle string expressions in mlabel() + forv i=1/`N_plots' { + if `"`mlabel_`i''"'!="" { + if `"`mlabel_`i''"'=="mlabel(`mlbl')" continue + parse_mlabel_exp, `mlabel_`i'' // returns mlblexp + capt confirm variable `mlblexp' + if _rc==0 continue + capt replace `mlbl' = `mlblexp' if `plot'==`i' + if _rc { + di as err "invalid expression in mlabel()" + exit 198 + } + local mlabel_`i' mlabel(`mlbl') + } + } + + // compile plot + local addaxis 1 + local eqaxis 2 + local axisalt alt + if (`"`eqlabels'"'!="" & `"`groups'"'!="") | (`"`addplotbelow'"'!="") local axisalt + if `"`groups'"'!="" { + local ++eqaxis + local addaxis `addaxis' 2 + local groupsopts `xaxis'scale(axis(2) `axisalt' noline) /// + `xaxis'title("", axis(2)) /// + `xaxis'label(`groups', axis(2) noticks tlstyle(none) `gopts') + } + if `"`eqlabels'"'!="" { + local addaxis `addaxis' `eqaxis' + local eqaxisopts `xaxis'scale(axis(`eqaxis') `axisalt' noline) /// + `xaxis'title("", axis(`eqaxis')) /// + `xaxis'label(`eqlabels', axis(`eqaxis') noticks /// + tlstyle(none) `eqopts') + } + local axisalt + if "`addaxis'"!="1" { + local addaxis `xaxis'axis(`addaxis') + if `"`addplotbelow'"'!="" { + if (`"`eqlabels'"'!="")+(`"`groups'"'!="")==1 { + local axisalt `xaxis'scale(alt) + } + } + } + else local addaxis + if `"`hlbls'"'!="" { + local hlblsopts /// + `xaxis'label(`hlbls', custom add tlstyle(none) 
`hopts') + } + local j 0 + if `"`addplot'"'!="" { + mata: coefplot_inject_temvars("addplot", `N_ci', `N_aux') + if `"`addplotbelow'"'!="" { + capt two `addplot' ||, nodraw + if _rc==0 local j `.Graph.last_style' + capt confirm integer number `j' + if _rc local j 0 + } + } + local plots + local legendlbls + local legendorder + forv i=1/`N_plots' { + local key + if "`n_ci_`i''"=="" { + continue // plot does not exist (this can happen if _skip is + // specified together with norecycle) + } + local n_ci = `n_ci_`i'' + `cis_n_`i'' + if `n_ci'==0 & `"`cionly_`i''"'!="" { + continue // can happen if noci and cionly is specified + } + local axis + if `"`axis_`i''"'!="" { + local axis `yaxis'axis(`axis_`i'') + } + local ciplots + if (`n_ci')>0 { + get_pstyle_id `=mod(`i'-1,`pcycle')+1', `pstyle_`i'' // returns pstyle_id + forv k = 1/`n_ci' { + local lw + if `k'>`cis_n_`i'' { + local l = `k' - `cis_n_`i'' + local ciopts `ciopts_`i'_`l'' + parse_ciopts_recast_pstyle, `ciopts' + // returns cirecast, cipstyle, ciopts + if "`nocilwincr_`i''"=="" { + local lw = string(1 + log10(`l')/log10(2)) + local lw lwidth(*`lw') + } + local ciplotcmd rspike `ll`k'' `ul`k'' `at' + if (substr(`"`cirecast'"',1,2)=="pc") { // paired coordinates + local ciplotcmd `cirecast' `ll`k'' `at' `ul`k'' `at' + if `"`cirecast'"'=="pcrarrow" { + local ciplotcmd pcarrow `ul`k'' `at' `ll`k'' `at' + } + } + else if `"`cirecast'"'!="" { + local ciopts `ciopts' recast(`cirecast') + } + } + else { // cismooth + local l 0 + local cirecast + local cipstyle `cis_pstyle_`i'' + local lw: word `k' of `cis_lwidth_`i'' + local lw lwidth(*`lw') + local lcinten: word `k' of `cis_intens_`i'' + local ciopts lcolor("`cis_color_`i''*`lcinten'") `cipstyle' + local ciplotcmd rspike `ll`k'' `ul`k'' `at' + } + if `"`cipstyle'"'!="" local pstyle + else { + set_pstyle `pstyle_id' `"`cirecast'"' // returns pstyle + } + local ciplots `ciplots' /// + (`ciplotcmd' if `plot'==`i'`ifopt_`i'', `addaxis' /// + `pstyle' `lw' `axis' `ciopts' 
                    `twplotopts_`i'' `horizontal')
            }
        }
        // place CIs before the point estimates unless citop was specified
        if "`citop_`i''"=="" & `n_ci'>0 {
            local plots `plots' `ciplots'
            local j = `j' + `cis_n_`i''
            if inrange(`key_`i'', 1, `n_ci_`i'') {
                local key = `j' + `key_`i''
            }
            local j = `j' + `n_ci_`i''
        }
        // add the point-estimate plot (skipped if cionly)
        if `"`cionly_`i''"'=="" {
            if `"`pstyle_`i''"'!="" local pstyle `pstyle_`i''
            else {
                set_pstyle `=mod(`i'-1,`pcycle')+1' `"`recast_`i''"' // returns pstyle
            }
            if `"`recast_`i''"'!="" local recast recast(`recast_`i'')
            else local recast
            // for area/bar/spike/dropline/dot the horizontal option is passed
            // through to the plottype; otherwise x/y are swapped manually
            if `"`horizontal'"'=="" | inlist(`"`recast_`i''"', ///
                "area", "bar", "spike", "dropline", "dot") {
                local plots `plots' ///
                    (scatter `b' `at' ///
                    if `plot'==`i'`ifopt_`i''`weightopt_`i'', ///
                    `addaxis' `pstyle' `twplotopts0' `twplotopts1_`i'' ///
                    `axis' `recast' `mlabel_`i'' `mlabvposition_`i'' ///
                    `twplotopts_`i'' `horizontal')
            }
            else {
                local plots `plots' ///
                    (scatter `at' `b' ///
                    if `plot'==`i'`ifopt_`i''`weightopt_`i'', ///
                    `addaxis' `pstyle' `twplotopts0' `twplotopts1_`i'' ///
                    `axis' `recast' `mlabel_`i'' `mlabvposition_`i'' ///
                    `twplotopts_`i'')
            }
            local ++j
            if `key_`i''==0 {
                local key `j'
            }
        }
        // legend label for this plot (quote if not already quoted)
        local plotlab `"`: lab `plot' `i''"'
        gettoken trash : plotlab, qed(hasquotes)
        if `hasquotes'==0 {
            local plotlab `"`"`plotlab'"'"'
        }
        // place CIs after the point estimates if citop was specified
        if "`citop_`i''"!="" & `n_ci'>0 {
            local plots `plots' `ciplots'
            local j = `j' + `cis_n_`i''
            if inrange(`key_`i'', 1, `n_ci_`i'') {
                local key = `j' + `key_`i''
            }
            local j = `j' + `n_ci_`i''
        }
        if "`key'"!="" {
            local legendlbls `legendlbls' label(`key' `plotlab')
            local legendorder `legendorder' `key'
        }
    }
    // compile legend contents; suppress legend for a single unnamed plot
    if `"`legendorder'"'!="" {
        local legendorder all order(`legendorder')
        if `N_plots'==1 {
            if `n_subgr'==1 & `"`legend'"'=="" {
                local legendorder `legendorder' off
            }
        }
    }
    else local legendorder off
    // compile by() option for multiple subgraphs
    if `n_subgr'>1 {
        local byopt `by', note("")
        if (`N_plots'==1 & `"`bylegend'"'=="") | `"`legendorder'"'=="off" {
            local byopt `byopt' legend(off)
        }
        local byopt by(`byopt' `bylegend' `byopts')
    }
    else local byopt
    if `"`plots'"'=="" {
        di as txt "(nothing to plot)"
        exit
    }
    // user addplot() plots go below or above the main plots
    if `"`addplot'"'!="" {
        if `"`addplotbelow'"'!="" {
            local plots `addplot' || `plots' ||
        }
        else {
            local plots `plots' || `addplot' ||
        }
    }
    // assemble and run the final graph twoway command
    local plots two `plots', `axisalt' `groupsopts' `eqaxisopts' ///
        `xlabel' `hlblsopts' `xtick' `xrange' `reverse' yti("") xti("") ///
        legend(`legendlbls' `legendorder') `legend' `plotregion' `byopt' `twopts'
    `plots'

    // return
    if `"`generate'"'!="" {
        restore, not
        di as txt _n "Generated variables:" _c
        describe `returnvars'
    }
    return local graph `plots'
    return local labels `"`xlabels'"'
    return local eqlabels `"`eqlabels'"'
    return local groups `"`groups'"'
    return local headings `"`hlbls'"'
    return local legend `"`legendlbls' `legendorder'"'
    return scalar n_plots = `N_plots'
    return scalar n_subgr = `n_subgr'
    return scalar n_ci = `N_ci'
end

// Split the command line on "||" into subgraph specifications; returns
// subgr_1..subgr_#, n_subgr, and the trailing options (via c_local).
program parse_subgraphs // input: "subgr || subgr ..., opts"
    local i 0
    local empty 1
    while (`"`macval(0)'"'!="") {
        gettoken subgraph 0 : 0, parse("|") bind
        if `"`macval(subgraph)'"'=="|" {
            gettoken subgraph 0 : 0, parse("|") bind
            if `"`macval(subgraph)'"'!="|" error 198 // require "||"
            if `empty' {
                local ++i
                c_local subgr_`i' "." // use active model
            }
            else local empty 1
            continue
        }
        if `"`0'"'=="" { // get opts if last
            _parse comma subgraph opts : subgraph
        }
        if `"`macval(subgraph)'"'!="" { // skip last if empty
            local empty 0
            local ++i
            c_local subgr_`i' `"`macval(subgraph)'"'
        }
    }
    if `i'==0 { // check if empty
        local i 1
        c_local subgr_1 "." // use active model
    }
    c_local n_subgr `i'
    c_local opts `macval(opts)'
end

// Parse the global (command-level) options; separates twoway options via
// _get_gropts, parses each composite suboption through its own helper, and
// hands everything back to the caller through c_local.
program parse_globalopts
    syntax [, ///
        /// globalopts
        HORizontal ///
        VERTical ///
        sort SORT2(str) ///
        orderby(str) ///
        order(str asis) ///
        BYCoefs ///
        noRECycle ///
        grid(str) ///
        noOFFsets ///
        format(str) ///
        noLABels ///
        COEFLabels(str asis) ///
        NOEQLABels ///
        EQLabels(str asis) ///
        eqstrict ///
        HEADings(str asis) ///
        GROUPs(str asis) ///
        PLOTLabels(str asis) ///
        bylabels(str asis) ///
        GENerate GENerate2(name) ///
        RELOCate(str asis) ///
        replace ///
        addplot(str asis) ///
        NODROP ///
        LEGend(passthru) ///
        BYOPts(str asis) ///
        Bname(passthru) /// so that b() is not b1title()
        rename(passthru) ///
        EQREName(passthru) ///
        PCYCle(int 15) ///
        /// twoway options not captured by _get_gropts, gettwoway
        play(passthru) ///
        XOVERHANGs ///
        YOVERHANGs ///
        fxsize(passthru) ///
        fysize(passthru) ///
        * ///
        ]
    _get_gropts, graphopts(`options') gettwoway
    local twopts `s(twowayopts)' `play' `xoverhangs' `yoverhangs' `fxsize' `fysize'
    local opts0 `bname' `macval(rename)' `macval(eqrename)' `s(graphopts)'
    if `"`sort'`sort2'"'!="" {
        parse_sort `sort2' // returns local sort
    }
    if `"`orderby'"'!="" {
        parse_orderby `orderby', `recycle' // returns local orderby
    }
    if `"`generate'"'!="" & `"`generate2'"'=="" {
        local generate "__" // default variable-name prefix
    }
    else local generate `"`generate2'"'
    if `"`grid'"'!="" {
        parse_grid, `grid' // returns local grid, gridopts
    }
    if `"`coeflabels'"'!="" {
        parse_coeflabels `coeflabels'
        // returns coeflabels, cltrunc, clwrap, clbreak, clinteract, clangle, clopts
    }
    if `"`clinteract'"'=="" {
        local clinteract `"" # ""' // default interaction delimiter
    }
    parse_eqlabels "`noeqlabels'" `eqlabels'
    // returns eqlabels, eqashead, eqxlab
    // if eqashead=="": also eqgap, eqwrap, eqtrunc, eqbreak, eqopts
    // if eqashead!="": also hoff, hgap, hwrap, htrunc, hbreak, hopts
    if `"`headings'"'!="" {
        if `"`eqashead'"'!="" {
            di as err "eqlabels(, asheadings) and headings() not both allowed"
            exit 198
        }
        parse_headings `headings' // returns headings, hoff, hgap, hopts
    }
    else if `"`hgap'"'=="" local hgap 0
    if `"`eqashead'"'!="" {
        if "`bycoefs'"!="" {
            di as err "eqlabels(, asheadings) and bycoefs not both allowed"
            exit 198
        }
    }
    if `"`groups'"'!="" {
        parse_groups `groups' // returns groups, ggap, gwrap, gtrunc, gbreak, gopts
    }
    else local ggap 0
    if `"`plotlabels'"'!="" {
        parse_plotlabels `plotlabels' // returns plotlabels, plwrap, pltrunc, plbreak
    }
    if `"`bylabels'"'!="" {
        parse_bylabels `bylabels' // returns bylabels, blwrap, bltrunc, blbreak
    }
    if `"`format'"'!="" {
        confirm numeric format `format'
    }
    if `"`horizontal'"'!="" {
        if `"`vertical'"'!="" {
            di as err "horizontal and vertical not both allowed"
            exit 198
        }
    }
    if `"`addplot'"'!="" {
        parse_addplot `addplot' // returns addplot, addplotbelow
    }
    parse_byopts, `byopts' // returns bylegend, byopts
    // pass all parsed results up to the caller
    foreach opt in ///
        horizontal ///
        vertical ///
        sort ///
        orderby ///
        order ///
        bycoefs ///
        recycle ///
        grid gridopts ///
        offsets ///
        format ///
        labels ///
        coeflabels cltrunc clwrap clbreak clinteract clangle clopts ///
        noeqlabels ///
        eqlabels eqashead eqxlab eqgap eqtrunc eqwrap eqbreak eqopts ///
        eqstrict ///
        headings hxlab hoff hgap htrunc hwrap hbreak hopts ///
        groups ggap gtrunc gwrap gbreak gopts ///
        plotlabels plwrap pltrunc plbreak ///
        bylabels blwrap bltrunc blbreak ///
        relocate ///
        generate ///
        replace ///
        addplot addplotbelow ///
        nodrop ///
        legend ///
        bylegend ///
        byopts ///
        {
        c_local `opt' `"``opt''"'
    }
    c_local pcycle `pcycle'
    c_local twopts `twopts'
    merge_subgropts, `macval(opts0)'
    c_local subgropts0 `subgropts'
    c_local plotopts0 `plotopts1'
    c_local modelopts0 `macval(modelopts1)'
    c_local twplotopts0 `twplotopts1'
end

// Parse sort([#:#] [, descending by()]); returns "subgr plot descending by"
// in local sort ("." means all subgraphs/plots).
program parse_sort
    syntax [anything] [, Descending by(str) ]
    if `"`anything'"'=="" {
        local subgr .
        local plot .
    }
    else {
        gettoken subgr rest : anything, parse(":")
        if `"`rest'"'=="" { // sort(#)
            local plot `"`subgr'"'
            local subgr .
        }
        else if `"`subgr'"'==":" { // sort(:#)
            local subgr .
            gettoken plot rest : rest
        }
        else { // sort(#:#)
            gettoken colon rest : rest, parse(":")
            if `"`colon'"'!=":" {
                di as err "sort(): invalid syntax"
                exit 198
            }
            gettoken plot rest : rest
        }
        if `"`rest'"'!="" {
            di as err "sort(): invalid syntax"
            exit 198
        }
        // subgraph/plot selectors must be "." or positive integers
        foreach t in subgr plot {
            if `"``t''"'=="" local `t' .
            else if `"``t''"'!="." {
                capt confirm integer number ``t''
                if _rc==0 {
                    local rc = (``t''<=0)
                }
                else local rc 1
                if `rc' {
                    di as err "sort(): invalid syntax"
                    exit 198
                }
            }
        }
    }
    capt parse_sort_by, `by'
    if _rc {
        di as err `"sort(): '`by'' not allowed in by()"'
        exit 198
    }
    local descending = ("`descending'"!="")
    c_local sort `"`subgr' `plot' `descending' "`by'""'
end
// Validate the sort-key keyword; ll/ul/aux may carry a positive integer
// index (default 1). Default key is b.
program parse_sort_by
    syntax [, b v se t tabs df p ll ul aux * ]
    local by `b' `v' `se' `t' `tabs' `df' `p' `ll' `ul' `aux'
    if `: list sizeof by'>1 exit 198
    if inlist("`by'","ll", "ul", "aux") { // by(ll/ul/aux #)
        if `"`options'"'=="" local by `by' 1
        else {
            capt confirm integer number `options'
            if _rc==0 {
                local rc = (`options'<=0)
            }
            else local rc 1
            if `rc' exit 198
            local by `by' `options'
        }
    }
    else if `"`options'"'!="" exit 198
    if `"`by'"'=="" local by b
    c_local by `by'
end

// Parse orderby([#:]#[, norecycle]); returns "subgr plot" in local orderby.
program parse_orderby
    syntax [anything] [, norecycle ]
    gettoken subgr rest : anything, parse(":")
    if `"`rest'"'=="" { // orderby(#)
        local plot `"`subgr'"'
        local subgr 1
    }
    else if `"`subgr'"'==":" { // orderby(:#)
        local subgr 1
        gettoken plot rest : rest
    }
    else { // orderby(#:#)
        gettoken colon rest : rest, parse(":")
        if `"`colon'"'!=":" {
            di as err "orderby(): invalid syntax"
            exit 198
        }
        if "`recycle'"!="" {
            di as err "orderby(): subgraph not allowed with norecycle"
            exit 198
        }
        gettoken plot rest : rest
    }
    if `"`rest'"'!="" {
        di as err "orderby(): invalid syntax"
        exit 198
    }
    foreach t in subgr plot {
        if `"``t''"'=="" local `t' 1
        else {
            capt confirm integer number ``t''
            if _rc==0 {
                local rc = (``t''<=0)
            }
            else local rc 1
            if `rc' {
                di as err "orderby(): invalid syntax"
                exit 198
            }
        }
    }
    if "`recycle'"!="" local subgr .
    c_local orderby `subgr' `plot'
end

// Parse grid(between|within|none ...); at most one placement keyword.
program parse_grid
    syntax [, Between Within None * ]
    if ("`between'"!="") + ("`within'"!="") + ("`none'"!="") > 1 {
        di as err "grid(): only one of between, within, and none allowed"
        exit 198
    }
    c_local grid `between' `within' `none'
    c_local gridopts `options'
end

// Parse coeflabels() and its suboptions.
program parse_coeflabels
    mata: coefplot_parsecomma("coeflabels", "0", "0")
    syntax [, Truncate(numlist integer max=1 >0) ///
        Wrap(numlist integer max=1 >0) noBreak ///
        Interaction(str asis) angle(passthru) * ]
    c_local coeflabels `"`coeflabels'"'
    c_local cltrunc `truncate'
    c_local clwrap `wrap'
    c_local clbreak `break'
    c_local clinteract `"`interaction'"'
    c_local clangle `angle'
    c_local clopts `options'
end

// Parse eqlabels(); with asheadings the suboptions are returned through the
// headings (h*) locals instead of the eq* locals.
program parse_eqlabels
    gettoken noeqlabels 0 : 0
    mata: st_local("0", strltrim(st_local("0")))
    mata: coefplot_parsecomma("eqlabels", "0", "0")
    syntax [, LABels LABels2(str asis) ///
        OFFset(real 0) ASHEADings noGap Gap2(numlist max=1) ///
        Truncate(numlist integer max=1 >0) ///
        Wrap(numlist integer max=1 >0) noBreak * ]
    if "`labels'"!="" & `"`labels2'"'=="" {
        local labels2 `"" # ""'
    }
    // gap default: 0 if nogap or noeqlabels, else 1
    if "`gap'"!="" local gap2 0
    else if "`gap2'"=="" {
        if "`noeqlabels'"!="" local gap2 0
        else local gap2 1
    }
    if "`asheadings'"!="" {
        c_local hoff `offset'
        c_local hgap `gap2'
        c_local htrunc `truncate'
        c_local hwrap `wrap'
        c_local hbreak `break'
        c_local hopts `options'
        c_local eqgap 0
    }
    else {
        if `offset'!=0 {
            di as err "eqlabels(): offset() only allowed with asheadings"
            exit 198
        }
        c_local eqgap `gap2'
        c_local eqtrunc `truncate'
        c_local eqwrap `wrap'
        c_local eqbreak `break'
        c_local eqopts `options'
    }
    c_local eqxlab `"`labels2'"'
    c_local eqlabels `"`eqlabels'"'
    c_local eqashead `asheadings'
end

// Parse headings() and its suboptions.
program parse_headings
    mata: coefplot_parsecomma("headings", "0", "0")
    syntax [, OFFset(real 0) noGap Gap2(real 1) ///
        Truncate(numlist integer max=1 >0) ///
        Wrap(numlist integer max=1 >0) noBreak * ]
    if "`gap'"!="" local gap2 0
    c_local headings `"`headings'"'
    c_local hoff `offset'
    c_local hgap `gap2'
    c_local htrunc `truncate'
    c_local hwrap `wrap'
    c_local hbreak `break'
    c_local hopts `options'
end

// Parse groups() and its suboptions.
program parse_groups
    mata: coefplot_parsecomma("groups", "0", "0")
    syntax [, noGap Gap2(real 1) ///
        Truncate(numlist integer max=1 >0) ///
        Wrap(numlist integer max=1 >0) noBreak * ]
    if "`gap'"!="" local gap2 0
    c_local groups `"`groups'"'
    c_local ggap `gap2'
    c_local gtrunc `truncate'
    c_local gwrap `wrap'
    c_local gbreak `break'
    c_local gopts `options'
end

// Parse plotlabels() and its suboptions.
program parse_plotlabels
    mata: coefplot_parsecomma("plotlabels", "0", "0")
    syntax [, Truncate(numlist integer max=1 >0) ///
        Wrap(numlist integer max=1 >0) noBreak ]
    c_local plotlabels `"`plotlabels'"'
    c_local pltrunc `truncate'
    c_local plwrap `wrap'
    c_local plbreak `break'
end

// Parse bylabels() and its suboptions.
program parse_bylabels
    mata: coefplot_parsecomma("bylabels", "0", "0")
    syntax [, Truncate(numlist integer max=1 >0) ///
        Wrap(numlist integer max=1 >0) noBreak ]
    c_local bylabels `"`bylabels'"'
    c_local bltrunc `truncate'
    c_local blwrap `wrap'
    c_local blbreak `break'
end

// Parse addplot(); rejects graph-level options inside it and extracts the
// "below" flag.
program parse_addplot
    _parse expand addplot addplotopts : 0 , ///
        common(below by() nodraw draw name() SAVing() play())
    forv i=1/`addplot_n' {
        local addplot `addplot' (`addplot_`i'')
    }
    local addplot `addplot' `addplotopts_if' `addplotopts_in'
    _parse combop addplotopts_op : addplotopts_op, option(below) opsin rightmost
    parse_addplotopts, `addplotopts_op' // returns below, options
    if `"`options'"'!="" {
        local addplot `addplot', `options'
    }
    c_local addplot `addplot'
    c_local addplotbelow `below'
end
program parse_addplotopts
    syntax [, below by(passthru) NODRAW draw name(passthru) ///
        SAVing(passthru) play(passthru) * ]
    foreach opt in nodraw draw {
        if `"``opt''"'!="" {
            di as err "`opt' not allowed within addplot()"
            exit 198
        }
    }
    foreach opt in by name saving play {
        if `"``opt''"'!="" {
            di as err "`opt'() not allowed within addplot()"
            exit 198
        }
    }
    local options: subinstr local options "below" "", all word // to be sure
    c_local below `below'
    c_local options `options'
end

// Split legend() out of byopts().
program parse_byopts
    syntax [, LEGend(passthru) * ]
    c_local bylegend `legend'
    c_local byopts `options'
end

// Merge subgraph-level options with global defaults (_opts0); subgraph-level
// settings take precedence.
program merge_subgropts
    merge_plotopts `macval(0)' // returns modelopts2, plotopts, options, _opts0
    _merge_subgropts, `options'
    _merge_subgropts _opt0_, `_opts0'
    if `"`_opt0_options'"'!="" error 198
    if `"`bylabel'"'!="" {
        c_local subgropts `bylabel'
    }
    else {
        c_local subgropts `_opt0_bylabel'
    }
    c_local modelopts1 `macval(modelopts2)'
    c_local plotopts1 `plotopts'
    c_local twplotopts1 `options'
end
program _merge_subgropts
    syntax [anything] [, BYLABel(passthru) * ]
    c_local `anything'bylabel `bylabel'
    c_local `anything'options `options'
end

// Register a subgraph's bylabel() with the Mata struct.
program parse_subgropts
    syntax anything [, BYLABel(str asis) ]
    gettoken lbl rest : bylabel, qed(qed) // remove outer quotes
    if `"`lbl'"'!="" & `"`rest'"'=="" & `qed' {
        local bylabel `"`lbl'"'
    }
    mata: coefplot_add_label(COEFPLOT_STRUCT, "by", `anything', "bylabel", 0)
end

// Split a subgraph spec into plots; unparenthesized names containing * or ?
// are expanded against the stored estimates.
program parse_plots // input: "j (plot) (plot) ..., opts"
    gettoken j 0 : 0
    _parse comma 0 opts : 0 // get opts
    gettoken comma opts : opts, parse(",") // strip comma
    local i 0
    while (`"`macval(0)'"'!="") {
        gettoken plot 0: 0, match(hasparen)
        local estexpand 0 // expand wildcards?
        if `"`hasparen'"'=="" { // - not if "(...)"
            if strpos(`"`plot'"',"(")==0 { // - not it "mat(...)"
                if strpos(`"`plot'"',"*") | strpos(`"`plot'"',"?") {
                    local estexpand 1
                }
            }
        }
        if `estexpand' {
            // expand wildcard against stored estimation results
            qui estimates dir `plot'
            foreach plot in `r(names)' {
                local ++i
                c_local plot_`j'_`i' `"`macval(plot)'"'
            }
        }
        else {
            local ++i
            c_local plot_`j'_`i' `"`macval(plot)'"'
        }
    }
    if `i'==0 { // check if empty
        local i 1
        c_local plot_`j'_1 "." // use active model
    }
    c_local n_plots_`j' `i'
    c_local opts `macval(opts)'
end

// Extract the p#() option for plot number `i'; remainder goes back into the
// global twoway plot options.
program parse_get_popt_i
    gettoken i 0 : 0, parse(",")
    syntax [, p`i'(str asis) * ]
    c_local plotopts `macval(p`i')'
    c_local twplotopts0 `macval(options)'
end

// Combine plot-level options with defaults from _opts0(); plot-level
// settings win, and mlabel/cismooth/key variants suppress their
// counterparts from the defaults.
program combine_plotopts
    syntax [, _opts0(str asis) * ]
    _merge_plotopts, `macval(options)'
    _merge_plotopts _opt0_, `macval(_opts0)'
    if `"`mlabels'"'!="" local _opt0_mlabel
    if `"`mlabel'"'!="" local _opt0_mlabels
    if `"`cismooths'"'!="" local _opt0_cismooth
    if `"`cismooth'"'!="" local _opt0_cismooths
    if `"`nokey'`key'`key2'"'!="" {
        local _opt0_nokey
        local _opt0_key
        local _opt0_key2
    }
    local 0
    foreach opt of local opts { // opts is set by _merge_plotopts
        if `"``opt''"'!="" {
            local 0 `0' ``opt''
        }
        else {
            local 0 `0' `_opt0_`opt''
        }
    }
    c_local plotopts `0'
    c_local options `macval(options)'
end

// Like combine_plotopts, but also separates model-level options first (via
// merge_modelopts) and passes the remainder upwards.
program merge_plotopts
    merge_modelopts `macval(0)' // returns modelopts, mlabels2, options, _opts0
    _merge_plotopts, `options'
    _merge_plotopts _opt0_, `_opts0'
    if `"`mlabels'"'!="" local _opt0_mlabel
    if `"`mlabel'"'!="" local _opt0_mlabels
    if `"`mlabels'`mlabel'"'!="" local mlabels2
    if `"`cismooths'"'!="" local _opt0_cismooth
    if `"`cismooth'"'!="" local _opt0_cismooths
    if `"`nokey'`key'`key2'"'!="" {
        local _opt0_nokey
        local _opt0_key
        local _opt0_key2
    }
    local 0
    foreach opt of local opts { // opts is set by _merge_plotopts
        if `"``opt''"'!="" {
            local 0 `0' ``opt''
        }
        else {
            local 0 `0' `_opt0_`opt''
        }
    }
    c_local modelopts2 `macval(modelopts)' `mlabels2'
    c_local plotopts `0'
    c_local options `options'
    c_local _opts0 `_opt0_options'
end

// Pick the recognized plot-level options out of an option list and return
// each (plus the list of their names in `opts') through c_local, prefixed
// by `anything' (used with prefix "_opt0_" for defaults).
program _merge_plotopts
    syntax [anything] [, ///
        LABel(passthru) ///
        offset(passthru) ///
        PSTYle(passthru) ///
        AXis(passthru) ///
        recast(passthru) ///
        MLabels MLabel(passthru) ///
        MLABVposition(passthru) ///
        cionly ///
        citop ///
        CISmooths CISmooth(passthru) ///
        CIOPts(passthru) ///
        CIREcast(passthru) ///
        IFopt(passthru) ///
        Weightopt(passthru) ///
        NOKEY key KEY2(passthru) ///
        * ]
    if `"`mlabel'"'!="" local mlabels
    if `"`cismooth'"'!="" local cismooths
    local opts ///
        label ///
        offset ///
        pstyle ///
        axis ///
        recast ///
        mlabels mlabel ///
        mlabvposition ///
        cionly ///
        citop ///
        cismooths cismooth ///
        ciopts ///
        cirecast ///
        ifopt ///
        weightopt ///
        nokey key key2
    foreach opt of local opts {
        c_local `anything'`opt' ``opt''
    }
    c_local `anything'options `options'
    c_local opts `opts'
end

// Validate and normalize the options of plot `anything'; results are
// returned as opt_# locals via c_local.
program parse_plotopts
    syntax anything [, ///
        LABel(str asis) ///
        offset(str asis) ///
        PSTYle(passthru) ///
        AXis(numlist integer max=1 >0 <10) ///
        recast(str) ///
        MLabels MLabel(passthru) ///
        MLABVposition(passthru) ///
        cionly ///
        citop ///
        CISmooths CISmooth(str asis) ///
        CIOPts(str asis) ///
        CIREcast(str) ///
        IFopt(str asis) ///
        Weightopt(str asis) ///
        NOKEY key KEY2(str) ///
        ]
    if `"`label'"'!="" {
        gettoken lbl rest : label, qed(qed) // remove outer quotes
        if `"`lbl'"'!="" & `"`rest'"'=="" & `qed' {
            local label `"`lbl'"'
        }
        mata: coefplot_add_label(COEFPLOT_STRUCT, "plot", `anything', "label", 1)
    }
    if `"`mlabels'"'!="" local mlabel mlabel(@b)
    if `"`cismooths'"'!="" local cismooth cismooth
    if `"`cirecast'"'!="" local cirecast recast(`cirecast')
    if `"`offset'"'!="" {
        capt parse_offset `offset'
        if _rc {
            di as err `"invalid offset(): `offset'"'
            exit 198
        }
    }
    // translate if/weight into fragments appended to the plot command
    if `"`ifopt'"'!="" local ifopt `" & (`ifopt')"'
    if `"`weightopt'"'!="" local weightopt `" [aw=`weightopt']"'
    // key: -1 = suppress, 0 = default, # = use #-th CI key
    local key0 0 // default
    if `"`nokey'"'!="" local key0 -1 // suppress key
    if `"`key'`key2'"'!="" {
        if `key0'<0 {
            di as err "nokey and key() not both allowed"
            exit 198
        }
        if `"`key2'"'!="" {
            gettoken key2ci key2 : key2
            if `"`key2ci'"'!="ci" {
                di as err "invalid syntax in key()"
                exit 198
            }
            if `"`key2'"'!="" {
                capt confirm integer number `key2'
                if _rc==0 {
                    capt assert (`key2'>0)
                }
                if _rc {
                    di as err "invalid syntax in key()"
                    exit 198
                }
            }
            else local key2 1
            local key0 `key2'
        }
    }
    local key `key0'
    foreach opt in ///
        offset ///
        pstyle ///
        axis ///
        recast ///
        mlabel ///
        mlabvposition ///
        cionly ///
        citop ///
        cismooth ///
        ciopts ///
        cirecast ///
        ifopt ///
        weightopt ///
        key ///
        {
        c_local `opt'_`anything' `"``opt''"'
    }
end

// Evaluate offset() as an expression and confirm the result is a number.
program parse_offset
    local offset = `0'
    local 0 `", offset(`offset')"'
    syntax [, offset(numlist max=1) ]
    c_local offset `offset'
end

// Split a plot spec on "\" into models; names with * or ? are expanded
// against the stored estimates.
program parse_models // input: "j k model \ model ..., opts"
    gettoken j 0 : 0
    gettoken k 0 : 0
    local i 0
    local empty 1
    while (`"`macval(0)'"'!="") {
        gettoken model 0 : 0, parse("\") bind
        if `"`macval(model)'"'=="\" {
            if `empty' {
                local ++i
                c_local model_`j'_`k'_`i' "." // use active model
            }
            else local empty 1
            continue
        }
        _parse comma model opts : model
        if `"`macval(0)'"'=="" { // get plotopts if last
            gettoken comma plotopts : opts, parse(",") // strip comma
            local opts
        }
        if `"`model'`macval(opts)'"'!="" { // skip last if empty
            local empty 0
            if strpos(`"`model'"',"(") { // mat(...)
                local ++i
                c_local model_`j'_`k'_`i' `"`model'`macval(opts)'"'
            }
            else {
                if `"`model'"'=="" local model .
                foreach ename of local model {
                    // expand wildcards against stored estimation results
                    if strpos(`"`ename'"',"*") | strpos(`"`ename'"',"?") {
                        qui estimates dir `ename'
                        local ename `"`r(names)'"'
                    }
                    foreach mm of local ename {
                        local ++i
                        c_local model_`j'_`k'_`i' `"`mm'`macval(opts)'"'
                    }
                }
            }
        }
    }
    if `i'==0 { // check if empty
        local i 1
        c_local model_`j'_`k'_1 "." // use active model
    }
    c_local n_models_`j'_`k' `i'
    c_local opts `macval(plotopts)'
end

// Split a model spec into name/matrix part and options.
program parse_model // input: "name, opts" or "matrix(name[...]), opts"
    _parse comma 0 opts : 0
    gettoken comma opts : opts, parse(",") // strip comma
    capt parse_model_matrix, `0' // returns model, matrix
    if _rc {
        // not matrix() syntax: treat first token as estimation-set name
        gettoken model rest : 0
        if `"`rest'"'!="" {
            di as err `"`rest' not allowed"'
            exit 198
        }
        if `"`model'"'=="" local model .
    }
    c_local model `"`model'"'
    c_local matrix `"`matrix'"'
    c_local opts `macval(opts)'
end
program parse_model_matrix
    syntax, Matrix(str)
    gettoken model : matrix, parse(" [") // matrix name without subscripts
    if `"`model'"'=="" error 198
    c_local model `"`model'"'
    c_local matrix `"`matrix'"'
end

// Merge model-level options with defaults from _opts0(); model-level
// settings win; se()/v() and asequation variants suppress each other's
// defaults.
program merge_modelopts
    syntax [, _opts0(str asis) * ]
    _merge_modelopts, `macval(options)'
    _merge_modelopts _opt0_, `macval(_opts0)'
    if `"`sename'"'!="" local _opt0_vname
    if `"`vname'"'!="" local _opt0_sename
    if `"`asequation'"'!="" local _opt0_asequation2
    local 0
    foreach opt of local opts { // opts is set by _merge_modelopts
        if `"``opt''"'!="" {
            if "`opt'"=="mlabels2" {
                c_local mlabels2 ``opt''
            }
            else {
                local 0 `macval(0)' `macval(`opt')'
            }
        }
        else {
            if "`opt'"=="mlabels2" {
                c_local mlabels2 `_opt0_`opt''
            }
            else {
                local 0 `macval(0)' `macval(_opt0_`opt')'
            }
        }
    }
    c_local modelopts `macval(0)'
    c_local options `options'
    c_local _opts0 `_opt0_options'
end
// Pick the recognized model-level options out of an option list; each is
// returned via c_local prefixed by `anything' (used with "_opt0_" for
// defaults), plus the list of option names in `opts'.
program _merge_modelopts
    syntax [anything] [, ///
        OMITted ///
        BASElevels ///
        Bname(passthru) ///
        ATname ATname2(passthru) ///
        SWAPnames ///
        keep(passthru) ///
        drop(passthru) ///
        rename(passthru) ///
        EQREName(passthru) ///
        ASEQuation ASEQuation2(passthru) ///
        eform EFORM2(passthru) ///
        rescale(passthru) ///
        TRANSform(passthru) ///
        MLABELS2(passthru) ///
        noci ///
        Levels(passthru) ///
        CIname(passthru) ///
        Vname(passthru) ///
        SEname(passthru) ///
        DFname(passthru) ///
        citype(passthru) ///
        aux(passthru) ///
        * ]
    if "`atname'"!="" & `"`atname2'"'=="" local atname2 "atname2(at)"
    if `"`asequation2'"'!="" local asequation
    if "`eform'"!="" & `"`eform2'"'=="" local eform2 "eform2(*)"
    if `"`sename'"'!="" & `"`vname'"'!="" {
        di as err "se() and v() not both allowed"
        exit 198
    }
    local opts ///
        omitted ///
        baselevels ///
        bname ///
        atname2 ///
        swapnames ///
        keep ///
        drop ///
        rename ///
        eqrename ///
        asequation ///
        asequation2 ///
        eform2 ///
        rescale ///
        transform ///
        mlabels2 ///
        ci ///
        levels ///
        ciname ///
        vname ///
        sename ///
        dfname ///
        citype ///
        aux
    foreach opt of local opts {
        c_local `anything'`opt' `macval(`opt')'
    }
    c_local `anything'options `options'
    c_local opts `opts'
end

// Collect the coefficients (and CIs etc.) of one model into the Mata
// struct; called once per model per plot.
program collect_coefs
    gettoken model 0 : 0
    gettoken matrix 0 : 0 // matrix mode?
    gettoken atmode 0 : 0 // whether at() is used
    gettoken i 0 : 0 // plot number
    gettoken j 0 : 0 // subgraph number
    gettoken cis 0 : 0 // cismooth levels

    // get options
    merge_modelopts `macval(0)' // returns modelopts, mlabels2, options, _opts0
    local 0 , `macval(modelopts)' `mlabels2' `options' `_opts0'
    syntax [, ///
        OMITted ///
        BASElevels ///
        Bname(str) ///
        ATname2(str) ///
        SWAPnames ///
        keep(str asis) ///
        drop(str asis) ///
        rename(str asis) ///
        EQREName(str asis) ///
        ASEQuation ASEQuation2(str) ///
        EFORM2(str asis) ///
        rescale(str asis) ///
        TRANSform(str asis) ///
        MLABELS2(str asis) ///
        noci ///
        Levels(numlist) ///
        CIname(str asis) ///
        Vname(str) ///
        SEname(str) ///
        DFname(str) ///
        citype(str) ///
        aux(str) ///
        ]
    // at() must be used consistently across all models
    if `"`atname2'"'!="" {
        if "`atmode'"=="0" {
            di as err "must specify at for all or none"
            exit 198
        }
        local atmode 1
        parse_at_transform `atname2' // replaces atname2, returns attransform
        capt parse_at_is_matrix, `atname2' // syntax at(matrix(...))
    }
    else {
        if "`atmode'"=="1" {
            di as err "must specify at for all or none"
            exit 198
        }
        local atmode 0
    }
    if `"`macval(rename)'"'!="" {
        parse_rename `macval(rename)'
    }
    if `"`macval(eqrename)'"'!="" {
        parse_eqrename `macval(eqrename)'
    }
    if "`cis'"!="" local ci // disable noci
    parse_cilevels `"`levels'"' `"`ciname'"' "`cis'" // returns levels, ciname
    parse_citype, `citype' // replaces citype
    if `"`matrix'"'!="" local bname `"`matrix'"'
    if "`asequation'"!="" & `"`asequation2'"'=="" {
        if "`model'"=="." local asequation2 "_"
        else local asequation2 `"`model'"'
    }
    if `"`aux'"'!="" {
        parse_aux `aux' // returns aux
    }
    // collect results
    local empty 0
    local equation
    mata: coefplot_keepdrop(COEFPLOT_STRUCT) // returns empty, n_ci, equation
    if `empty' {
        local n_ci 0
        di as txt ///
            `"(`model': no coefficients found, all dropped, or none kept)"'
    }

    // returns
    c_local equation `equation'
    c_local atmode `atmode'
    c_local n_ci `n_ci'
    if `"`mlabels2'"'!="" {
        c_local mlabel_`i' mlabel(@mlbl)
        c_local mlabvposition_`i' mlabvposition(@mlpos)
    }
end

// Parse at(spec[, transform()]); default spec is "at".
program parse_at_transform
    syntax [anything] [, Transform(str) ]
    if `"`anything'"'=="" local anything at
    c_local atname2 `"`anything'"'
    c_local attransform `"`transform'"'
end

// Detect at(matrix(...)) syntax.
program parse_at_is_matrix
    syntax, Matrix(str)
    c_local atname2 `"`matrix'"'
    c_local atismatrix "matrix"
end

// Parse rename(spec[, regex]).
// NOTE(review): local `eq' is never set in this program, so it expands to
// empty and the results land in c_local rename/renameregex — confirm this
// prefix is an intentional leftover.
program parse_rename
    mata: coefplot_parsecomma("rename", "0", "0")
    syntax [, Regex]
    c_local `eq'rename `"`macval(rename)'"'
    c_local `eq'renameregex `regex'
end

// Parse eqrename(spec[, regex]).
program parse_eqrename
    mata: coefplot_parsecomma("rename", "0", "0")
    syntax [, Regex]
    c_local eqrename `"`macval(rename)'"'
    c_local eqrenameregex `regex'
end

program parse_aux // remove spaces in aux()
    gettoken s 0 : 0, bind
    while (`"`s'"'!="") {
        local s = subinstr(`"`s'"', " ", "", .)
+ if substr(`"`s'"', 1, 1)=="[" { + local aux `aux'`s' + } + else { + local aux `aux' `s' + } + gettoken s 0 : 0, bind + } + c_local aux `"`aux'"' +end + +program parse_citype + local citypes logit probit atanh log + syntax [, `citypes' NORMal ] + local citype `logit' `probit' `atanh' `log' `normal' + if `: list sizeof citype'>1 { + di as err "citype(): only one of logit, probit, atanh, log, and normal allowed" + exit 198 + } + local citype: list posof "`citype'" in citypes + c_local citype `citype' +end + +program parse_cilevels + args levels names cis + if "`cis'"!="" { + foreach level of local cis { + local ll `ll' `level' + local nn `"`nn'`space'"""' + local space " " + } + } + while (1) { + gettoken l levels : levels + gettoken n names : names, match(paren) + if `"`l'`n'"'=="" { + continue, break + } + if `"`n'"'=="" { + parse_cilevel, levels(`l') // returns level + local ll `ll' `level' + local nn `"`nn'`space'"""' + } + else { + capt confirm number `n' + if _rc { + gettoken empty : n + if `"`empty'"'=="" { // set default level + parse_cilevel // returns level + local ll `ll' `level' + } + else { + local ll `ll' . 
+ } + local nn `"`nn'`space'`"`n'"'"' + } + else { + parse_cilevel, levels(`n') // returns level + local ll `ll' `level' + local nn `"`nn'`space'"""' + } + } + local space " " + } + if `"`ll'"'=="" { // set default level + parse_cilevel // returns level + local ll `level' + local nn `""""' + } + c_local levels `ll' + c_local ciname `"`nn'"' +end +program parse_cilevel + syntax [, level(cilevel) levels(numlist min=1 max=1 >0 <100) ] + if `"`levels'"'=="" { + c_local level `level' + } + else { + c_local level `levels' + } +end + +program parse_cismooth + syntax anything(name=j) [, Color(str) PSTYle(passthru) n(int 50) /// + Intensity(numlist min=2 max=2 >=0 <=100) /// + LWidth(numlist min=2 max=2 >=0 <=1000) ] + if `n'<4 { + di as err "cismooth(n()) must be >= 4" + exit 198 + } + if "`intensity'"!="" { + gettoken imin imax : intensity + } + else { + local imin = (1+3) / (ceil(`n'/2)+3) * 100 + local imax 100 + } + if "`lwidth'"!="" { + gettoken wmin wmax : lwidth + local lwidth + } + else { + local wmin 2 + local wmax 15 + } + local d = 100/`n' + local lmax = 100 - `d'/2 + forv i = 1/`n' { + if mod(`i',2)==0 { + local l = `d'/2 + (`i'/2-1)*`d' + } + else { + local l = `d'/2 + (`n'-`i'/2-.5)*`d' + } + local levels `levels' `=string(`l')' + local inten = (`imin' + (`imax'-`imin') / (ceil(`n'/2)-1) * /// + (ceil(`i'/2)-1))/100 + local intens `intens' `=string(`inten')' + local lw = 4 + (`l'-1)/(`lmax'-1) * (100-4) // if n=50 max lw is 25 + local lw = 100 / `lw' + local lw = `wmin' + (`lw'-1) / (25-1) * (`wmax'-`wmin') + local lwidth `lwidth' `=string(`lw')' + } + c_local cis_levels_`j' `levels' + c_local cis_n_`j' `n' + c_local cis_intens_`j' `intens' + c_local cis_lwidth_`j' `lwidth' + c_local cis_color_`j' `"`color'"' + c_local cis_pstyle_`j' `"`pstyle'"' +end + +program get_pstyle_id + syntax anything(name=i) [, PSTYle(str) ] + if `"`pstyle'"'=="" { + local id `i' + } + else { + local id = substr(`"`pstyle'"', 2, 2) // p##... 
        capt confirm number `id'
        if _rc {
            local id = substr(`"`pstyle'"', 2, 1) // p#...
            capt confirm number `id'
        }
        if _rc { // invalid pstyle
            local id `i'
        }
    }
    c_local pstyle_id `id'
end

// Flag plots whose ciopts() set recast() or lwidth(): cismooth's automatic
// line-width progression must then be disabled.
program parse_ciopts_nocilwincr
    syntax anything(name=i) [, recast(str) LWidth(str) * ]
    if `"`recast'`lwidth'"'!="" {
        c_local nocilwincr_`i' 1
    }
end

// Extract recast() and pstyle() from ciopts(); pstyle() is re-appended so
// it stays part of the remaining options.
program parse_ciopts_recast_pstyle
    syntax [, recast(str) PSTYle(str) * ]
    if `"`pstyle'"'!="" {
        local options `options' pstyle(`pstyle')
    }
    c_local cirecast `"`recast'"'
    c_local cipstyle `"`pstyle'"'
    c_local ciopts `options'
end

// Choose a default pstyle matching the plot type implied by recast().
program set_pstyle
    args i recast
    if `"`recast'"'=="" {
        c_local pstyle pstyle(p`i')
        exit
    }
    if inlist(`"`recast'"', "line", "rline") ///
        c_local pstyle pstyle(p`i'line)
    else if inlist(`"`recast'"', "area", "rarea") ///
        c_local pstyle pstyle(p`i'area)
    else if inlist(`"`recast'"', "bar", "rbar") ///
        c_local pstyle pstyle(p`i'bar)
    else if inlist(`"`recast'"', "dot") ///
        c_local pstyle pstyle(p`i'dot)
    else c_local pstyle pstyle(p`i')
end

// Expand ciopts() for plot `j' into per-CI option sets ciopts_`j'_1..`n':
// each suboption may carry a #-list factor distributing values across CIs.
program parse_ciopts
    gettoken j 0 : 0
    gettoken n 0 : 0
    gettoken opt 0 : 0, bind
    local opts
    while (`"`opt'"'!="") { // get rid of possible spaces between opt and ()
        gettoken paren: opt, parse("(")
        if `"`paren'"'=="(" {
            local opts `opts'`opt'
        }
        else {
            local opts `opts' `opt'
        }
        gettoken opt 0 : 0, bind
    }
    local ciopts `", `opts'"'
    gettoken opt opts : opts, bind
    local i 0
    while (`"`opt'"'!="") {
        local ++i
        gettoken optname optcontents : opt, parse("(")
        if `"`optcontents'"'=="" {
            gettoken opt opts : opts, bind
            continue
        }
        _parse factor ciopts : ciopts, option(`optname') to(`optname'(X))
        gettoken opt opts : opts, bind
    }
    _parse factordot ciopts : ciopts, n(`n')
    // _parse combine only works up to p20
    gettoken opt ciopts : ciopts // get rid of comma
    gettoken opt ciopts : ciopts, bind // get first opt
    while (`"`opt'"'!="") {
        mata: coefplot_combine_ciopts() // appends opt_# or options
        gettoken opt ciopts : ciopts, bind // get next opt
    }
    forv i=1/`n' {
        c_local ciopts_`j'_`i' `opt_`i'' `options'
    }
end

// Extract the expression from an mlabel() option.
program parse_mlabel_exp
    syntax [ , MLabel(str asis) ]
    c_local mlblexp `"`mlabel'"'
end

// Assign a label to each collected coefficient: explicit label, raw name
// (nolabels), or one compiled from variable/value labels.
program coeflbls
    args labels interact
    mata: coefplot_get_coefs(COEFPLOT_STRUCT)
    local i 0
    foreach v of local coefs {
        local ++i
        if (`"`v'"'=="") continue // gap from order()
        // NOTE(review): local `last' is never assigned in the visible code,
        // so this branch appears unreachable — confirm against full source
        if (`"`v'"'==`"`last'"') {
            mata: coefplot_add_label(COEFPLOT_STRUCT, "coef", `i', "coeflbl", 1)
            continue
        }
        mata: coefplot_get_coeflbl(COEFPLOT_STRUCT, `i')
        if `"`coeflbl'"'=="" {
            if `"`labels'"'!="" {
                local coeflbl `"`v'"'
            }
            else {
                compile_xlabel, v(`v') interact(`interact') // returns coeflbl
            }
        }
        mata: coefplot_add_label(COEFPLOT_STRUCT, "coef", `i', "coeflbl", 1)
    }
end

// Build a display label for coefficient name `v' from variable and value
// labels, handling factor-variable terms (#, i.var, c.var, lvl.var).
program compile_xlabel
    syntax [, v(str) interact(str) ]
    gettoken vi vrest: v, parse("#")
    while (`"`vi'"') !="" {
        local xlabi
        if `"`vi'"'=="#" {
            local xlabi `"`interact'"'
        }
        else if strpos(`"`vi'"',".")==0 {
            // plain variable: use its variable label if any
            capt confirm variable `vi', exact
            if _rc==0 {
                local xlabi: var lab `vi'
            }
            if `"`xlabi'"'=="" {
                local xlabi `"`vi'"'
            }
        }
        else {
            // prefixed term such as 1.var, i.var, or c.var
            gettoken li vii : vi, parse(".")
            gettoken dot vii : vii, parse(".")
            capt confirm variable `vii', exact
            if _rc==0 & `"`vii'"'!="" {
                capt confirm number `li'
                if _rc {
                    // non-numeric prefix (e.g. c.): keep it, strip leading "c"
                    local xlabi: var lab `vii'
                    if (`"`xlabi'"'=="") local xlabi `"`vii'"'
                    if (substr(`"`li'"',1,1)=="c") ///
                        local li = substr(`"`li'"',2,.)
+ if (`"`li'"'!="") local xlabi `"`li'.`xlabi'"' + } + else { + local viilab : value label `vii' + if `"`viilab'"'!="" { + local xlabi: label `viilab' `li' + } + else { + local viilab: var lab `vii' + if (`"`viilab'"'=="") local viilab `"`vii'"' + local xlabi `"`viilab'=`li'"' + } + } + } + if `"`xlabi'"'=="" { + local xlabi `"`vi'"' + } + } + local xlab `"`xlab'`xlabi'"' + gettoken vi vrest: vrest, parse("#") + } + c_local coeflbl `"`xlab'"' +end + +program set_by_and_plot_labels + args plot by + // plot + capt label drop `plot' + qui levelsof `plot', local(levels) + foreach l of local levels { + mata: coefplot_get_plotlbl(COEFPLOT_STRUCT, `l') // returns plotlbl + lab def `plot' `l' `"`plotlbl'"', add + } + lab val `plot' `plot', nofix + // by + capt label drop `by' + qui levelsof `by', local(levels) + foreach l of local levels { + mata: coefplot_get_bylbl(COEFPLOT_STRUCT, `l') // returns bylbl + lab def `by' `l' `"`bylbl'"', add + } + lab val `by' `by', nofix +end + +program get_axis_labels + args x eq grp grid groups + // eqlabels + qui levelsof `eq', local(levels) + local j 0 + foreach l of local levels { + local ++j + su `x' if `eq'==`l', meanonly + local pos = string(r(min) + (r(max)-r(min))/2) + local pos: list retok pos + mata: coefplot_get_eqlbl(COEFPLOT_STRUCT, `j') // returns eqlbl + local eqlabels `eqlabels' `pos' `"`eqlbl'"' + } + c_local eqlabels `"`eqlabels'"' + // groups + if `"`groups'"'!="" { + local j 0 + foreach glab of local groups { + local ++j + foreach l of local levels { // equations (from above) + su `x' if `grp'==`j' & `eq'==`l', mean + if r(N)>0 { + local pos = string(r(min) + (r(max)-r(min))/2) + local pos: list retok pos + local glbls `glbls' `pos' `"`glab'"' + } + } + } + c_local groups `glbls' + } + // ticks and xlabels + mata: coefplot_ticks_and_labels(COEFPLOT_STRUCT) + c_local xrange `xrange' + c_local xlabels `xlabels' + c_local xgrid `xgrid' +end + +program merge_eqlabels_hlbls + args eqlab hlab + gettoken lab eqlab : eqlab // 
skip value + gettoken lab eqlab : eqlab, quotes + while (`"`lab'"'!="") { + gettoken val hlab : hlab + local hlbls `"`hlbls'`val' `lab' "' + gettoken lab eqlab : eqlab // skip value + gettoken lab eqlab : eqlab, quotes + } + c_local hlbls `"`hlbls'"' + c_local eqlabels "" +end + +program truncwrap_vlabels + args v n trunc wrap break + forv i = 1/`n' { + local lbl: label `v' `i' + truncwrap_label lbl "`trunc'" "`wrap'" "`break'" `"`lbl'"' + // may fail if label contains compound quotes + lab def `v' `i' `"`lbl'"', modify + } +end + +program truncwrap_labels + args local trunc wrap break lbls + local labels + local skip 1 + foreach lbl of local lbls { + if `skip' { + local labels `labels' `lbl' + local skip 0 + continue + } + truncwrap_label lbl "`trunc'" "`wrap'" "`break'" `"`lbl'"' + // may fail if label contains compound quotes + local labels `labels' `"`lbl'"' + local skip 1 + } + c_local `local' `"`labels'"' +end + +program truncwrap_label + args local trunc wrap break lbl + capt mata: coefplot_lbl_is_multiline() // error if label is multiline + if _rc exit + if "`break'"!="" local break ", `break'" + if "`trunc'"!="" { + local lbl: piece 1 `trunc' of `"`lbl'"'`break' + capt truncwrap_label_check_quotes `"`lbl'"' + if _rc exit + } + if "`wrap'"!="" { + local i 0 + local space + while (1) { + local ++i + local piece: piece `i' `wrap' of `"`lbl'"'`break' + capt truncwrap_label_check_quotes `"`piece'"' + if _rc exit + if `"`piece'"'=="" { + if `i'==1 { + local newlbl `"`lbl'"' // lbl is empty + } + else if `i'==2 { // workaround for multiline label graph bug + local newlbl `"`newlbl'`space'"""' + } + continue, break + } + local newlbl `"`newlbl'`space'`"`piece'"'"' + local space " " + } + local lbl `"`newlbl'"' + } + c_local `local' `"`lbl'"' +end +program truncwrap_label_check_quotes // checks for unmatched compound quotes + syntax [anything] +end + +program set_labels + args v n lbls + local i 0 + foreach lbl of local lbls { + local ++i + if `i'>`n' continue, 
break + lab def `v' `i' `"`lbl'"', modify + } +end + +program reset_xlabels + args lbls xlbls + local labels + local skip 1 + foreach lbl of local xlbls { + if `skip' { + local labels `labels' `lbl' + local skip 0 + continue + } + if `"`lbls'"'!="" { + gettoken lbl lbls : lbls + } + local labels `labels' `"`lbl'"' + local skip 1 + } + c_local xlabels `"`labels'"' +end + +version 11 +mata: +mata set matastrict on + +struct coefplot_struct +{ + real scalar r, xmin + + real colvector b, V, se, t, df, pval, at, plot, by, eq, grp, mlpos + real matrix ci, aux + + string colvector coefnm, eqnm, coeflbl, eqlbl, plotlbl, bylbl, mlbl, trans +} + +struct coefplot_struct scalar coefplot_struct_init() +{ + struct coefplot_struct scalar C + + return(C) +} + +void coefplot_keepdrop(struct coefplot_struct scalar C) +{ + real scalar i, j, level, r, brow, bcol, row, col, emode, + firsteqonly, meqs, citype + real colvector b, p, at, V, se, t, df, pval, mlpos + real matrix ci, aux, tmp + string scalar model, bname, cname, rename, attrans, attmp + string rowvector keep, drop, cnames, levels, llul + string colvector eqnm, coefnm, mlbl, trans + + // get results + emode = (st_local("matrix")=="") + // - coefficients + model = st_local("model") + bname = st_local("bname") + if (bname=="") { + if (st_global("e(mi)")=="mi") bname = "b_mi" + else bname = "b" + } + coefplot_parse_input(model, "b", bname, brow, bcol) + if (emode) bname = "e(" + bname + ")" + b = st_matrix(bname) + if (b==J(0,0,.)) { + st_local("empty", "1") + return + } + coefplot_invalid_subscript(model, bname, b, brow, bcol) + b = b[brow, bcol] + if (brow<.) { + b = b' + eqnm = st_matrixcolstripe(bname)[.,1] + coefnm = st_matrixcolstripe(bname)[.,2] + } + else { + eqnm = st_matrixrowstripe(bname)[.,1] + coefnm = st_matrixrowstripe(bname)[.,2] + } + _editvalue(eqnm, "", "_") + meqs = !allof(eqnm, eqnm[1]) + r = rows(b) + // - get variances and standard errors + se = J(r,1,.) + V = J(r,1,.) 
+ if ((cname = st_local("sename"))!="") { + coefplot_parse_cname(model, "se", cname, row, col, emode, 0, bname, brow, bcol) + tmp = st_matrix(cname) + if (coefplot_notfound(model, cname, tmp)==0) { + coefplot_invalid_subscript(model, cname, tmp, row, col) + tmp = tmp[row, col] + if (row<.) tmp = tmp' + if (coefplot_notconformable(model, cname, tmp, r, 1)==0) { + se = tmp + V = tmp:^2 + } + } + } + else if ((cname = st_local("vname"))!="") { + if (emode) cname = "e(" + cname + ")" + tmp = st_matrix(cname) + if (coefplot_notfound(model, cname, tmp)==0) { + if (coefplot_notconformable(model, cname, tmp, r, r)==0) { + V = diagonal(tmp) + se = sqrt(V) + } + } + } + else if (emode){ + if (st_global("e(mi)")=="mi") cname = "e(V_mi)" + else cname = "e(V)" + tmp = st_matrix(cname) + if (coefplot_notfound(model, cname, tmp, 1)==0) { + if (coefplot_notconformable(model, cname, tmp, r, r, 1)==0) { + V = diagonal(tmp) + se = sqrt(V) + } + } + } + // - get DFs + df = J(r,1,.) + if ((cname = st_local("dfname"))!="") { + if (strtoreal(cname)<. | cname==".") { + df = J(r, 1, strtoreal(cname)) + } + else { + coefplot_parse_input(model, "df", cname, row, col) + if (emode) cname = "e(" + cname + ")" + tmp = st_matrix(cname) + if (coefplot_notfound(model, cname, tmp)==0) { + coefplot_invalid_subscript(model, cname, tmp, row, col) + tmp = tmp[row, col] + if (row<.) tmp = tmp' + if (coefplot_notconformable(model, cname, tmp, r, 1)==0) { + df = tmp + } + } + } + } + else if (emode) { + if ((st_global("e(mi)")=="mi") & + (tmp=st_matrix("e(df_mi)")')!=J(0,0,.)) { + if (coefplot_notconformable(model, "e(df_mi)", tmp, r, 1)==0) { + df = tmp + } + } + else if (st_numscalar("e(df_r)")!=J(0,0,.)) { + df = J(r, 1, st_numscalar("e(df_r)")) + } + } + // - CIs + ci = J(r, 0, .) + if (st_local("ci")=="") { + cnames = tokens(st_local("ciname")) + levels = tokens(st_local("levels")) + citype = strtoreal(st_local("citype")) + for (j=1; j<=cols(levels); j++) { + ci = ci, J(r, 2, .) 
+ if ((cname = strtrim(cnames[j]))!="") { + if (cols(tokens(cname))==1) { // "name" + llul = (cname+"[1]", cname+"[2]") + } + else { + if (strpos(cname, "[")==0) { // "ll ul" + llul = tokens(cname) + if (cols(llul)!=2) { + printf("{txt}(%s: invalid syntax in %s)\n", + model, "ci()") + exit(error(198)) + } + } + else { + if (strpos(cname, "]")==strlen(cname)) { // "ll ul[]" + llul = (substr(cname, 1, strpos(cname, " ")-1), + substr(cname, strpos(cname, " ")+1, .)) + } + else { // "ll[] ul[]" + llul = (substr(cname, 1, strpos(cname, "]")), + substr(cname, strpos(cname, "]")+1, .)) + } + } + } + for (i=1; i<=2; i++) { + cname = llul[i] + coefplot_parse_cname(model, "ci", cname, row, col, emode, + 0, bname, brow, bcol) + tmp = st_matrix(cname) + if (coefplot_notfound(model, cname, tmp)==0) { + coefplot_invalid_subscript(model, cname, tmp, row, col) + tmp = tmp[row, col] + if (row<.) tmp = tmp' + if (coefplot_notconformable(model, cname, tmp, r, 1)==0) { + ci[,(j*2-2+i)] = tmp + } + } + } + } + else { + level = 1 - (1 - strtoreal(levels[j])/100)/2 + tmp = J(r, 1, .) + for (i=1; i<=r; i++) { + tmp[i] = df[i]>2e17 ? 
invnormal(level) : + invttail(df[i], 1-level) + } + if (citype==1) { // logit + tmp = tmp :* se :/ (b:* (1 :- b)) + ci[|1,(j*2-1) \ .,(j*2)|] = + invlogit((logit(b) :- tmp, logit(b) :+ tmp)) + } + else if (citype==2) { // probit + tmp = tmp :* se :/ normalden(invnormal(b)) + ci[|1,(j*2-1) \ .,(j*2)|] = + normal((invnormal(b) :- tmp, invnormal(b) :+ tmp)) + } + else if (citype==3) { // atanh + tmp = tmp :* se :/ (1 :- b:^2) // missing if b in {-1,1} + ci[|1,(j*2-1) \ .,(j*2)|] = + tanh((atanh(b) :- tmp, atanh(b) :+ tmp)) + } + else if (citype==4) { // log + tmp = tmp :* se :/ b + ci[|1,(j*2-1) \ .,(j*2)|] = exp((ln(b) :- tmp, ln(b) :+ tmp)) + } + else { // normal + ci[|1,(j*2-1) \ .,(j*2)|] = (b :- tmp:*se, b :+ tmp:*se) + } + } + } + } + // - at + at = J(r,1,1) + cname = st_local("atname2") + if ((cname!="") & (cname!="_coef") & (cname!="_eq")) { + if ( emode & + st_global("e(cmd)")=="margins" & + (st_local("bname")=="" | st_local("bname")=="b") & + (st_numscalar("e(k_at)")!=J(0,0,.) ? + st_numscalar("e(k_at)")>0 : 0) & + (cname=="at" | strtoreal(cname)<.) + ) + { + if (cname=="at") cname = "1" + at = coefplot_get_margins_at("e", strtoreal(cname), coefnm)' + // (modifies coefnm) + } + else { + coefplot_parse_cname(model, "at", cname, row, col, emode, + st_local("atismatrix")!="", bname, brow, bcol) + tmp = st_matrix(cname) + if (coefplot_notfound(model, cname, tmp)==0) { + coefplot_invalid_subscript(model, cname, tmp, row, col) + tmp = tmp[row, col] + if (row<.) tmp = tmp' + if (coefplot_notconformable(model, cname, tmp, r, 1)==0) { + at = tmp + } + } + } + } + // - auxiliary variables + cnames = tokens(st_local("aux")) // spaces in mspec not allowed + aux = J(r, cols(cnames), .) 
+ for (j=1; j<=cols(cnames); j++) { + cname = cnames[j] + coefplot_parse_cname(model, "aux", cname, row, col, emode, 0, bname, brow, bcol) + tmp = st_matrix(cname) + if (coefplot_notfound(model, cname, tmp)==0) { + coefplot_invalid_subscript(model, cname, tmp, row, col) + tmp = tmp[row, col] + if (row<.) tmp = tmp' + if (coefplot_notconformable(model, cname, tmp, r, 1)==0) { + aux[,j] = tmp + } + } + } + + // keep, drop, etc. + // - clear "bn" + coefnm = subinstr(coefnm,"bn.", ".") // #bn. + coefnm = subinstr(coefnm,"bno.", "o.") // #bno. + // - remove omitted + p = J(r, 1, 1) + if (st_local("omitted")=="") { + p = p :* (!strmatch(coefnm, "*o.*")) + } + else { + coefnm = substr(coefnm, 1:+2*(substr(coefnm, 1, 2):=="o."), .) // o. + coefnm = subinstr(coefnm, "o.", ".") // #o. + } + // - remove baselevels + if (st_local("baselevels")=="") { + p = p :* (!strmatch(coefnm, "*b.*")) + } + else { + coefnm = subinstr(coefnm, "b.", ".") // #b. + } + // keep + firsteqonly = 1 + keep = st_local("keep") + if (keep!="") { + keep = coefplot_parse_namelist(keep, "", "keep") + if (!allof(keep[,1], "")) firsteqonly = 0 + keep[,1] = editvalue(keep[,1],"","*") + p = p :* (rowsum(strmatch(eqnm, keep[,1]') :& + strmatch(coefnm, keep[,2]')):>0) + } + // drop + drop = st_local("drop") + if (drop!="") { + drop = coefplot_parse_namelist(drop, "", "drop") + if (!allof(drop[,1], "")) firsteqonly = 0 + drop[,1] = editvalue(drop[,1],"","*") + p = p :* (!(rowsum(strmatch(eqnm, drop[,1]') :& + strmatch(coefnm, drop[,2]')))) + } + // equation + if (firsteqonly) { + for (i=1; i<=r; i++) { // look for first nonzero equation + if (p[i]==1) { + p = p :* (eqnm:==eqnm[i]) + break + } + } + } + // apply selection + if (allof(p, 0)) { + st_local("empty", "1") + return + } + b = select(b, p) + V = select(V, p) + se = select(se, p) + df = select(df, p) + ci = select(ci, p) + eqnm = select(eqnm, p) + coefnm = select(coefnm, p) + at = select(at, p) + aux = select(aux, p) + r = rows(b) + // t and p values + t 
= b:/se + pval = J(r,1,.) + for (i=1; i<=r; i++) { + pval[i] = (df[i]>=. ? 1-normal(abs(t[i])) : ttail(df[i],abs(t[i])))*2 + } + + // eform + coefplot_eform(b, ci, eqnm, coefnm) + + // rescale + coefplot_rescale(b, ci, eqnm, coefnm) + + // collect transforms + trans = coefplot_collect_transforms(eqnm, coefnm) + + // collect mlabels + mlbl = J(r,1,"") + mlpos = J(r,1,.) + coefplot_mlabels(mlbl, mlpos, eqnm, coefnm) + + // rename + rename = st_local("rename") + if (rename!="") { + coefplot_rename(rename, st_local("renameregex")!="", + eqnm, coefnm) // modifies coefnm + } + + // rename equations and swap names + if (st_local("asequation2")!="") { + eqnm = J(r, 1, st_local("asequation2")) + } + rename = st_local("eqrename") + if (rename!="") { + coefplot_rename(rename, st_local("eqrenameregex")!="", + J(r, 1, ""), eqnm) // modifies eqnm + } + + // at is coef or eq + if (st_local("atname2")=="_coef") { + at = strtoreal(coefnm) + } + else if (st_local("atname2")=="_eq") { + at = strtoreal(eqnm) + } + + // apply at transform + if ((attrans=st_local("attransform"))!="") { + attmp = st_tempname() + attrans = subinstr(attrans, "@", "scalar(" + attmp + ")") + for (i=1; i<=r; i++) { + st_numscalar(attmp, at[i]) + stata("scalar " + attmp + " = " + attrans) + at[i] = st_numscalar(attmp) + } + } + + // check missings + coefplot_missing(model, b) + for (j=1; j<=cols(levels); j++) { + coefplot_cimissing(model, j, ci[|1,(j*2-1) \ .,(j*2)|]) + } + coefplot_atmissing(model, at) + + // return + if (st_local("swapnames")!="") swap(coefnm, eqnm) + st_local("n_ci", strofreal(cols(ci)/2)) + if (cols(C.ci)>cols(ci)) { + ci = ci, J(rows(ci), cols(C.ci)-cols(ci), .) + } + else if (cols(C.ci)cols(aux)) { + aux = aux, J(rows(aux), cols(C.aux)-cols(aux), .) + } + else if (cols(C.aux)rows(b)) | (col<. 
& col>cols(b))) { + printf("{err}%s: invalid subscript for %s\n", model, opt) + exit(503) + } +} + +void coefplot_parse_input(string scalar model, string scalar opt, + string scalar s, real scalar row, real scalar col) +{ + transmorphic t + string scalar r, c + string rowvector tokens + + t = tokeninit(" ", ("[", "]", ",")) + tokenset(t, s) + tokens = tokengetall(t) + if (cols(tokens)>6) coefplot_parse_input_error(model, opt) + if (!st_isname(tokens[1])) coefplot_parse_input_error(model, opt) + s = tokens[1] + if (cols(tokens)==1) { // "name" + row = 1; col = . + return + } + if (cols(tokens)<4) coefplot_parse_input_error(model, opt) + if (tokens[2]!="[" | tokens[cols(tokens)]!="]") + coefplot_parse_input_error(model, opt) + if (cols(tokens)==4) { // name[#] + r = tokens[3] + c = "." + } + else if ((r=tokens[3])==",") { // name[,#] + if (cols(tokens)!=5) coefplot_parse_input_error(model, opt) + r = "." + c = tokens[4] + } + else { // name[#,] or name[#,.] or name[.,#] + if (tokens[4]!=",") coefplot_parse_input_error(model, opt) + r = tokens[3] + if (cols(tokens)==5) c = "." + else c = tokens[5] + } + if (((r==".") + (c=="."))!=1) coefplot_parse_input_error(model, opt) + if (r==".") row = . + else row = coefplot_parse_input_num(model,opt, r) + if (c==".") col = . 
+ else col = coefplot_parse_input_num(model,opt, c) +} + +real scalar coefplot_parse_input_num(string scalar model, string scalar opt, + string scalar s) +{ + real scalar num + + num = strtoreal(s) + if (missing(num)) coefplot_parse_input_error(model, opt) + return(num) +} + +void coefplot_parse_input_error(string scalar model, string scalar opt) +{ + printf("{err}%s: invalid syntax in %s()\n", model, opt) + exit(198) +} + +void coefplot_parse_cname(string scalar model, string scalar opt, + string scalar cname, real scalar row, real scalar col, real scalar emode, + real scalar atismat, string scalar bname, real scalar brow, real scalar bcol) +{ + if (emode) { + coefplot_parse_input(model, opt, cname, row, col) + if (!atismat) cname = "e(" + cname + ")" + } + else { + if (coefplot_parse_input_isposint(cname)) { + row = brow; col = bcol + if (row<.) row = strtoreal(cname) + else col = strtoreal(cname) + cname = bname + } + else coefplot_parse_input(model, opt, cname, row, col) + } +} + +real scalar coefplot_parse_input_isposint(string scalar cname) +{ + real scalar n + + n = strtoreal(cname) + return(!missing(n) & n==trunc(n) & n>0) +} + +real scalar coefplot_notfound( + string scalar model, string scalar name, real matrix e, | real scalar q) +{ + if (e==J(0,0,.)) { + if (args()<4 | q==0) { + printf("{txt}(%s: %s not found)\n", model, name) + } + return(1) + } + return(0) +} + +real scalar coefplot_notconformable( + string scalar model, string scalar name, real matrix e, + real scalar r, real scalar c, | real scalar q) +{ + if (rows(e)!=r | cols(e)!=c) { + if (args()<6 |q==0) { + printf("{txt}(%s: %s not conformable)\n", model, name) + } + return(1) + } + return(0) +} + +void coefplot_eform(real colvector b, real matrix ci, string colvector eq, + string colvector coef) +{ + real scalar i + string matrix eform + real colvector match, p + + eform = st_local("eform2") + if (eform=="") { + return + } + if (eform=="*") { + b = exp(b) + ci = exp(ci) + return + } + eform = 
coefplot_parse_namelist(eform, "*", "eform") + match = J(rows(b), 1, 0) + for (i=1; i<=rows(eform); i++) { + p = select(1::rows(match), strmatch(eq, eform[i,1]) :& + strmatch(coef, eform[i,2])) + if (rows(p)==0) continue + match[p] = J(rows(p), 1, 1) + } + p = select(1::rows(match), match) + if (rows(p)==0) return + b[p] = exp(b[p]) + ci[p,] = exp(ci[p,.]) +} + +void coefplot_rescale(real colvector b, real matrix ci, string colvector eq, + string colvector coef) +{ + real scalar i, j + string matrix rescale, names + real colvector c, match, p + + rescale = st_local("rescale") + if (rescale=="") { + return + } + if (strtoreal(rescale)<.) { + c = strtoreal(rescale) + } + else { + rescale = coefplot_parse_matchlist(rescale, "rescale") + c = J(rows(b), 1, 1) + match = J(rows(b), 1, 0) + for (i=1; i<=rows(rescale); i++) { + if (strtoreal(rescale[i,2])>=.) { + display("{err}rescale(): invalid value") + exit(198) + } + names = coefplot_parse_namelist(rescale[i,1], "*", "rescale") + for (j=1; j<=rows(names); j++) { + p = select(1::rows(match), strmatch(eq, names[j,1]) :& + strmatch(coef, names[j,2]) :& (match:==0)) + if (rows(p)==0) continue + c[p] = J(rows(p), 1, strtoreal(rescale[i,2])) + match[p] = J(rows(p), 1, 1) + } + } + } + b = b :* c + ci = ci :* c +} + +string colvector coefplot_collect_transforms(string colvector eq, + string colvector coef) +{ + real scalar i, j + string matrix trans, names + string colvector T + real colvector match, p + + T = J(rows(eq), 1, "") + trans = st_local("transform") + if (trans=="") { + return(T) + } + trans = coefplot_parse_matchlist(trans, "transform") + match = J(rows(eq), 1, 0) + for (i=1; i<=rows(trans); i++) { + names = coefplot_parse_namelist(trans[i,1], "*", "transform") + for (j=1; j<=rows(names); j++) { + p = select(1::rows(match), strmatch(eq, names[j,1]) :& + strmatch(coef, names[j,2]) :& (match:==0)) + if (rows(p)==0) continue + T[p] = J(rows(p), 1, trans[i,2]) + match[p] = J(rows(p), 1, 1) + } + } + return(T) +} + 
+void coefplot_mlabels(string colvector mlbl, real colvector mlpos, + string colvector eq, string colvector coef) +{ + real scalar i, j + string matrix mlab, names + real colvector match, p + + mlab = st_local("mlabels2") + if (mlab=="") { + return + } + mlab = coefplot_parse_matchlist(mlab, "mlabels", 2) + match = J(rows(eq), 1, 0) + for (i=1; i<=rows(mlab); i++) { + names = coefplot_parse_namelist(mlab[i,1], "*", "mlabels") + for (j=1; j<=rows(names); j++) { + p = select(1::rows(match), strmatch(eq, names[j,1]) :& + strmatch(coef, names[j,2]) :& (match:==0)) + if (rows(p)==0) continue + mlbl[p] = J(rows(p), 1, mlab[i,3]) + mlpos[p] = J(rows(p), 1, strtoreal(mlab[i,2])) + match[p] = J(rows(p), 1, 1) + } + } +} + +void coefplot_rename(string scalar rename, real scalar regex, + string colvector eq, string colvector coef) +{ + real scalar i, j, rl + real colvector p, p0, match + string matrix names + + if (regex) { + rename = coefplot_parse_matchlist(rename, "rename") + p0 = 1::rows(coef) + for (i=1; i<=rows(rename); i++) { + names = coefplot_parse_namelist(rename[i,1], "*", "rename") + for (j=1; j<=rows(names); j++) { + if (rows(p0)==0) return + match = (strmatch(eq[p0], names[j,1]) :& regexm(coef[p0], names[j,2])) + p = select(p0, match) + if (rows(p)==0) continue + coef[p] = coefplot_regexr(coef[p], names[j,2], rename[i,2]) + p0 = select(p0, match:==0) + } + } + return + } + rename = coefplot_parse_matchlist(rename, "rename") + p0 = 1::rows(coef) + for (i=1; i<=rows(rename); i++) { + // syntax: *abc for suffix rename + // abc* for prefix rename + // abc for exact rename + names = coefplot_parse_namelist(rename[i,1], "*", "rename") + for (j=1; j<=rows(names); j++) { + if (rows(p0)==0) return + if (substr(names[j,2],1,1)=="*") { + rl = strlen(names[j,2])-1 + match = strmatch(eq[p0], names[j,1]) :& + (substr(coef[p0], -rl, .):==substr(names[j,2], -rl, .)) + p = select(p0, match) + if (rows(p)==0) continue + coef[p] = substr(coef[p], 1, strlen(coef[p]) :- rl) :+ 
rename[i,2] + } + else if (substr(names[j,2],-1,1)=="*") { + rl = strlen(names[j,2])-1 + match = strmatch(eq[p0], names[j,1]) :& + (substr(coef[p0], 1, rl):==substr(names[j,2], 1, rl)) + p = select(p0, match) + if (rows(p)==0) continue + coef[p] = rename[i,2] :+ substr(coef[p], 1 + rl, .) + } + else { + match = strmatch(eq[p0], names[j,1]) :& (coef[p0]:==names[j,2]) + p = select(p0, match) + if (rows(p)==0) continue + coef[p] = J(rows(p), 1, rename[i,2]) + } + p0 = select(p0, match:==0) + } + } +} + +string matrix coefplot_regexr(string matrix x, string matrix y, string matrix z) +{ + string matrix res + real scalar r, R, c, C + transmorphic scalar rx, cx, ry, cy, rz, cz + + R = max((rows(x),rows(y),rows(z))) + C = max((cols(x),cols(y),cols(z))) + rx = (rows(x)==1 ? &1 : (rows(x)cols(atstats)) continue + j = atstats[pos] + p = select(1..c, strmatch(coefnm, strofreal(i) + "._at*")') + at[p] = J(1, cols(p), at0[i,j]) + } + return(at) +} + +void coefplot_missing(string scalar model, real colvector b) +{ + if (hasmissing(b)) { + printf("{txt}(%s: b missing for some coefficients)\n", model) + } +} + +void coefplot_atmissing(string scalar model, real colvector at) +{ + if (hasmissing(at)) { + if (nonmissing(at)) { + printf("{txt}(%s: 'at' missing for some coefficients)\n", model) + return + } + printf("{txt}(%s: could not determine 'at')\n", model) + } +} + +void coefplot_cimissing(string scalar model, real scalar ci, real matrix tmp) +{ + if (hasmissing(tmp)) { + if (nonmissing(tmp)) { + printf("{txt}(%s: CI%g missing for some coefficients)\n", + model, ci) + return + } + printf("{txt}(%s: could not determine CI%g)\n", model, ci) + } +} + +void coefplot_add_label(struct coefplot_struct scalar C, + string scalar name, real scalar i, string scalar lbl, real scalar force) +{ + pointer(string colvector) scalar l + + if (name=="plot") l = &C.plotlbl + else if (name=="by") l = &C.bylbl + else if (name=="coef") l = &C.coeflbl + else return + if (rows(*l)1) c = strtoreal(s[2]) 
+ else c = 1 + s = s[1] + // get relevant data + p = p0 + if (subgr<.) { + p = select(p, C.by[p]:==subgr) + if (rows(p)==0) return("") + } + if (plot<.) { + p = select(p, C.plot[p]:==plot) + if (rows(p)==0) return("") + } + if (s=="b") v = C.b[p] + else if (s=="v") v = C.V[p] + else if (s=="se") v = C.se[p] + else if (s=="t") v = C.t[p] + else if (s=="tabs") v = abs(C.t[p]) + else if (s=="df") v = C.df[p] + else if (s=="p") v = C.pval[p] + else if (s=="ll") { + if ((c*2)>cols(C.ci)) return("") + v = C.ci[p,c*2-1] + } + else if (s=="ul") { + if ((c*2)>cols(C.ci)) return("") + v = C.ci[p,c*2] + } + else if (s=="aux") { + if (c>cols(C.aux)) return("") + v = C.aux[p,c] + } + // establish sort order (within equations) + if (desc) v = -v + eqs = C.eqnm[p] + eq = J(rows(eqs),1,1) + for (i=2; i<=rows(eq); i++) { + if (eqs[i]!=eqs[i-1]) eq[i] = eq[i-1]+1 + else eq[i] = eq[i-1] + } + p = sort((eq, v, p), 1..3)[,3] + // collect ordered names + eqs = ("`"+`"""') :+ C.eqnm[p] :+ (`"""' + "'") + coefs = ("`"+`"""') :+ C.coefnm[p] :+ (`"""' + "'") + s = eqs[1] + ": " + coefs[1] + for (i=2; i<=rows(p); i++) { + if (eqs[i]!=eqs[i-1]) s = s + " * " + eqs[i] + ":" + s = s + " " + coefs[i] + } + s = s + " *" + return(s) +} + +void coefplot_order(real colvector p0, string scalar order, + struct coefplot_struct scalar C) +{ + real scalar i, j, k + real colvector p, tag, tmp + string colvector eqs + + if (order=="") return + order = coefplot_parse_namelist(order, "", "order") + if (order[1,1]=="" & !allof(order[,1], "")) { + display("{err}inconsistent order(): " + + "specify equations for all or for none") + exit(198) + } + p = J(0,1,.) 
+ tag = J(C.r, 1, 0) + k = 0 + // order coefficients (general) + if (order[1,1]!="") { + for (i=1; i<=rows(order); i++) { + k++ + if (order[i,2]==".") { + coefplot_appendemptyrow(C, order[i,1], tag, p0) + p = p \ C.r + continue + } + tag = tag :+ (k * strmatch(C.coefnm[p0], order[i,2]) :* !tag :* + (C.eqnm[p0]:==order[i,1])) + if (anyof(tag, k)) { + p = p \ select(p0, tag:==k) + } + } + if (anyof(tag, 0)) { + p = p \ select(p0, tag:==0) + } + swap(p,p0); return + } + // order coefficients within equations + order = order[,2]' + eqs = C.eqnm[p0[1]] + for (i=2; i<=C.r; i++) { // get equations (in right order) + if (C.eqnm[p0[i]]!=C.eqnm[p0[i-1]]) { + eqs = eqs \ C.eqnm[p0[i]] + } + } + for (j=1; j<=rows(eqs); j++) { + for (i=1; i<=cols(order); i++) { + k++ + if (order[i]==".") { + coefplot_appendemptyrow(C, eqs[j], tag, p0) + p = p \ C.r + continue + } + tag = tag :+ (k * strmatch(C.coefnm[p0], order[i]) :* !tag :* + (C.eqnm[p0]:==eqs[j])) + if (anyof(tag, k)) { + p = p \ select(p0, tag:==k) + } + } + tmp = (tag:==0):&(C.eqnm[p0]:==eqs[j]) + if (any(tmp)) { + p = p \ select(p0, tmp) + tag = tag :+ tmp + } + } + swap(p,p0); return +} + +void coefplot_appendemptyrow(struct coefplot_struct scalar C, + string scalar eq, real colvector tag, real colvector p0) +{ + C.b = C.b \ . + C.V = C.V \ . + C.se = C.se \ . + C.t = C.t \ . + C.df = C.df \ . + C.pval = C.pval \ . + C.at = C.at \ 1 + C.aux = C.aux \ J(1, cols(C.aux), .) + C.mlbl = C.mlbl \ "" + C.mlpos = C.mlpos \ . + C.plot = C.plot \ . + C.by = C.by \ . + C.ci = C.ci \ J(1, cols(C.ci), .) + C.coefnm = C.coefnm \ "" + C.eqnm = C.eqnm \ eq + C.trans = C.trans \ "" + tag = tag \ . 
+ C.r = C.r + 1 + p0 = p0 \ C.r +} + +void coefplot_coeflbls(struct coefplot_struct scalar C) +{ + real scalar i, j, k + real colvector tag + string matrix labels, names + + C.coeflbl = J(C.r, 1, "") + tag = J(C.r, 1, 0) + labels = coefplot_parse_matchlist(st_local("coeflabels"), "coeflabels") + for (j=1; j<=rows(labels); j++) { + names = coefplot_parse_namelist(labels[j,1], "*", "coeflabels") + for (k=1; k<=rows(names); k++) { + tag = tag :+ j * strmatch(C.eqnm, names[k,1]) :* + strmatch(C.coefnm, names[k,2]) :* (tag:==0) + } + } + _editmissing(tag, 0) + for (i=1; i<=C.r; i++) { + if (tag[i] & C.plot[i]<.) { // C.plot is missing for gaps from order() + C.coeflbl[i] = labels[tag[i],2] + } + } +} + +void coefplot_multiple_eqs(struct coefplot_struct scalar C) +{ + real scalar i, meqs + string matrix eq + + eq = C.eqnm, strofreal(C.plot), strofreal(C.by) + + _sort(eq, (3,2,1)) + meqs = 0 + for (i=2; i<=rows(eq); i++) { + if (eq[i,1]!=eq[i-1,1] & eq[i,2]==eq[i-1,2] & eq[i,3]==eq[i-1,3]) { + meqs = 1 + break + } + } + if (meqs==0) { + C.eqnm = J(rows(C.eqnm), 1, "_") + } + st_local("meqs", strofreal(meqs)) +} + +void coefplot_bycoefs(struct coefplot_struct scalar C) +{ + real scalar i, j, k, meqs + real colvector p + string scalar eql + string colvector bylbl, last, eqlbls + + meqs = (st_local("meqs")!="0") & (st_local("noeqlabels")=="") + if (meqs) eqlbls = tokens(st_local("eqlabels"))' + bylbl = C.bylbl + C.bylbl = C.coeflbl + swap(C.at, C.by) + j = 0 + k = 1 + for (i=1; i<=C.r; i++) { + if (i>1) { + if (C.eqnm[i]!=C.eqnm[i-1]) k++ + } + if ((C.eqnm[i], C.coefnm[i])!=last) j++ + last = (C.eqnm[i], C.coefnm[i]) + C.by[i] = j + if (meqs) { + if (k<=rows(eqlbls)) eql = eqlbls[k] + else eql = C.eqnm[i] + C.bylbl[j] = eql + ": " + C.bylbl[i] + } + else C.bylbl[j] = C.bylbl[i] + } + C.bylbl = C.bylbl[|1 \ j|] + C.eqnm = J(C.r, 1, "_") + C.coefnm = strofreal(C.at) + C.coeflbl = J(C.r, 1, "") + for (i=1; i<=C.r; i++) { + C.coeflbl[i] = bylbl[C.at[i]] + } + C.at = J(C.r, 1, 1) 
+ p = coefplot_niceorder(C.eqnm, C.coefnm, C.plot, C.by, C.r) + C.b = C.b[p] + C.V = C.V[p] + C.se = C.se[p] + C.t = C.t[p] + C.df = C.df[p] + C.pval = C.pval[p] + C.at = C.at[p] + C.aux = C.aux[p,] + C.mlbl = C.mlbl[p] + C.mlpos = C.mlpos[p] + C.plot = C.plot[p] + C.by = C.by[p] + C.ci = C.ci[p,] + C.coefnm = C.coefnm[p] + C.eqnm = C.eqnm[p] + C.trans = C.trans[p] + C.coeflbl = C.coeflbl[p] + st_local("n_subgr", strofreal(rows(C.bylbl))) +} + +void coefplot_catvals(struct coefplot_struct scalar C) +{ + real scalar i, j, k, e, eqgap, ggap + real colvector pos, at0 + string scalar glbls, xlab + string colvector eqs + string matrix groups, names + + // determine plot positions + for (i=2; i<=C.r; i++ ) { + C.at[i] = C.at[i-1] + + ((C.coefnm[i]!=C.coefnm[i-1]) // new coefficient + | (C.eqnm[i]!=C.eqnm[i-1]) // new equation + | (C.plot[i]==.)) // gap from order() + } + + // reposition + coefplot_relocate(C) + C.xmin = min(C.at) + + // equation numbers and labels + eqs = tokens(st_local("eqlabels"))' + xlab = st_local("eqxlab") + C.eq = J(C.r, 1, 1) + C.eqlbl = J(C.r, 1, "") + j = 1 + if (j>rows(eqs)) C.eqlbl[j] = coefplot_get_xlab(C.eqnm[1], xlab) + else C.eqlbl[j] = eqs[j] + for (i=2; i<=C.r; i++ ) { + C.eq[i] = C.eq[i-1] + if (C.eqnm[i]!=C.eqnm[i-1]) { + j++ + if (j>rows(eqs)) C.eqlbl[j] = coefplot_get_xlab(C.eqnm[i], xlab) + else C.eqlbl[j] = eqs[j] + C.eq[i] = C.eq[i] + 1 + } + } + C.eqlbl = C.eqlbl[|1,1 \ j,.|] + + // group IDs + groups = st_local("groups") + C.grp = J(C.r, 1, 0) + if (groups!="") { + groups = coefplot_parse_matchlist(groups, "groups") + eqs = uniqrows(C.eqnm) + for (e=1; e<=rows(eqs); e++) { + for (j=1; j<=rows(groups); j++) { + names = coefplot_parse_namelist(groups[j,1], "*", "groups") + for (k=1; k<=rows(names); k++) { + C.grp = C.grp :+ j * strmatch(C.eqnm, names[k,1]) :* + strmatch(C.coefnm, names[k,2]) :* (C.grp:==0) :* + (C.eqnm:==eqs[e]) :* (C.plot:<.) 
                }
                // make group membership contiguous: fill the whole span
                // between the first and last tagged row of this equation
                for (i=1; i<=C.r; i++) {
                    pos = select(1::C.r, (C.grp:==j) :& (C.eqnm:==eqs[e]))
                    if (length(pos)>0) {
                        C.grp[|pos[1] \ pos[rows(pos)]|] =
                            J((pos[rows(pos)]-pos[1]+1), 1, j)
                    }
                }
            }
        }
        // return the (quoted) group labels to Stata in local groups
        for (j=1; j<=rows(groups); j++) {
            glbls = glbls + " " + "`" + `"""' + groups[j,2] + `"""' + "'"
        }
        st_local("groups", strtrim(glbls))
    }

    // add gaps between equations and groups
    eqgap = strtoreal(st_local("eqgap"))
    ggap = strtoreal(st_local("ggap"))
    if (eqgap==0 & ggap==0) return
    at0 = C.at
    for (i=2; i<=C.r; i++ ) {
        C.at[i] = C.at[i-1] +
            (at0[i,1] - at0[i-1,1]) +                            // update downstream
            (C.eqnm[i]!=C.eqnm[i-1])*eqgap +                     // new eq
            (C.eqnm[i]==C.eqnm[i-1] & C.grp[i]!=C.grp[i-1])*ggap // new group
    }
}

// Resolve an axis label for an equation name: if interact is nonempty, call
// the Stata helper compile_xlabel and return the label it leaves in local
// coeflbl; otherwise return the name unchanged.
string scalar coefplot_get_xlab(string scalar v, string scalar interact)
{
    if (interact=="") return(v)
    stata("compile_xlabel, v(" + v + ") interact(" + interact + ")")
    return(st_local("coeflbl"))
}

// Apply the relocate() option: assign user-specified plot positions to the
// matched coefficients, then re-sort all row-wise results by position.
void coefplot_relocate(struct coefplot_struct scalar C)
{
    real scalar i, j, k
    real colvector tag, p
    string matrix pos, names

    if (st_local("relocate")=="") return

    // set positions: tag each row with the first matching relocate() spec
    tag = J(C.r, 1, 0)
    pos = coefplot_parse_matchlist(st_local("relocate"), "relocate")
    for (j=1; j<=rows(pos); j++) {
        names = coefplot_parse_namelist(pos[j,1], "*", "relocate")
        for (k=1; k<=rows(names); k++) {
            tag = tag :+ j * strmatch(C.eqnm, names[k,1]) :*
                strmatch(C.coefnm, names[k,2]) :* (tag:==0)
        }
    }
    _editmissing(tag, 0)
    p = strtoreal(pos[,2])
    for (i=1; i<=C.r; i++) {
        if (tag[i] & (C.plot[i]<.)) {
            C.at[i] = p[tag[i]]
        }
    }

    // reorder (secondary key 1::C.r keeps the sort stable)
    p = order((C.at, (1::C.r)), (1,2))
    C.b = C.b[p]
    C.V = C.V[p]
    C.se = C.se[p]
    C.t = C.t[p]
    C.df = C.df[p]
    C.pval = C.pval[p]
    C.at = C.at[p]
    C.aux = C.aux[p,]
    C.mlbl = C.mlbl[p]
    C.mlpos = C.mlpos[p]
    C.plot = C.plot[p]
    C.by = C.by[p]
    C.ci = C.ci[p,]
    C.coefnm = C.coefnm[p]
    C.eqnm = C.eqnm[p]
    C.trans = C.trans[p]
    C.coeflbl = C.coeflbl[p]
}

void
coefplot_headings(struct coefplot_struct scalar C)
{
    // Insert heading rows above (groups of) coefficients: shift plot
    // positions to make room and collect "position label" pairs in Stata
    // local hlbls. hoff is a position offset, hgap an extra gap before a
    // heading.
    real scalar i, j, k, off, gap, fskip
    real colvector tag
    string scalar hlbls
    string matrix headings, names

    off = strtoreal(st_local("hoff"))
    gap = strtoreal(st_local("hgap"))

    // if headings are equations
    if (st_local("eqashead")!="") {
        fskip = C.at[1]
        j = 0
        for (i=1; i<=C.r; i++) {
            if (C.eq[i]!=j) {
                // shift everything from here on down by one slot (plus gap)
                C.at[|i \ .|] = C.at[|i \ .|] :+ 1 :+ (gap*(i>1))
                hlbls = hlbls + " " + strofreal(C.at[i] - fskip + off)
            }
            j = C.eq[i]
        }
        st_local("hlbls", strtrim(hlbls))
        return
    }

    // if headings are not equations
    headings = st_local("headings")
    if (headings=="") {
        st_local("hlbls", "")
        return
    }
    tag = J(C.r, 1, 0)
    if (C.r>1) {
        // mark rows sharing a position with the previous row so they are
        // never matched again below
        tag[|2 \ .|] = tag[|2 \ .|] :+
            (C.at[|2 \ .|]:==C.at[|1 \ C.r-1|])
        _editvalue(tag, 1, .)
    }
    headings = coefplot_parse_matchlist(headings, "headings")
    for (j=1; j<=rows(headings); j++) {
        names = coefplot_parse_namelist(headings[j,1], "*", "headings")
        for (k=1; k<=rows(names); k++) {
            tag = tag :+ j * strmatch(C.eqnm,names[k,1]) :*
                strmatch(C.coefnm, names[k,2]) :* (tag:==0)
        }
    }
    _editmissing(tag, 0)
    for (i=1; i<=C.r; i++) {
        if (tag[i] & (C.plot[i]<.)) {
            C.at[|i \ .|] = C.at[|i \ .|] :+ 1 :+ (gap*(i>1))
            hlbls = hlbls + " " + strofreal(C.at[i] - 1 + off) + " " +
                "`" + `"""' + headings[tag[i],2] + `"""' + "'"
        }
    }
    st_local("hlbls", strtrim(hlbls))
}

// Parse a matchlist of the form "spec = value(s) [spec = value(s) ...]" into
// a string matrix with the spec in column 1 and nc value columns; opt is the
// option name used in error messages; nc0 (optional) is the number of value
// columns (default 1).
string matrix coefplot_parse_matchlist(string scalar s, string scalar opt,
    | real scalar nc0)
{
    real scalar c, a, b, j, i, nc
    real rowvector eqpos
    string rowvector stok
    string matrix res

    if (args()==3) nc = nc0
    else nc = 1
    if (strtrim(s)=="") return(J(0, 1+nc, ""))
    stok = _coefplot_parse_matchlist_stok(s)
    c = cols(stok)
    a = b = i = 1
    eqpos = select(1::c, stok':=="=")'  // positions of the "=" separators
    res = J(cols(eqpos), 1+nc, "")
    for (j=1; j<=cols(eqpos); j++) {
        b = eqpos[j]
        // "=" must not be first or last, and enough value tokens must follow
        if (b==a | b==c | (b+nc)>cols(stok)) {
            printf("{err}%s(): invalid matchlist\n", opt)
            exit(198)
        }
        res[i,] = (invtokens(stok[|a \ b-1|]), stok[|b+1 \ b+nc|])
        a = b + 1 + nc
        i++
    }
    // leftover tokens after the last "spec = value(s)" pair are an error
    if (a<=c) {
        printf("{err}%s(): invalid matchlist\n", opt)
        exit(198)
    }
    nc = nc + 1
    for (i=1; i<=rows(res); i++) { // strip quotes in last column
        if (substr(res[i,nc], 1, 1)==`"""') {
            res[i,nc] = substr(res[i,nc], 2, strlen(res[i,nc])-2)
        }
        else if (substr(res[i,nc], 1, 2)=="`" + `"""') {
            res[i,nc] = substr(res[i,nc], 3, strlen(res[i,nc])-4)
        }
    }
    return(res)
}

// Tokenizer used by coefplot_parse_matchlist(): splits a string at blanks
// and equal signs while keeping quoted and parenthesized substrings intact.
string rowvector _coefplot_parse_matchlist_stok(string scalar s)
{
    real scalar i, c, j
    string scalar tok, t0
    string rowvector pchars, stok
    transmorphic t

    // Step 1: split input at blanks and equal signs while binding on quotes
    // and parentheses
    pchars = (" ", "=")
    t = tokeninit("", pchars, (`""""', `"`""'"', "()"))
    tokenset(t, s)
    stok = tokengetall(t)
    // Step 2: remove blanks and merge "(...)" with surrounding tokens (unless
    // blank or equal sign); tokens are compacted in place from the right
    c = cols(stok)
    j = c + 1
    for (i=c; i; i--) {
        tok = stok[i]
        if (tok==" ") continue // remove blanks
        if (i>1) {
            if (substr(tok,1,1)=="(") {
                if (!anyof(pchars, stok[i-1])) {
                    stok[i-1] = stok[i-1] + tok
                    continue
                }
            }
            else if (tok!="=") {
                if (substr(stok[i-1],-1,1)==")") {
                    stok[i-1] = stok[i-1] + tok
                    continue
                }
            }
        }
        stok[--j] = tok
    }
    // return result
    if (j>c) return(J(1,0,""))
    return(stok[|j \ .|])
}

// Parse a list of "[eq:]name" specifications into a two-column matrix
// (equation, name). defeq is the equation applied to names without an
// explicit "eq:" prefix; a specified "eq:" persists for subsequent names.
// opt is the option name used in error messages.
string matrix coefplot_parse_namelist(string scalar s, string scalar defeq,
    string scalar opt)
{
    transmorphic t
    real scalar i, c
    string rowvector stok
    string matrix res

    if (s=="") return(J(0,2,""))
    t = tokeninit(" ", ":")
    tokenset(t, s)
    stok = tokengetall(t)
    c = cols(stok)
    for (i=1; i<=c; i++) { // strip quotes
        if (substr(stok[i], 1, 1)==`"""') {
            stok[i] = substr(stok[i], 2, strlen(stok[i])-2)
        }
        else if (substr(stok[i], 1, 2)=="`" + `"""') {
            stok[i] = substr(stok[i], 3, strlen(stok[i])-4)
        }
    }
    res = J(0, 2, "")
    for
    (i=1;i<=c;i++) {
        // look ahead for ":" to distinguish "eq:", "eq:name", and bare names
        if (i+1<=c) {
            if (stok[i+1]==":") {
                if (stok[i]==":") {
                    printf("{err}%s(): invalid namelist\n", opt)
                    exit(198)
                }
                if (i+2<=c) {
                    if (stok[i+2]==":") {
                        printf("{err}%s(): invalid namelist\n", opt)
                        exit(198)
                    }
                    if (i+3<=c) {
                        if (stok[i+3]==":") { // "... eq: eq: ..."
                            res = res \ (stok[i], "*")
                            i++
                            continue
                        }
                    }
                    res = res \ (stok[i], stok[i+2]) // "... eq:name ..."
                    defeq = stok[i]  // this eq persists for subsequent names
                    i = i + 2
                    continue
                }
                res = res \ (stok[i], "*") // "... eq:"
                i++
                continue
            }
        }
        if (stok[i]==":") {
            printf("{err}%s(): invalid namelist\n", opt)
            exit(198)
        }
        res = res \ (defeq, stok[i])
    }
    return(res)
}

// Copy the assembled results from the coefplot struct into the Stata
// (temporary) variables whose names are held in local macros; rows 1..C.r
// are filled.
void coefplot_put(struct coefplot_struct scalar C)
{
    real scalar i
    real rowvector vi

    st_store((1,C.r), st_local("b"), C.b)
    st_store((1,C.r), st_local("V"), C.V)
    st_store((1,C.r), st_local("se"), C.se)
    st_store((1,C.r), st_local("t"), C.t)
    st_store((1,C.r), st_local("df"), C.df)
    st_store((1,C.r), st_local("pval"), C.pval)
    st_store((1,C.r), st_local("at"), C.at)
    st_sstore((1,C.r), st_local("mlbl"), C.mlbl)  // string variable
    st_store((1,C.r), st_local("mlpos"), C.mlpos)
    st_store((1,C.r), st_local("plot"), C.plot)
    st_store((1,C.r), st_local("by"), C.by)
    st_store((1,C.r), st_local("eq"), C.eq)
    st_store((1,C.r), st_local("grp"), C.grp)
    // confidence limits: pairs of ll#/ul# variables
    vi = J(1, cols(C.ci), .)
    for (i=1; i<=(cols(vi)/2); i++) {
        vi[1,i*2-1] = st_varindex(st_local("ll" + strofreal(i)))
        vi[1,i*2] = st_varindex(st_local("ul" + strofreal(i)))
    }
    st_store((1,C.r), vi, C.ci)
    vi = J(1, cols(C.aux), .)
+ for (i=1; i<=cols(vi); i++) { + vi[1,i] = st_varindex(st_local("aux" + strofreal(i))) + } + st_store((1,C.r), vi, C.aux) +} + +void coefplot_at_unique(struct coefplot_struct scalar C) +{ + assert(rows(uniqrows((C.by,C.at)))==C.r) +} + +void coefplot_apply_transform(struct coefplot_struct scalar C) +{ + real scalar i, j, rc, x0, x1, mis + string scalar trans0, trans, trans1 + string rowvector vname + real rowvector vpos + + if (allof(C.trans[i], "")) return + vname = J(1, cols(C.ci), "") + for (j=1; j<=(cols(C.ci)/2); j++) { + vname[j*2-1] = st_local("ll"+strofreal(j)) + vname[j*2] = st_local("ul"+strofreal(j)) + } + vname = st_local("b"), vname + vpos = st_varindex(vname) + mis = 0 + for (i=1; i<=C.r; i++) { + if (C.trans[i]=="") continue + if (C.trans[i]!=trans0) trans0 = C.trans[i] + trans = _coefplot_inject_temvars(trans0, cols(C.ci), cols(C.aux)) + for (j=1; j<=cols(vname); j++) { + trans1 = subinstr(trans, "@", vname[j]) + x0 = _st_data(i, vpos[j]) + rc = _stata("replace " + vname[j] + " = " + trans1 + " in " + strofreal(i), 1) + if (rc) { + printf("{err}transform '%s' invalid\n", trans0) + exit(198) + } + x1 = _st_data(i, vpos[j]) + if (x1>=. 
& x1!=x0) mis = 1 + } + } + if (mis) { + display("{txt}(transform missing for some coefficients or CIs)") + } +} + +void coefplot_lbl_is_multiline() +{ + string scalar lbl + + lbl = strtrim(st_local("lbl")) + if ((substr(lbl, 1, 1)==`"""' | substr(lbl, 1, 2)==("`" + `"""'))) { + return(499) + } +} + +void coefplot_get_coefs(struct coefplot_struct scalar C) +{ + st_local("coefs", + invtokens("`" :+ `"""' :+ C.coefnm' :+ `"""' :+ "'")) +} + +void coefplot_get_coeflbl(struct coefplot_struct scalar C, real scalar i) +{ + st_local("coeflbl", C.coeflbl[i]) +} + +void coefplot_get_plotlbl(struct coefplot_struct scalar C, real scalar i) +{ + st_local("plotlbl", C.plotlbl[i]) +} + +void coefplot_get_bylbl(struct coefplot_struct scalar C, real scalar i) +{ + st_local("bylbl", C.bylbl[i]) +} + +void coefplot_get_eqlbl(struct coefplot_struct scalar C, real scalar i) +{ + st_local("eqlbl", C.eqlbl[i]) +} + +void coefplot_ticks_and_labels(struct coefplot_struct scalar C) +{ + real scalar i, between + string scalar labels, grid, space + + between = (st_local("grid")=="between") + st_local("xrange", strofreal(C.xmin-0.5) + " " + + strofreal(max(C.at)+0.5)) + if (between & C.plot[1]<.) { + if (C.xmin1) { + if ((C.eqnm[i], C.coefnm[i])==(C.eqnm[i-1], C.coefnm[i-1])) { + continue + } + if (between & C.plot[i-1]<.) { + grid = grid + space + + strofreal(C.at[i-1] + min((0.5, (C.at[i]-C.at[i-1])/2))) + } + } + if (C.plot[i]<.) { + labels = labels + space + strofreal(C.at[i]) + + " `" + `"""' + C.coeflbl[i] + `"""' + "'" + if (between==0) grid = grid + space + strofreal(C.at[i]) + else if (i>1) { + grid = grid + space + + strofreal(C.at[i] - min((0.5, (C.at[i]-C.at[i-1])/2))) + } + space = " " + } + } + st_local("xlabels", labels) + st_local("xgrid", grid) +} + +void coefplot_combine_ciopts() +{ + string scalar opt, p + + opt = st_local("opt") + p = substr(opt, 2, strpos(opt,"(")-2) // get # from "p#(...)" + if (strtoreal(p)<.) 
    {
        opt = substr(opt, 3+strlen(p), strlen(opt)-3-strlen(p)) // get contents
        st_local("opt_"+p, st_local("opt_"+p) + " " + opt)
    }
    else {
        st_local("options", st_local("options") + " " + opt)
    }
}

// Split the contents of Stata local -lin- at the first top-level comma
// (quotes bind): everything before the comma goes into local -lhs-, the
// comma and everything after it into local -rhs-.
void coefplot_parsecomma(string scalar lhs, string scalar rhs, string scalar lin)
{
    transmorphic t
    string scalar l, r, token

    t = tokeninit("", ",", (`""""', `"`""'"'), 0, 0)
    tokenset(t, st_local(lin))
    while ((token = tokenget(t))!="") {
        if (token==",") {
            r = token
            // collect the remainder (including the comma) into rhs
            while ((token = tokenget(t))!="") {
                r = r + token
            }
            st_local(lhs, l)
            st_local(rhs, r)
            return
        }
        l = l + token
    }
    st_local(lhs, l)
    st_local(rhs, r)
}

// Replace @-placeholders (e.g. @b, @ll1, @aux2) in Stata local -s- by the
// corresponding temporary variable names, in place.
void coefplot_inject_temvars(string scalar s, real scalar nci,
    real scalar naux)
{
    st_local(s, _coefplot_inject_temvars(st_local(s), nci, naux))
}
// Worker for coefplot_inject_temvars(): returns -s- with each @name replaced
// by the content of the like-named local macro. Bare @ll/@ul/@aux default to
// the first instance (@ll1/@ul1/@aux1); unknown names raise error 111.
string scalar _coefplot_inject_temvars(string scalar s, real scalar nci,
    real scalar naux)
{
    real scalar i
    string scalar v, vname
    string rowvector nspace

    // build the namespace of valid placeholder names
    nspace = J(1, nci*2 + naux, "")
    for (i=1; i<=nci; i++) {
        nspace[i*2-1] = "ll" + strofreal(i)
        nspace[i*2] = "ul" + strofreal(i)
    }
    for (i=1; i<=naux; i++) {
        nspace[nci*2+i] = "aux" + strofreal(i)
    }
    nspace = nspace, ("b", "V", "se", "t", "df", "pval", "at", "mlbl", "mlpos", "plot", "by")
    while (regexm(s,"@[Va-z]+[0-9]*")) {
        v = regexs()
        vname = substr(v, 2, .)
        if (vname=="ll" | vname=="ul" | vname=="aux") {
            vname = vname + "1"  // bare @ll/@ul/@aux mean the first instance
        }
        if (anyof(nspace, vname)==0) {
            printf("{err}%s not found\n", v)
            exit(error(111))
        }
        s = subinstr(s, v, st_local(vname), 1)
    }
    return(s)
}

end



diff --git a/110/replication_package/replication/ado/plus/c/coefplot.sthlp b/110/replication_package/replication/ado/plus/c/coefplot.sthlp
new file mode 100644
index 0000000000000000000000000000000000000000..4e4af83bb251fba96858733f8e7553bf633dffd0
--- /dev/null
+++ b/110/replication_package/replication/ado/plus/c/coefplot.sthlp
@@ -0,0 +1,1817 @@
{smcl}
{* *!
version 1.5.2 21apr2022 Ben Jann}{...} +{vieweralsosee "[G-2] graph" "help graph"}{...} +{vieweralsosee "[R] estimates" "help estimates"}{...} +{vieweralsosee "[R] marginsplot" "help marginsplot"}{...} +{vieweralsosee "[R] margins" "help margins"}{...} +{viewerjumpto "Syntax" "coefplot##syntax"}{...} +{viewerjumpto "Description" "coefplot##description"}{...} +{viewerjumpto "Options" "coefplot##options"}{...} +{viewerjumpto "Examples" "coefplot##examples"}{...} +{viewerjumpto "Remarks" "coefplot##remarks"}{...} +{viewerjumpto "Saved results" "coefplot##saved_results"}{...} +{viewerjumpto "References" "coefplot##references"}{...} +{viewerjumpto "Author" "coefplot##author"}{...} +{viewerjumpto "History" "coefplot##history"}{...} +{hi:help coefplot}{...} +{right:{browse "http://repec.sowi.unibe.ch/stata/coefplot"}} +{right:{browse "http://github.com/benjann/coefplot"}} +{hline} + +{title:Title} + +{pstd} + {hi:coefplot} {hline 2} Plotting regression coefficients and other + results + +{marker syntax}{...} +{title:Syntax} + +{p 8 15 2} + {cmd:coefplot} {it:subgraph} [ || {it:subgraph} || ... ] + [{cmd:,} {help coefplot##globalopts:{it:globalopts}} ] + +{pstd} + where {it:subgraph} is defined as + +{p 8 16 2} + {cmd:(}{it:plot}{cmd:)} [ {cmd:(}{it:plot}{cmd:)} ... ] + [, {help coefplot##subgropts:{it:subgropts}} ] + +{pstd} + and {it:plot} is either {cmd:_skip} (to skip a plot) or + +{p 8 16 2} + {it:model} [ \ {it:model} \ ... ] + [, {help coefplot##plotopts:{it:plotopts}} ] + +{pstd} + and {it:model} is + +{p 8 16 2} + {it:namelist} [{cmd:,} {help coefplot##modelopts:{it:modelopts}} ] + +{pstd} + where {it:namelist} is a list of names of stored models + (see help {helpb estimates}; type {cmd:.} or leave blank to refer to + the active model). The {cmd:*} and {cmd:?} wildcards are allowed + in {it:namelist}; see + {help coefplot##wildcards:{it:Using wildcards in model names}}. 
Furthermore,
    {it:model} may also be

{p 8 16 2}
    {helpb coefplot##matrix:{ul:m}atrix({it:mspec})} [{cmd:,} {help coefplot##modelopts:{it:modelopts}} ]

{pstd}
    to plot results from a matrix (see
    {help coefplot##matrix:{it:Plotting results from matrices}} below).
    Parentheses around {it:plot} can be omitted if {it:plot} does not contain
    spaces.

{synoptset 25 tabbed}{...}
{marker modelopts}{synopthdr:modelopts}
{synoptline}
{syntab:Main}
{synopt:{helpb coefplot##omitted:{ul:omit}ted}}include omitted
    coefficients
    {p_end}
{synopt:{helpb coefplot##baselevels:{ul:base}levels}}include base levels
    {p_end}
{synopt:{helpb coefplot##b:b({it:mspec})}}specify source to be plotted; default is to
    plot {cmd:e(b)}
    {p_end}
{synopt:{helpb coefplot##at:at{sf:[}({it:spec}){sf:]}}}get plot positions from
    {cmd:e(at)}, or as specified by {it:spec}
    {p_end}
{synopt:{helpb coefplot##keep:keep({it:coeflist})}}keep specified coefficients
    {p_end}
{synopt:{helpb coefplot##drop:drop({it:coeflist})}}drop specified coefficients
    {p_end}

{syntab:Confidence intervals}
{synopt:{helpb coefplot##noci:noci}}omit confidence intervals
    {p_end}
{synopt:{helpb coefplot##levels:{ul:l}evels({it:numlist})}}set level(s) for
    confidence intervals
    {p_end}
{synopt:{helpb coefplot##ci:ci({it:spec})}}provide confidence intervals
    {p_end}
{synopt:{helpb coefplot##v:v({it:name})}}provide variances; default is to use
    {cmd:e(V)}
    {p_end}
{synopt:{helpb coefplot##se:se({it:mspec})}}provide standard errors
    {p_end}
{synopt:{helpb coefplot##df:df({it:spec})}}provide degrees of freedom
    {p_end}
{synopt:{helpb coefplot##citype:citype({it:method})}}method to compute
    confidence intervals; default is {cmd:citype(normal)}
    {p_end}

{syntab:Transform results}
{synopt:{helpb coefplot##eform:eform{sf:[}({it:coeflist}){sf:]}}}plot
    exponentiated point estimates and confidence intervals
    {p_end}
{synopt:{helpb coefplot##rescale:rescale({it:spec})}}rescale point
estimates + and confidence intervals + {p_end} +{synopt:{helpb coefplot##transform:{ul:trans}form({it:matchlist})}}transform + point estimates and confidence intervals + {p_end} + +{syntab:Names and labels} +{synopt:{helpb coefplot##rename:rename({it:spec})}}rename coefficients + {p_end} +{synopt:{helpb coefplot##eqrename:{ul:eqren}ame({it:spec})}}rename + equations + {p_end} +{synopt:{helpb coefplot##asequation:{ul:aseq}uation{sf:[}({it:string}){sf:]}}}set equation + to model name or {it:string} + {p_end} +{synopt:{helpb coefplot##swapnames:{ul:swap}names}}swap coefficient names and + equation names + {p_end} +{synopt:{helpb coefplot##mlabels:mlabels({it:matchlist})}}add custom marker labels + {p_end} + +{syntab:Auxiliary results} +{synopt:{helpb coefplot##aux:aux({sf:{it:mspec} [{it:mspec} ...]})}}make + additional results available as {cmd:@aux1}, {cmd:@aux2}, etc. + {p_end} +{synoptline} + +{synoptset 25 tabbed}{...} +{marker plotopts}{synopthdr:plotopts} +{synoptline} +{syntab:Passthru} +{synopt:{help coefplot##modelopts:{it:modelopts}}}plot-specific model options; + see {help coefplot##place:{it:Placement of options}} + {p_end} + +{syntab:Main} +{synopt:{helpb coefplot##label:{ul:lab}el({it:string})}}label to be used for + the plot in the legend + {p_end} +{synopt:{helpb coefplot##key:key{sf:[}(ci {sf:[}#{sf:]}){sf:]}}}key + symbol to be used for the plot in the legend + {p_end} +{synopt:{helpb coefplot##nokey:nokey}}do not include the plot in the legend + {p_end} +{synopt:{helpb coefplot##pstyle:{ul:psty}le({it:pstyle})}}overall + style of the plot + {p_end} +{synopt:{helpb coefplot##axis:{ul:ax}is({it:#})}}choice of axis for the plot, {cmd:1} {ul:<} {it:#} {ul:<} {cmd:9} + {p_end} +{synopt:{helpb coefplot##offset:offset({it:#})}}provide offset for plot + positions + {p_end} +{synopt:{helpb coefplot##ifopt:if({it:exp})}}restrict the contents of the plot + {p_end} +{synopt:{helpb coefplot##weight:{ul:w}eight({it:exp})}}scale size of markers + {p_end} + 
+{syntab:Markers} +{synopt:{it:{help marker_options}}}change look of + markers (color, size, etc.) + {p_end} +{synopt:{helpb coefplot##mlabel:{ul:ml}abel{sf:[}({it:spec}){sf:]}}}add marker + labels + {p_end} +{synopt:{it:{help marker_label_options}}}change the look and position of marker + labels + {p_end} +{synopt:{helpb coefplot##recast:recast({it:plottype})}}plot results using + {it:plottype} + {p_end} + +{syntab:Confidence spikes} +{synopt:{helpb coefplot##cionly:cionly}}plot confidence spikes only + {p_end} +{synopt:{helpb coefplot##citop:citop}}draw confidence spikes in front + of markers + {p_end} +{synopt:{helpb coefplot##cirecast:{ul:cire}cast({it:plottype})}}shorthand for {cmd:ciopts(recast())} + {p_end} +{synopt:{helpb coefplot##ciopts:{ul:ciop}ts({it:options})}}affect rendition + of confidence spikes + {p_end} +{synopt:{helpb coefplot##cismooth:{ul:cis}mooth{sf:[}({it:options}){sf:]}}}add smoothed + confidence intervals + {p_end} +{synoptline} + +{synoptset 25 tabbed}{...} +{marker subgropts}{synopthdr:subgropts} +{synoptline} +{syntab:Passthru} +{synopt:{help coefplot##modelopts:{it:modelopts}}}subgraph-specific model + options; see {help coefplot##place:{it:Placement of options}} + {p_end} +{synopt:{help coefplot##plotopts:{it:plotopts}}}subgraph-specific plot + options; see {help coefplot##place:{it:Placement of options}} + {p_end} + +{syntab:Main} +{synopt:{helpb coefplot##bylabel:{ul:bylab}el({it:string})}}label to be used + for the subgraph + {p_end} +{synoptline} + +{synoptset 25 tabbed}{...} +{marker globalopts}{synopthdr:globalopts} +{synoptline} +{syntab:Passthru} +{synopt:{help coefplot##modelopts:{it:modelopts}}}global model options; see + {help coefplot##place:{it:Placement of options}} + {p_end} +{synopt:{help coefplot##plotopts:{it:plotopts}}}global plot options; see + {help coefplot##place:{it:Placement of options}} + {p_end} +{synopt:{help coefplot##subgropts:{it:subgropts}}}global subgraph options; + see {help 
coefplot##place:{it:Placement of options}} + {p_end} + +{syntab:Main} +{synopt:{helpb coefplot##horizontal:{ul:hor}izontal}}coefficient values are + on x axis; general default + {p_end} +{synopt:{helpb coefplot##vertical:{ul:vert}ical}}coefficient values are on y + axis; default with {cmd:at()} + {p_end} +{synopt:{helpb coefplot##eqstrict:eqstrict}}be strict about equations + {p_end} +{synopt:{helpb coefplot##order:order({it:coeflist})}}order coefficients + {p_end} +{synopt:{helpb coefplot##orderby:orderby({it:spec})}}order coefficients by + specific model + {p_end} +{synopt:{helpb coefplot##sort:sort{sf:[}({it:spec}){sf:]}}}sort coefficients + {p_end} +{synopt:{helpb coefplot##relocate:{ul:reloc}ate({it:spec})}}assign + specific positions to coefficients + {p_end} +{synopt:{helpb coefplot##bycoefs:{ul:byc}oefs}}arrange subgraphs by + coefficients + {p_end} +{synopt:{helpb coefplot##norecycle:{ul:norec}ycle}}increment plot styles across + subgraphs + {p_end} +{synopt:{helpb coefplot##nooffsets:{ul:nooff}sets}}do not offset plot + positions + {p_end} +{synopt:{helpb coefplot##format:format({it:format})}}set the display format for + numeric labels + {p_end} +{synopt:{helpb coefplot##pnum:p{it:#}({it:plotopts})}}options for {it:#}th plot + {p_end} + +{syntab:Labels and grid lines} +{synopt:{helpb coefplot##nolabels:{ul:nolab}els}}use variable names instead of + labels + {p_end} +{synopt:{helpb coefplot##coeflabels:{ul:coefl}abels({it:spec})}}specify + custom labels for coefficients + {p_end} +{synopt:{helpb coefplot##noeqlabels:{ul:noeql}abels}}suppress equation labels + {p_end} +{synopt:{helpb coefplot##eqlabels:{ul:eql}abels({it:spec})}}specify labels + for equations + {p_end} +{synopt:{helpb coefplot##headings:{ul:head}ings({it:spec})}}add headings between + coefficients + {p_end} +{synopt:{helpb coefplot##groups:groups({it:spec})}}add labels for groups of + coefficients + {p_end} +{synopt:{helpb coefplot##plotlabels:{ul:plotl}abels({it:spec})}}(re)set plot + 
labels + {p_end} +{synopt:{helpb coefplot##bylabels:bylabels({it:spec})}}(re)set subgraph + labels + {p_end} +{synopt:{helpb coefplot##grid:grid({it:options})}}affect rendition of grid lines + {p_end} + +{syntab:Save results} +{synopt:{helpb coefplot##generate:{ul:gen}erate{sf:[}({it:prefix}){sf:]}}}generate + variables containing the graph data + {p_end} +{synopt:{helpb coefplot##replace:replace}}overwrite existing variables + {p_end} + +{syntab:Add plots} +{synopt:{helpb addplot_option:addplot({it:plot})}}add other plots to the + graph + {p_end} +{synopt:{helpb coefplot##nodrop:nodrop}}do not drop observations + {p_end} + +{syntab:Y axis, X axis, Titles, Legend, Overall, By} +{synopt:{it:{help twoway_options}}}twoway options, other than {cmd:by()} + {p_end} +{synopt:{cmdab:byop:ts(}{it:{help by_option:byopts}}{cmd:)}}how subgraphs + are combined + {p_end} +{synoptline} + + +{marker description}{...} +{title:Description} + +{pstd} + {cmd:coefplot} plots results from estimation commands or Stata matrices. + Results from multiple models or matrices can be combined in a single + graph. The default behavior of {cmd:coefplot} is to draw markers for + coefficients and horizontal spikes for confidence intervals. However, + {cmd:coefplot} can also produce various other types of graphs. + + +{marker options}{...} +{title:Options} +{dlgtab:Model options} + +{marker omitted}{...} +{phang} + {cmd:omitted} includes omitted coefficients. This may be useful if a model + contains coefficients that have been dropped due to collinearity. + +{marker baselevels}{...} +{phang} + {cmd:baselevels} includes base levels of factor variables. + +{marker b}{...} +{phang} + {cmd:b(}{it:mspec}{cmd:)} specifies the source from which the point + estimates and coefficient names are to be collected. The default is to use + (the first row of) {cmd:e(b)} (or {cmd:e(b_mi)} if plotting results from + {helpb mi estimate}). 
{cmd:b()} is discarded in matrix mode (see + {help coefplot##matrix:{it:Plotting results from matrices}} below). + {it:mspec} may be: + +{p2colset 13 25 27 2}{...} +{p2col:{it:name}}use first row of {cmd:e(}{it:name}{cmd:)} + {p_end} +{p2col:{it:name}{cmd:[}#{cmd:,.]}}use #th row of + {cmd:e(}{it:name}{cmd:)}; may also type {it:name}{cmd:[}#{cmd:,]} + or {it:name}{cmd:[}#{cmd:]} + {p_end} +{p2col:{it:name}{cmd:[.,}#{cmd:]}}use #th column of + {cmd:e(}{it:name}{cmd:)}; may also type {it:name}{cmd:[,}#{cmd:]} + {p_end} +{p2colreset}{...} + +{marker at}{...} +{phang} + {cmd:at}[{cmd:(}{it:spec}{cmd:)}] causes plot positions to be determined + by the values in {cmd:e(at)} (or matrix {cmd:at}) or as specified by + {it:spec}. The default is to create a categorical axis with coefficients + matched by their names. However, if {cmd:at} is specified, the axis is + treated as continuous. Note that labeling options + {cmd:coeflabels()}, {cmd:eqlabels()}, {cmd:headings()}, or {cmd:groups()} + are not allowed if {cmd:at} is specified. Also not allowed with {cmd:at} + are options {cmd:bycoefs}, {cmd:order()}, and {cmd:relocate()}. + Furthermore, note that {cmd:at} has to be specified for all models or + for none. {it:spec} is + + [{it:atspec}] [{cmd:,} {opt t:ransform(exp)}] + +{pmore} + where {it:atspec} may be + +{p2colset 13 27 29 2}{...} +{p2col:{it:mspec}}as above for {helpb coefplot##b:b()} + {p_end} +{p2col:#}use #th at-dimension ({helpb margins}) or #th row/column of main matrix + {p_end} +{p2col:{opt m:atrix(mspec)}}read from matrix instead of {cmd:e()} + {p_end} +{p2col:{opt _coef}}use coefficient names as plot positions + {p_end} +{p2col:{opt _eq}}use equation names as plot positions + {p_end} +{p2colreset}{...} + +{pmore} + If {cmd:at} is specified without argument, the plot positions are taken from the first row + of {cmd:e(at)} (or matrix {cmd:at}). A special case are results from + {helpb margins} where recovering the plot positions is more + complicated. 
The default in this case is to use the first + at-dimension. Type, e.g., {cmd:at(2)} if multiple at-dimension were specified + with {helpb margins} and you want to use the second dimension. Furthermore, + in matrix mode (see + {help coefplot##matrix:{it:Plotting results from matrices}} below), {cmd:at(2)} + would read the plot positions from the 2nd row (or column) of the main matrix. + +{pmore} + When plotting results from {cmd:e()} it is sometimes convenient to + maintain an external matrix with the plot positions instead of + adding plot positions to each {cmd:e()}-set. In this case you can use + syntax {cmd:at(matrix(}{it:mspec}{cmd:))} to read the plot positions. Note + that the vector of plot positions must have the same length as the + coefficient vectors of the plotted models; elements are matched by position, + not by name. + +{pmore} + Furthermore, {cmd:at(_coef)} or {cmd:at(_eq)} will use the coefficient names or + the equation names as plot positions, respectively. This is useful only if + the coefficient names or the equation names are numeric. Note that you may + use {helpb coefplot##rename:rename()} and + {helpb coefplot##eqrename:eqrename()} to strip a non-numeric prefix or suffix + from coefficient names or equation names. + +{pmore} + Suboption {cmd:transform()} transforms the plot positions before creating + the graph. Within the transformation expression, use {cmd:@} as a + placeholder for the value to be transformed. For example, to take the + antilogarithm of the plot positions type {cmd:transform(exp(@))}. + +{marker keep}{...} +{phang} + {cmd:keep(}{it:coeflist}{cmd:)} specifies the coefficients to be + plotted. The default is to include all coefficients from the + first (nonzero) equation of a model (and discard further equations). 
+ {it:coeflist} is a space-separated list of + elements such as: + +{p2colset 13 25 27 2}{...} +{p2col:{it:coef}}keep coefficient {it:coef} + {p_end} +{p2col:{it:eq}{cmd::}}keep all coefficients from equation {it:eq} + {p_end} +{p2col:{it:eq}{cmd::}{it:coef}}keep coefficient {it:coef} from equation {it:eq} + {p_end} +{p2colreset}{...} + +{pmore} + where {it:eq} and {it:coef} may contain "{cmd:*}" (any string) and + "{cmd:?}" (any nonzero character) wildcards. For example, type {cmd:keep(*:)} or + {cmd:keep(*:*)} to plot all coefficients from all equations. + +{pmore} + If {it:eq} is specified, it is applied to all subsequent + names until a new {it:eq} is specified. For example, + {cmd:keep(3:mpg price 4:weight)} will plot coefficients "{cmd:mpg}" and + "{cmd:price}" from equation "{cmd:3}" and coefficient "{cmd:weight}" from + equation "{cmd:4}". + +{marker drop}{...} +{phang} + {cmd:drop(}{it:coeflist}{cmd:)} drops the specified coefficients, where + {it:coeflist} is as above for {helpb coefplot##keep:keep()}. + +{marker noci}{...} +{phang} + {cmd:noci} omits confidence intervals. + +{marker levels}{...} +{phang} + {cmd:levels(}{it:{help numlist}}{cmd:)} sets the level(s), as percentages, + for confidence intervals. Specified values may be between 10.00 and 99.99 + and can have at most two digits after the decimal point. The default is + {cmd:levels(95)} or as set by {helpb set level}. If multiple values are + specified, multiple confidence intervals are plotted. For example, type + {cmd:levels(99.9 99 95)} to plot the 99.9%, 99%, and 95% confidence + intervals. The default is to use (logarithmically) increasing line widths + for multiple confidence intervals. This behavior is disabled as soon as + {cmd:lwidth()} or {cmd:recast()} is specified within + {helpb coefplot##ciopts:ciopts()}. + +{marker ci}{...} +{phang} + {cmd:ci(}{it:spec}{cmd:)} specifies the source from which to collect + confidence intervals. 
Default is to compute confidence intervals for the
    levels specified in {cmd:levels()} using variances/standard errors (and,
    possibly, degrees of freedom). The {cmd:ci()} option is useful to
    plot confidence intervals that have been provided by the estimation
    command (such as, e.g., {helpb bootstrap}). {it:spec} is

        {it:cispec} [{it:cispec} ...]

{pmore}
    where {it:cispec} is {it:name} to get the lower and upper confidence limits
    from rows 1 and 2 of {cmd:e(}{it:name}{cmd:)} (or matrix {it:name}),
    respectively. Alternatively, {it:cispec} may be {cmd:(}{it:mspec}
    {it:mspec}{cmd:)} to identify the lower and upper confidence limits, with
    {it:mspec} as above for {helpb coefplot##b:b()}. For example, after
    {helpb bootstrap}, {cmd:ci(ci_bc)} would get bias-corrected confidence intervals
    from rows 1 and 2 of {cmd:e(ci_bc)}. The same could be achieved by
    {cmd:ci((ci_bc[1] ci_bc[2]))}.

{pmore}
    {it:cispec} may also be # for a specific confidence level as in
    {helpb coefplot##levels:levels()}. Hence, you may type, e.g.,
    {cmd:ci(95 myci)} to plot the usual 95% confidence intervals along with
    custom confidence intervals provided in {cmd:e(myci)}. Levels specified
    in {cmd:ci()} take precedence over levels specified in {cmd:levels()};
    however, you may also type {cmd:""} within {cmd:ci()} to leave a
    position blank and use the specified level from {cmd:levels()}.

{pmore}
    In matrix mode (see
    {help coefplot##matrix:{it:Plotting results from matrices}} below),
    {it:cispec} may also be {cmd:(}# #{cmd:)}. For example, {cmd:ci((2 3))} would
    read the lower confidence limit from the 2nd row (or column) and
    the upper confidence limit from the 3rd row (or column) of the main matrix.

{marker v}{...}
{phang}
    {cmd:v(}{it:name}{cmd:)} specifies that the variances for confidence interval
    computation are to be taken from the diagonal of {cmd:e(}{it:name}{cmd:)}
    (or matrix {it:name}).
Default is {cmd:e(V)} (or {cmd:e(V_mi)} if plotting + results from {helpb mi estimate}). + +{marker se}{...} +{phang} + {cmd:se(}{it:mspec}{cmd:)} provides standard errors to be used for + computation of confidence intervals. Default is to compute confidence + intervals based on the variances in {cmd:e(V)} + (see {helpb coefplot##v:v()} above). {it:mspec} is as above for + {helpb coefplot##b:b()}. + In matrix mode (see + {help coefplot##matrix:{it:Plotting results from matrices}} below), you may + also specify {cmd:se(}#{cmd:)} to read the standard errors from the #th + row (or column) of the main matrix. + +{marker df}{...} +{phang} + {cmd:df(}{it:spec}{cmd:)} specifies degrees of freedom (DF) to be taken into + account for confidence interval computation. Default is to obtain DF + from scalar {cmd:e(df_r)} if defined (as in, e.g., {helpb regress}) + or, for results from {helpb mi estimate}, from matrix {cmd:e(df_mi)}. Otherwise, + no DF are taken into account. Specify {cmd:df(}{it:spec}{cmd:)} to provide + custom DF. {it:spec} may be: + +{p2colset 13 25 27 2}{...} +{p2col:#}set DF for all coefficients to # + {p_end} +{p2col:{it:mspec}}as above for {helpb coefplot##b:b()} + {p_end} +{p2colreset}{...} + +{marker citype}{...} +{phang} + {cmd:citype(}{it:method}{cmd:)} specifies the method to be used to compute the limits of + confidence intervals. {it:method} can be {cmd:normal}, {cmd:logit}, {cmd:probit}, + {cmd:atanh}, or {cmd:log}. + +{pmore} + {cmd:citype(normal)}, the default, computes confidence + limits based on untransformed coefficients and standard errors. Let {it:b} be + the point estimate, {it:se} the standard error, and {it:t} the (1-{it:a}/2) + quantile of the standard normal distribution or the t-distribution (if degrees + of freedom are available; see above), where {it:a} is 1 minus the + confidence level (e.g. {it:a}=5% for a 95% confidence interval). 
Then the + limits of the confidence interval are computed as + + {it:b} +/- {it:t} * {it:se} + +{pmore} + {cmd:citype(logit)} uses the logit transformation to compute the limits + of confidence intervals. This is useful if the estimates to be plotted are + proportions and the confidence limits are supposed to lie between 0 and + 1. The limits are computed as + + invlogit(logit({it:b}) +/- {it:t} * {it:se} / ({it:b} * (1 - {it:b}))) + +{pmore} + {cmd:citype(probit)} is an alternative to {cmd:citype(logit)} and computes the + limits as + + normal(invnormal({it:b}) +/- {it:t} * {it:se} / normalden(invnormal({it:b}))) + +{pmore} + {cmd:citype(atanh)} uses the inverse hyperbolic tangent to compute the + confidence intervals. This is useful for estimates that lie between -1 and + 1, such as a correlation coefficient. The limits are computed as: + + tanh(atanh({it:b}) +/- {it:t} * {it:se} / (1 - {it:b}^2)) + +{pmore} + {cmd:citype(log)} computes log-transformed confidence intervals. This is useful + for estimates that may only be positive, such as a variance estimate. The limits + are computed as: + + exp(ln({it:b}) +/- {it:t} * {it:se} / {it:b}) + +{marker eform}{...} +{phang} + {cmd:eform}[{cmd:(}{it:coeflist}{cmd:)}] causes point estimates and + confidence intervals to be exponentiated. This is useful + if you want to plot hazard ratios (HR), incidence-rate ratios (IRR), + odds ratios (OR), or relative-risk ratios (RRR). If {cmd:eform} is + specified without arguments, then all coefficients of the model are + exponentiated. To exponentiate only selected coefficients, specify + {it:coeflist} as above for {helpb coefplot##keep:keep()}. + +{marker rescale}{...} +{phang} + {cmd:rescale(}{it:spec}{cmd:)} rescales point estimates and confidence + intervals. Type {cmd:rescale(}#{cmd:)} to rescale all coefficients + by a constant factor. For example, {cmd:rescale(100)} will multiply all + coefficients by 100. 
Alternatively, {it:spec} may be + + {it:coeflist} {cmd:=} # [{it:coeflist} {cmd:=} # ...] + +{pmore} + with {it:coeflist} as above for {helpb coefplot##keep:keep()}. + +{marker transform}{...} +{phang} + {cmd:transform(}{it:matchlist}{cmd:)} transforms point estimates and confidence + intervals. {it:matchlist} is: + + {it:coeflist} {cmd:= "}{it:{help exp}}{cmd:"} [{it:coeflist} {cmd:= "}{it:{help exp}}{cmd:"} ...] + +{pmore} + with {it:coeflist} as above for {helpb coefplot##keep:keep()}. Within the + transformation expression, use {cmd:@} as a placeholder for + the value to be transformed. For example, to take the square root of all + coefficients type {cmd:transform(* = sqrt(@))}. In addition, internal + variables may be used as explained in + {help coefplot##tempvar:Accessing internal temporary variables}. The + transformation expression must be enclosed in double quotes if it contains + spaces. If specified, {cmd:eform()} and {cmd:rescale()} are applied before applying + {cmd:transform()}. + +{marker rename}{...} +{phang} + {cmd:rename(}{it:spec}{cmd:)} renames coefficients. {it:spec} is: + + {it:coeflist} {cmd:=} {it:newname} [{it:coeflist} {cmd:=} {it:newname} ...] [{cmd:,} {cmdab:r:egex}] + +{pmore} + with {it:coeflist} as above for {helpb coefplot##keep:keep()} except that + wildcards are only allowed in equation names, and coefficient names may + be specified as {it:prefix}{cmd:*} to replace a prefix or + {cmd:*}{it:suffix} to replace a suffix. For example, + {cmd:rename(*.foreign = .cartype)} will rename coefficients such as + {cmd:0.foreign} and {cmd:1.foreign} to {cmd:0.cartype} and + {cmd:1.cartype}. {it:newname} must be enclosed in double quotes if it + contains spaces. For labeling coefficients, also see + {helpb coefplot##coeflabels:coeflabels()}. 
 + +{pmore} + Apply option {cmd:regex} to cause coefficient specifications (but not + equation specifications) to be interpreted as + {browse "https://en.wikipedia.org/wiki/Regular_expression":regular expressions}. In this + case, {it:newname} may contain {cmd:\1}, ..., {cmd:\9} to reference back to + matched subexpressions (and {cmd:\0} for the entire match). For example, type + {cmd:rename(^AA([0-9]+)BB$ = YY\1ZZ, regex)} to rename + coefficients such as {cmd:AA123BB}, {cmd:AA0BB}, or {cmd:AA99BB} to + {cmd:YY123ZZ}, {cmd:YY0ZZ}, or {cmd:YY99ZZ}. If the leading {cmd:^} or the + trailing {cmd:$} is omitted, only the matched part of a coefficient name is + subject to substitution; the rest of the name will remain unchanged. Include + the regular expressions in quotes or compound double quotes if they contain + funny characters (such as, e.g., quotes, equal signs, or commas). + +{marker eqrename}{...} +{phang} + {cmd:eqrename(}{it:spec}{cmd:)} renames equations. {it:spec} is: + + {it:eqlist} {cmd:=} {it:newname} [{it:eqlist} {cmd:=} {it:newname} ...] [{cmd:,} {cmdab:r:egex}] + +{pmore} + where {it:eqlist} is a space-separated list of equation names. Equation + names may be {it:prefix}{cmd:*} to replace a prefix or + {cmd:*}{it:suffix} to replace a suffix. For example, + {cmd:eqrename(rep78* = reprec)} will rename equations such as + {cmd:rep78_3} and {cmd:rep78_4} to {cmd:reprec_3} and + {cmd:reprec_4}. {it:newname} must be enclosed in double quotes if it + contains spaces. For labeling equations, also see + {helpb coefplot##eqlabels:eqlabels()}. + +{pmore} + Apply option {cmd:regex} to cause equation specifications to be interpreted as + {browse "https://en.wikipedia.org/wiki/Regular_expression":regular expressions}. In this + case, {it:newname} may contain {cmd:\1}, ..., {cmd:\9} to reference back to + matched subexpressions (and {cmd:\0} for the entire match). 
For example, type + {cmd:eqrename(^eq([0-9])0$ = Outcome_\1, regex)} to rename + equations such as {cmd:eq20} or {cmd:eq90} to + {cmd:Outcome_2} or {cmd:Outcome_9}. If the leading {cmd:^} or the + trailing {cmd:$} is omitted, only the matched part of an equation name is + subject to substitution; the rest of the name will remain unchanged. Include the regular expressions in + quotes or compound double quotes if they contain funny characters (such as, e.g., quotes, + equal signs, or commas). + +{marker asequation}{...} +{phang} + {cmd:asequation}[{cmd:(}{it:string}{cmd:)}] sets the equation name for all + included coefficients from the model to {it:string}. This is useful if you + want to assign an equation name to results that have been stored without + information on equations. If {cmd:asequation} is specified without + argument, the name of the model is used. If you apply the + {cmd:asequation()} option you may also want to specify + {helpb coefplot##eqstrict:eqstrict}. + +{marker swapnames}{...} +{phang} + {cmd:swapnames} swaps coefficient names and equation names after collecting + the model's results. The names are swapped after applying model options + such as {cmd:keep()}, {cmd:drop()}, or {cmd:rename()} but + before applying global options such as {cmd:coeflabel()}, {cmd:order()}, + or {cmd:eqlabels()}. + +{marker mlabels}{...} +{phang} + {cmd:mlabels(}{it:matchlist}{cmd:)} specifies marker labels for + selected coefficients. {it:matchlist} is: + + {it:coeflist} {cmd:=} # "{it:label}" [{it:coeflist} {cmd:=} # "{it:label}" ...] + +{pmore} + where {it:coeflist} is as above for {helpb coefplot##keep:keep()} and # is a + number 0--12 for the location of the marker label (see + {manhelpi clockposstyle G-4}). Not all of Stata's plot types + support marker labels. 
For example, if you use + {helpb coefplot##recast:recast(bar)} to change the plot type to + {helpb twoway_bar:bar}, no marker labels will be displayed (this has + changed with the April 6, 2022, update to Stata 17; plot type + {helpb twoway_bar:bar} now displays marker labels). + +{marker aux}{...} +{phang} + {cmd:aux(}{it:mspec} [{it:mspec} ...]{cmd:)} collects additional results + and makes them available as internal variables. {it:mspec} is as above for + {helpb coefplot##b:b()}. The internal variables + are named {cmd:@aux1}, {cmd:@aux2}, ..., and can be used within + {helpb coefplot##ifopt:if()}, + {helpb coefplot##weight:weight()}, + {helpb coefplot##transform:transform()}, + {helpb marker_label_options:mlabel()}, + {helpb marker_label_options:mlabvposition()}, and + {helpb addplot_option:addplot()} (see + {help coefplot##tempvar:Accessing internal temporary variables} + below). In matrix mode (see + {help coefplot##matrix:{it:Plotting results from matrices}} below), you may + also specify {cmd:aux(}# [# ...]{cmd:)} to read the values from the corresponding + rows (or columns) of the main matrix. + +{dlgtab:Plot options} + +{marker label}{...} +{phang} + {cmd:label(}{it:string}{cmd:)} provides a label for the plot to be used + in the legend. Use double quotes to create multiline labels. For example, + {cmd:label("This is a" "long label")} would create a two-line label. For + text effects (bold, italics, greek letters, etc.) use SMCL tags as + described in {it:{help graph_text}}. + +{marker key}{...} +{phang} + {cmd:key}[{cmd:(ci} [{cmd:#}]{cmd:)}] determines the key symbol + to be used for the plot in the legend. {cmd:key} without argument uses + the plot's marker symbol; this is the default. {cmd:key(ci)} determines + the key symbol from the (first) confidence interval. {cmd:key(ci #)} + determines the key symbol from the #th confidence interval; this is only + useful if multiple confidence intervals are included in the plot. 
+ +{marker nokey}{...} +{phang} + {cmd:nokey} prevents including the plot in the legend. + +{marker pstyle}{...} +{phang}{cmd:pstyle(}{it:pstyle}{cmd:)} sets the overall style of the + plot; see help {it:{help pstyle}}. {cmd:pstyle()} affects both, + coefficient markers and confidence spikes. To use a different plot style + for confidence spikes, add {cmd:pstyle()} within + {helpb coefplot##ciopts:ciopts()}. + +{marker axis}{...} +{phang}{cmd:axis(}{it:#}{cmd:)} specifies the scale axis to be used for the + plot, where {cmd:1} {ul:<} {it:#} {ul:<} {cmd:9}. The default is to place + all plots on the same scale axis. + +{marker offset}{...} +{phang} + {cmd:offset(}{it:#}{cmd:)} specifies a custom offset for the plot + positions. The default is to create automatic offsets to prevent + overlap of confidence spikes as soon as there are + multiple plots. The spacing between coefficients is one unit, so + {it:#} should usually be within -0.5 and 0.5. {it:#} may also be a scalar + expression such as, say, {cmd:1/6}. + +{marker ifopt}{...} +{phang} + {cmd:if(}{it:exp}{cmd:)} restricts the contents of the plot to coefficients + satisfying {it:exp}. The option is useful when you want to select + coefficients, e.g., based on their values, plot positions, or confidence + limits. Within {it:exp} refer to internal temporary variables as explained + in {help coefplot##tempvar:Accessing internal temporary variables} below. + For example, to include positive coefficients only, you could type + {cmd:if(@b>=0)}. Note that {cmd:if()} does not affect the rendition of the + categorical axis (unless {helpb coefplot##at:at} is specified). That is, a + complete categorical axis is created including labels for all collected + coefficients, even for the ones that have been removed from the plot by + {cmd:if()}. 
+ +{marker weight}{...} +{phang} + {cmd:weight(}{it:exp}{cmd:)} scales the size of the markers according to + the size of the specified weights (see + {help scatter##remarks14:Weighted markers} in help {helpb scatter}). Within + {it:exp} refer to internal temporary variables as explained in + {help coefplot##tempvar:Accessing internal temporary variables} below. For + example, to scale markers according to the inverse of standard errors, you + could type {cmd:weight(1/@se)}. {cmd:weight()} has no effect if marker + labels are specified. + +{phang} + {it:marker_options} change the look of the coefficient markers (color, + size, etc.); see help {it:{help marker_options}}. + +{marker mlabel}{...} +{phang} + {cmd:mlabel}[{cmd:(}{it:spec}{cmd:)}] adds marker labels to the + plot. For adding custom labels to specific markers also see model option + {helpb coefplot##mlabels:mlabels()} above. Furthermore, note that + not all of Stata's plot types support marker labels. For example, if you use + {helpb coefplot##recast:recast(bar)} to change the plot type to + {helpb twoway_bar:bar}, no marker labels will be displayed (this has + changed with the April 6, 2022, update to Stata 17; plot type + {helpb twoway_bar:bar} now displays marker labels). + +{pmore} + The {cmd:mlabel} option can be used in three different ways: + +{pmore2} + (1) {opt mlabel} without argument adds the values of the point estimates as + marker labels. Use global option + {helpb coefplot##format:format()} to set the display format. + +{pmore2} + (2) {opth mlabel(varname)} uses the values of the specified variable + as marker labels. {it:varname} may be an internal variable (see + {help coefplot##tempvar:Accessing internal temporary variables} below). For example, + {cmd:mlabel(@b)} is equivalent to {cmd:mlabel} without argument. + +{pmore2} + (3) {opt mlabel(strexp)} sets the marker labels to the evaluation of the + specified string expression. 
Internal variables can be used within {it:strexp} + (see {help coefplot##tempvar:Accessing internal temporary variables} + below). For example, you can type + +{pmore3} + mlabel("p = " + string(@pval,"%9.3f")) + +{pmore2} + to display labels such as "p = 0.001" or "p = 0.127". Furthermore, + +{pmore3} + mlabel(cond(@pval<.001, "***", cond(@pval<.01, "**", cond(@pval<.05, "*", "")))) + +{pmore2} + would display significance stars. + +{phang} + {it:marker_label_options} change the look and + position of marker labels; see help {it:{help marker_label_options}}. + +{marker recast}{...} +{phang} + {cmd:recast(}{it:plottype}{cmd:)} plots the coefficients using + {it:plottype}; supported plot types are + {helpb scatter}, + {helpb line}, + {helpb twoway_connected:connected}, + {helpb twoway_area:area}, + {helpb twoway_bar:bar}, + {helpb twoway_spike:spike}, + {helpb twoway_dropline:dropline}, and + {helpb twoway_dot:dot}. The default {it:plottype} is {helpb scatter}. The + chosen plot type affects the available plot options. For example, if + the plot type is {helpb twoway_bar:bar} then {it:{help barlook_options}} + will be available. See the plot type's help file for details. + +{marker cionly}{...} +{phang} + {cmd:cionly} causes markers for point estimates to be suppressed. + +{marker citop}{...} +{phang} + {cmd:citop} specifies that confidence intervals be drawn in front of + the markers for point estimates; the default is to draw confidence intervals + behind the markers. + +{marker cirecast}{...} +{phang} + {cmd:cirecast(}{it:plottype}{cmd:)} is shorthand notation for + {helpb coefplot##ciopts:ciopts(recast())}. If both are provided, the plot types + specified in {cmd:ciopts(recast())} take precedence over the plot types + specified in {cmd:cirecast()}. + +{marker ciopts}{...} +{phang} + {cmd:ciopts(}{it:options}{cmd:)} affect the rendition of confidence + intervals. 
{it:options} are: + +{p2colset 13 31 33 2}{...} +{p2col:{it:{help line_options}}}change look of spikes + {p_end} +{p2col:{cmd:recast(}{it:plottype}{cmd:)}}plot the confidence intervals using + {it:plottype} + {p_end} +{p2colreset}{...} + +{pmore} + Supported plot types are + {helpb twoway_rarea:rarea}, + {helpb twoway_rbar:rbar}, + {helpb twoway_rspike:rspike}, + {helpb twoway_rcap:rcap}, + {helpb twoway_rcapsym:rcapsym}, + {helpb twoway_rscatter:rscatter}, + {helpb twoway_rline:rline}, + {helpb twoway_rconnected:rconnected}, + {helpb twoway_pcspike:pcspike}, + {helpb twoway_pcspike:pccapsym}, + {helpb twoway_pcarrow:pcarrow} (or {cmd:pcrarrow} for the reverse), + {helpb twoway_pcbarrow:pcbarrow}, and + {helpb twoway_pcscatter:pcscatter}. The default {it:plottype} is + {helpb twoway_rspike:rspike}. The chosen plot type affects the available + options within {cmd:ciopts()}. For example, if the plot type is + {helpb twoway_rbar:rbar} then {it:{help barlook_options}} will be + available. See the plot type's help file for details. + +{pmore} + If multiple confidence intervals are requested, then + {it:{help stylelists}} may be specified in the options within + {cmd:ciopts()}. For example, {cmd:recast(rspike rcap ..)} would use + {helpb twoway_rspike:rspike} for the first confidence interval and + {helpb twoway_rcap:rcap} for the remaining confidence intervals; + {cmd:lwidth(thin medium thick)} would use thin lines for the first + confidence interval, medium width lines for the second, and thick lines + for the third. + +{marker cismooth}{...} +{phang} + {cmd:cismooth}[{cmd:(}{it:options}{cmd:)}] adds smoothed confidence + intervals. 
{it:options} are: + +{p2colset 13 33 35 2}{...} +{p2col:{cmd:n(}{it:n}{cmd:)}}number of (equally spaced) confidence levels; + default is {cmd:n(50)}; levels are placed in steps of 100/{it:n} from 100/2{it:n} to + 100-100/2{it:n} (e.g., 1, 3, 5, ..., 99 for {it:n}=50) + {p_end} +{p2col:{cmdab:lw:idth(}{it:min max}{cmd:)}}set range of + (relative) line widths; the default is {cmd:lwidth(2 15)} + ({it:max} is exact only for {it:n}=50) + {p_end} +{p2col:{cmdab:i:ntensity(}{it:min max}{cmd:)}}set range of + color intensities, as percentages; the default is {cmd:intensity(}{it:min} {cmd:100)} + where {it:min} is determined as 4/(ceil({it:n}/2)+3)*100 (about 14 for n=50) + {p_end} +{p2col:{cmdab:c:olor(}{help colorstyle:{it:color}}{cmd:)}}set the color (without + intensity multiplier); the default color is determined by the graph scheme + {p_end} +{p2col:{cmdab:psty:le(}{help pstyle:{it:pstyle}}{cmd:)}}set the overall style; + this mainly affects the color + {p_end} +{p2colreset}{...} + +{pmore} + The confidence intervals produced by {cmd:cismooth} are placed behind + confidence intervals requested in {helpb coefplot##levels:levels()} and + {helpb coefplot##ci:ci()}. {helpb coefplot##ciopts:ciopts()} do not + apply to them. + +{dlgtab:Subgraph options} + +{marker bylabel}{...} +{phang} + {cmd:bylabel(}{it:string}{cmd:)} provides a label for the subgraph. Use + double quotes to create multiline labels. For example, + {cmd:bylabel("This is a" "long label")} would create a two-line label. For + text effects (bold, italics, greek letters, etc.) use SMCL tags as + described in {it:{help graph_text}}. + +{pmore} + Subgraphs are implemented in terms of {helpb graph}'s {cmd:by()} option; see + {helpb coefplot##byopts:byopts()} below for options on how to combine and + render the subgraphs. + +{dlgtab:Global options} + +{marker horizontal}{...} +{phang} + {cmd:horizontal} places coefficient values on the x axis. This is the + default unless {helpb coefplot##at:at} is specified. 
+ +{marker vertical}{...} +{phang} + {cmd:vertical} places coefficient values on the y axis. This is the + default if {helpb coefplot##at:at} is specified. + +{marker eqstrict}{...} +{phang} + {cmd:eqstrict} causes equation names to be taken into account (i.e. match coefficients by + equation names and plot equation labels) even if there is only one equation per model. + +{marker order}{...} +{phang} + {cmd:order(}{it:coeflist}{cmd:)} specifies the order of coefficients + (not allowed with {helpb coefplot##at:at}). The default is to use + the order as found in the input models (and place {cmd:_cons} last, within + equations). {it:coeflist} is a + space-separated list of elements such as: + +{p2colset 13 25 27 2}{...} +{p2col:{cmd:.}}insert a gap + {p_end} +{p2col:{it:eq}{cmd::.}}insert a gap within equation {it:eq} + {p_end} +{p2col:{it:coef}}coefficient {it:coef} + {p_end} +{p2col:{it:eq}{cmd::}}all coefficients from equation {it:eq}, in their current order + {p_end} +{p2col:{it:eq}{cmd::}{it:coef}}coefficient {it:coef} from equation {it:eq} + {p_end} +{p2colreset}{...} + +{pmore} + where {it:coef} may contain "{cmd:*}" (any string) and "{cmd:?}" + (any nonzero character) wildcards. + +{pmore} + If no equations are specified, then the requested order of coefficients + is repeated within each equation (keeping the existing order of + equations). Otherwise, the requested order is applied across equations. + Note that in the later case the first element in {cmd:order()} must be an + equation name. {it:eq} is applied to all subsequent elements until a + new {it:eq} is specified. For example, + {cmd:order(5:weight mpg * 4:turn *)} would yield the following order: + "{cmd:weight}" from equation "{cmd:5}", "{cmd:mpg}" from equation "{cmd:5}", + remaining coefficients from equation "{cmd:5}", + "{cmd:turn}" from equation "{cmd:4}", remaining coefficients from equation + "{cmd:4}", remaining equations if any. 
+ +{marker orderby}{...} +{phang} + {cmd:orderby(}[{it:subgraph}{cmd::}][{it:plot}]{cmd:)} orders the + coefficients by a specific model. By default, the coefficients are ordered + according to how they are provided to {cmd:coefplot}, with earlier plots + and subgraphs taking precedence over later ones (and placing {cmd:_cons} + last). This means that coefficients that only appear in later models will + be placed after the coefficients that appear in earlier models. Specify the + {cmd:orderby()} option if you want to change the default behavior and + arrange the coefficients according to their order in a specific model + (and, within each equation, place the other coefficients after these coefficients, but + before {cmd:_cons}). Arguments {it:subgraph} and {it:plot} select the relevant + model. For example, {cmd:orderby(2:3)} will order coefficients according to + the model that is displayed in the third plot of the second subgraph. If one + of the arguments is omitted, it defaults to one. Hence, {cmd:orderby(3)} will + order the coefficients according to the model displayed in the third plot + of the first subgraph; {cmd:orderby(2:)} will use the model displayed in the first + plot of the second subgraph. {cmd:orderby()} will do nothing if a specified subgraph or + plot does not exist. Furthermore, note that the {it:subgraph} argument + is not allowed if the {helpb coefplot##norecycle:norecycle} option has been + specified; plots are numbered uniquely across subgraphs in this case. + +{marker sort}{...} +{phang} + {cmd:sort}[{cmd:(}{it:spec}{cmd:)}] sorts the coefficients by size. {it:spec} is + + [{it:subgraph}{cmd::}][{it:plot}] [, {cmdab:d:escending} {cmd:by(}{it:stat}{cmd:)} ] + +{pmore} + where {it:subgraph} and {it:plot}, being equal to {cmd:.} or a positive + integer, identify the subgraph and plot to be used + to establish the sort order. 
For example, to sort based on all values in + the second subgraph (possibly including multiple plots), type + {cmd:sort(2:)} or {cmd:sort(2:.)}; to sort based on all values in the third + plot (possibly spanning multiple subgraphs), type {cmd:sort(3)} or + {cmd:sort(.:3)}; to sort based on the values of the third plot in the + second subgraph, type {cmd:sort(2:3)}. Specifying {cmd:sort} without + argument is equivalent to {cmd:sort(.:.)}, that is, to sort based on the + values in all available subgraphs and plots. If you specify a subgraph or + plot that does not exist, {cmd:sort()} will do nothing. Furthermore, if the + {helpb coefplot##norecycle:norecycle} option is specified, the {it:subgraph} + argument can be omitted as the plots will be uniquely numbered across + subgraphs. + +{pmore} + By default, the coefficients are sorted in ascending order of the values of + the point estimates. Specify suboption {cmd:descending} to use a + descending sort order. Furthermore, use {cmd:by(}{it:stat}{cmd:)} to change + the relevant statistic, where {it:stat} may be: + +{p2colset 13 25 27 2}{...} +{p2col:{cmd:b}}sort by point estimate (the default){p_end} +{p2col:{cmd:v} (or {cmd:se})}sort by variance (or standard error){p_end} +{p2col:{cmd:t}}sort by t (or z) statistic{p_end} +{p2col:{cmd:tabs}}sort by absolute t (or z) statistic{p_end} +{p2col:{cmd:p}}sort by p-value{p_end} +{p2col:{cmd:df}}sort by degrees of freedom{p_end} +{p2col:{cmd:ll} [#]}sort by (#th) lower confidence limit; # defaults to 1{p_end} +{p2col:{cmd:ul} [#]}sort by (#th) upper confidence limit; # defaults to 1{p_end} +{p2col:{cmd:aux} [#]}sort by (#th) auxiliary variable (see the + {helpb coefplot##aux:aux()} option); # defaults to 1{p_end} +{p2colreset}{...} + +{pmore} + In case of multiple equations, coefficients will be sorted separately + within each equation, keeping the original order of equations. Use the + {helpb coefplot##order:order()} option to change the order of the equations. 
+ +{marker relocate}{...} +{phang} + {cmd:relocate(}{it:spec}{cmd:)} assigns specific positions to the + coefficients on the category axis. {it:spec} is: + + [{it:eq}{cmd::}]{it:coef} {cmd:=} # [[{it:eq}{cmd::}]{it:coef} {cmd:=} # ...] + +{pmore} + where {it:eq} and {it:coef} may contain "{cmd:*}" (any string) and + "{cmd:?}" (any nonzero character) wildcards. If {helpb coefplot##bycoefs:bycoefs} is + specified, use numbers (1, 2, ...) instead of {it:eq} and {it:coef} + to address the elements on the categorical axis. + +{pmore}The default for {cmd:coefplot} is to place coefficients + at integer values 1, 2, 3, ... (from top to bottom in horizontal mode, + from left to right in vertical mode). The {cmd:relocate()} option gives + you the possibility to specify alternative values. If, for example, you + want to place coefficient {cmd:mpg} at value 2.5 on the category axis, you + could type {cmd:relocate(mpg = 2.5)}. If you only want to change the + order of coefficients and are fine with integer positions, then use the + {helpb coefplot##order:order()} option. Note that the specified positions + are assigned before inserting gaps between equations, headings, and + groups (see {helpb coefplot##eqlabels:eqlabels()}, + {helpb coefplot##headings:headings()}, and + {helpb coefplot##groups:groups()}). Hence, the final plot positions might + deviate from the specified positions if there are equation labels, headings, + or group labels. + +{marker bycoefs}{...} +{phang} + {cmd:bycoefs} flips subgraphs and coefficients (not allowed with + {helpb coefplot##at:at}). If {cmd:bycoefs} is specified, a + separate subgraph is produced for each coefficient. In this + case, use integer numbers (1, 2, ...) instead of coefficient names + to address the elements on the categorical axis within options + {helpb coefplot##relocate:relocate()}, + {helpb coefplot##headings:headings()}, and + {helpb coefplot##groups:groups()}. 
+ +{marker norecycle}{...} +{phang} + {cmd:norecycle} increments plot styles across subgraphs. The default is + to start over with each new subgraph. + +{marker nooffsets}{...} +{phang} + {cmd:nooffsets} suppresses automatic offsets for plot positions. + +{marker format}{...} +{phang} + {cmd:format(}{it:format}{cmd:)} sets the display format for + coefficients. This affects the rendition of the axis and marker + labels. {it:format} may be a numeric format or a date format + (see help {helpb format}). + +{marker pnum}{...} +{phang} + {cmd:p{it:#}(}{help coefplot##plotopts:{it:plotopts}}{cmd:)} specifies + options for the {it:#}th plot. For example, type {cmd:p2(nokey)} to exclude + plot 2 from the legend (see {helpb coefplot##nokey:nokey}). Use the {cmd:p#()} + options as an alternative to specifying options directly within a plot; in + case of conflict, options specified within a plot take precedence + over options specified via {cmd:p#()}. + +{marker nolabels}{...} +{phang} + {cmd:nolabels} causes coefficient names to be used as labels instead of + variable labels or value labels. + +{marker coeflabels}{...} +{phang} + {cmd:coeflabels(}{it:spec}{cmd:)} specifies custom labels for + coefficients (not allowed with {helpb coefplot##at:at}). {it:spec} is + +{p 12 14 2} + [{it:coeflist} {cmd:=} {cmd:"}{it:label}{cmd:"} [{it:coeflist} {cmd:=} {cmd:"}{it:label}{cmd:"} ...]] + [{cmd:,} {cmdab:t:runcate(}#{cmd:)} {cmdab:w:rap(}#{cmd:)} {cmdab:nob:reak} + {cmdab:i:nteraction(}{it:string}{cmd:)} + {it:{help axis_label_options:suboptions}}] + +{pmore} + with {it:coeflist} as above for {helpb coefplot##keep:keep()}. Enclose + {it:label} in double quotes + if it contains spaces, e.g. {bind:{cmd:coeflabels(foreign = "Car Type")}}. + Enclose {it:label} in compound double quotes to create a multiline + label, e.g. {bind:{cmd:coeflabels(foreign = `""This is a" "long label""')}}; + alternatively, apply the {cmd:wrap()} option. For text effects + (bold, italics, greek letters, etc.) 
use SMCL tags as described in + {it:{help graph_text}}. + +{pmore} + Option {cmd:truncate(}#{cmd:)} truncates coefficient labels to + a maximum length of # characters. Option {cmd:wrap(}#{cmd:)} divides + coefficient labels into multiple lines, where each line has a maximum + length of # characters. {cmd:truncate()} and {cmd:wrap()} operate on + words. That is, they try to fill to the maximum length without breaking + in the middle of a word. However, if a word is longer than # characters, + it will be split or truncated. Specify {cmd:nobreak} to prevent + {cmd:truncate()} and {cmd:wrap()} from splitting or truncating words + that are longer than # characters. If {cmd:truncate()} and {cmd:wrap()} + are both specified, {cmd:truncate()} is applied first. + {cmdab:interaction()} specifies the string to be used as + delimiter in labels for interaction terms; the default is + {cmd:interaction(" # ")}. {it:suboptions} are axis label suboptions as + described in {it:{help axis_label_options}}. + +{pmore} + Note: Labels containing multiple lines are left unchanged by {cmd:truncate()} + and {cmd:wrap()}. Therefore, if you don't like how {cmd:wrap()} breaks a + specific label, you can provide a custom variant of it in {cmd:coeflabels()} + while still using {cmd:wrap()} for the other labels. {cmd:truncate()} + and {cmd:wrap()} may fail to process a label if it contains compound + double quotes; the label will be left unchanged in this case. + +{marker noeqlabels}{...} +{phang} + {cmd:noeqlabels} suppresses equation labels. + +{marker eqlabels}{...} +{phang} + {cmd:eqlabels(}{it:spec}{cmd:)} specifies custom labels for equations, one after + the other (not allowed with {helpb coefplot##at:at}). 
{it:spec} is: + +{p 12 14 2} + [{cmd:"}{it:label}{cmd:"} [{cmd:"}{it:label}{cmd:"} ...]] [{cmd:,} + {cmdab:lab:els}[{cmd:(}{it:string}{cmd:)}] + [{cmd:{ul:no}}]{cmdab:g:ap}[{cmd:(}#{cmd:)}] {cmdab:ashead:ings} + {cmdab:off:set(}#{cmd:)} {cmdab:t:runcate(}#{cmd:)} {cmdab:w:rap(}#{cmd:)} + {cmdab:nob:reak} {it:{help axis_label_options:suboptions}} ] + +{pmore} + Enclose labels in double quotes if they contain spaces, + e.g. {bind:{cmd:eqlabels("EQ one" "EQ two")}}. Enclose labels in compound + double quotes to create multiline labels, + e.g. {bind:{cmd:eqlabels(`""This is a" "long label""')}}. Alternatively, + apply the {cmd:wrap()} option. For text effects + (bold, italics, greek letters, etc.) use SMCL tags as described in + {it:{help graph_text}}. + +{pmore} + Option {cmd:label} causes the equation names to be treated as + variable names; {cmd:coefplot} will then use the corresponding variable labels + (and, depending on context, value labels) to label the equations. Specify + {cmd:label(}{it:string}{cmd:)} to set the string to be used as + delimiter in labels for interaction terms; typing {cmd:label} without argument + is equivalent to {cmd:label(" # ")}. {cmd:gap()} specifies the size of the + gap between equations. The + default is {cmd:gap(1)}. {cmd:nogap} suppresses the gap between + equations. {cmdab:asheadings} treats equation labels as headings; + see {helpb coefplot##headings:headings()}. {cmd:offset()}, only + allowed with {cmd:asheadings}, offsets the labels. {cmd:truncate()}, + {cmd:wrap()}, {cmd:nobreak}, and {it:suboptions} are as above for + {helpb coefplot##coeflabels:coeflabels()}. + +{marker headings}{...} +{phang} + {cmd:headings(}{it:spec}{cmd:)} adds headings between + coefficients (not allowed with {helpb coefplot##at:at}). {it:spec} is: + +{p 12 14 2} + {it:coeflist} {cmd:=} {cmd:"}{it:label}{cmd:"} [{it:coeflist} {cmd:=} {cmd:"}{it:label}{cmd:"} ...] 
+ [{cmd:,} [{cmd:{ul:no}}]{cmdab:g:ap}[{cmd:(}#{cmd:)}] + {cmdab:off:set(}#{cmd:)} {cmdab:t:runcate(}#{cmd:)} + {cmdab:w:rap(}#{cmd:)} {cmdab:nob:reak} + {it:{help axis_label_options:suboptions}} ] + +{pmore} + with {it:coeflist} as above for {helpb coefplot##keep:keep()}. If + {helpb coefplot##bycoefs:bycoefs} is specified, use numbers 1, 2, + ... instead of {it:coeflist} to address the elements on the categorical + axis. Enclose {it:label} in double quotes if it contains + spaces. For example, {bind:{cmd:headings(0.foreign = "Car Type")}} will + print the heading "{cmd:Car Type}" before coefficient "{cmd:0.foreign}". + Enclose {it:label} in compound double quotes to create a multiline + label, e.g. {bind:{cmd:headings(foreign = `""This is a" "long heading""')}}. + Alternatively, apply the {cmd:wrap()} option. For text effects (bold, + italics, greek letters, etc.) use SMCL tags as + described in {it:{help graph_text}}. + +{pmore} + {cmd:gap()} and {cmdab:offset()} are as above for + {helpb coefplot##eqlabels:eqlabels()}. {cmd:truncate()}, {cmd:wrap()}, + {cmd:nobreak}, and {it:suboptions} are as above for + {helpb coefplot##coeflabels:coeflabels()}. + +{marker groups}{...} +{phang} + {cmd:groups(}{it:spec}{cmd:)} adds labels for groups of + coefficients (not allowed with {helpb coefplot##at:at}). The specified + label will be printed beside (or, in vertical mode, below) the identified + group of coefficients. {it:spec} is: + +{p 12 14 2} + {it:coeflist} {cmd:=} {cmd:"}{it:label}{cmd:"} [{it:coeflist} {cmd:=} {cmd:"}{it:label}{cmd:"} ...] + [{cmd:,} [{cmd:{ul:no}}]{cmdab:g:ap}[{cmd:(}#{cmd:)}] + {cmdab:t:runcate(}#{cmd:)} {cmdab:w:rap(}#{cmd:)} + {cmdab:nob:reak} {it:{help axis_label_options:suboptions}} ] + +{pmore} + with {it:coeflist} as above for {helpb coefplot##keep:keep()}. If + {helpb coefplot##bycoefs:bycoefs} is specified, use numbers 1, 2, + ... instead of {it:coeflist} to address the elements on the categorical + axis. 
Enclose {it:label} in double quotes if + it contains spaces. Enclose {it:label} in compound double quotes to create + a multiline label. Alternatively, apply the {cmd:wrap()} option. For text + effects (bold, italics, greek letters, etc.) use SMCL tags as described in + {it:{help graph_text}}. + +{pmore} + {cmd:gap()} is as above for + {helpb coefplot##eqlabels:eqlabels()}. {cmd:truncate()}, {cmd:wrap()}, + {cmd:nobreak}, and {it:suboptions} are as above for + {helpb coefplot##coeflabels:coeflabels()}. + +{marker plotlabels}{...} +{phang} + {cmd:plotlabels(}{it:spec}{cmd:)} specifies labels for the plots to be + used in the legend. Labels specified via {cmd:plotlabels()} + take precedence over labels specified in the + {helpb coefplot##label:label()} plot option. {it:spec} is: + +{p 12 14 2} + [{cmd:"}{it:label}{cmd:"} [{cmd:"}{it:label}{cmd:"} ...]] [{cmd:,} {cmdab:t:runcate(}#{cmd:)} + {cmdab:w:rap(}#{cmd:)} {cmdab:nob:reak} ] + +{pmore} + Enclose labels in double quotes if they contain spaces. Enclose labels in + compound double quotes to create multiline labels. Alternatively, + apply the {cmd:wrap()} option. For text effects + (bold, italics, greek letters, etc.) use SMCL tags as described in + {it:{help graph_text}}. Options {cmd:truncate()}, {cmd:wrap()}, and {cmd:nobreak} are as + above for {helpb coefplot##coeflabels:coeflabels()}. + +{marker bylabels}{...} +{phang} + {cmd:bylabels(}{it:spec}{cmd:)} specifies labels for the subgraphs. Labels + specified via {cmd:bylabels()} + take precedence over labels specified in the + {helpb coefplot##bylabel:bylabel()} subgraph option. {it:spec} is: + +{p 12 14 2} + [{cmd:"}{it:label}{cmd:"} [{cmd:"}{it:label}{cmd:"} ...]] [{cmd:,} {cmdab:t:runcate(}#{cmd:)} + {cmdab:w:rap(}#{cmd:)} {cmdab:nob:reak} ] + +{pmore} + Enclose labels in double quotes if they contain spaces. Enclose labels in + compound double quotes to create multiline labels. Alternatively, + apply the {cmd:wrap()} option. 
For text effects
+ (bold, italics, greek letters, etc.) use SMCL tags as described in
+ {it:{help graph_text}}. Options {cmd:truncate()}, {cmd:wrap()}, and {cmd:nobreak} are as
+ above for {helpb coefplot##coeflabels:coeflabels()}.
+
+{marker grid}{...}
+{phang}
+ {cmd:grid(}{it:options}{cmd:)} affects the rendition of grid lines on the
+ category axis (not allowed with {helpb coefplot##at:at}). {it:options} are:
+
+{p 12 14 2}
+ { {cmdab:b:etween} | {cmdab:w:ithin} | {cmdab:n:one} } {it:{help axis_label_options:suboptions}}
+
+{pmore}
+ {cmdab:b:etween} places grid lines between coefficient labels;
+ {cmdab:w:ithin} places grid lines at the center of coefficient labels;
+ {cmdab:n:one} suppresses grid lines. {it:suboptions} are axis label suboptions
+ as described in {it:{help axis_label_options}}. In horizontal mode, the
+ default is {cmd:within} for single plots and {cmd:between} for multiple
+ plots. In vertical mode, the default is {cmd:none}. Alternatively, use
+ {helpb axis_label_options:ytick()} and {helpb axis_label_options:xtick()}
+ to set grid lines.
+
+{marker generate}{...}
+{phang}
+ {cmd:generate}[{cmd:(}{it:prefix}{cmd:)}] generates variables containing
+ the graph data. The variable names will be prefixed by "{cmd:__}"
+ or as specified by {it:prefix}.
+
+{marker replace}{...}
+{phang}
+ {cmd:replace} allows {cmd:coefplot} to overwrite existing variables.
+
+{marker addplot}{...}
+{phang}
+ {cmd:addplot(}{it:plot}{cmd:)} adds other plots to the graph. See help
+ {it:{help addplot_option}}. By default {cmd:addplot()} has access only to
+ the first {it:r} observations in the dataset, where {it:r} is the number of
+ observations used by {cmd:coefplot} to store its internal results. If the
+ graph does not contain multiple subgraphs and
+ {helpb coefplot##generate:generate()} or {helpb coefplot##nodrop:nodrop} is
+ specified, {cmd:addplot()} has access to all observations.
+ +{marker nodrop}{...} +{phang} + {cmd:nodrop} causes {cmd:coefplot} to keep all observations when generating + the graph. The default is to eliminate unused observations temporarily + to increase speed. {cmd:nodrop} may be useful in connection with the + {helpb coefplot##addplot:addplot()} option, if the graph does not contain + multiple subgraphs. {cmd:nodrop} has no effect if + {helpb coefplot##generate:generate()} is specified. + {p_end} + +{phang} + {it:twoway_options} are general twoway options, other than + {cmd:by()}, as documented in help {it:{help twoway_options}}. + +{marker byopts}{...} +{phang} + {cmd:byopts(}{it:byopts}{cmd:)} determines how subgraphs + are combined. {it:byopts} are as described in help {it:{help by_option}}. + + +{marker examples}{...} +{title:Examples} + + . {stata sysuse auto} + . {stata regress price mpg headroom trunk length turn} + . {stata coefplot, drop(_cons) xline(0)} + + . {stata regress price mpg headroom trunk length turn if foreign==0} + . {stata estimates store domestic} + . {stata regress price mpg headroom trunk length turn if foreign==1} + . {stata estimates store foreign} + . {stata coefplot domestic foreign, drop(_cons) xline(0)} + + . {stata coefplot domestic || foreign, drop(_cons) xline(0)} + + . {stata coefplot domestic || foreign, yline(0) bycoefs vertical byopts(yrescale)} + +{pstd} + For further examples see the {browse "http://repec.sowi.unibe.ch/stata/coefplot":website}, + the {browse "http://www.stata-journal.com/article.html?article=gr0059":Stata Journal article}, or the + {browse "http://ideas.repec.org/p/bss/wpaper/1.html":working paper}. 
+ + +{marker remarks}{...} +{title:Remarks} + +{pstd} + Remarks are presented under the following headings: + + {help coefplot##wildcards:Using wildcards in model names} + {help coefplot##place:Placement of options} + {help coefplot##matrix:Plotting results from matrices} + {help coefplot##tempvar:Accessing internal temporary variables} + + +{marker wildcards}{...} +{title:Using wildcards in model names} + +{pstd} + Instead of providing distinct model names to {cmd:coefplot}, you can also + specify a name pattern containing {cmd:*} (any string) + and {cmd:?} (any nonzero character) wildcards. {cmd:coefplot} + will then plot the results from all matching + models. If a name pattern is specified as part of a plot delimited by + parentheses, the results from the matching models will be combined into the + same plot. For example, if models {cmd:est11}, {cmd:est12}, {cmd:est13}, + {cmd:est21}, {cmd:est22}, and {cmd:est23} are in + memory, typing + +{com}{...} + . coefplot (est1*, {txt:{it:opts1}}) (est2*, {txt:{it:opts2}}) +{txt}{...} + +{pstd} + is equivalent to + +{com}{...} + . coefplot (est11 est12 est13, {txt:{it:opts1}}) (est21 est22 est23, {txt:{it:opts2}}) +{txt}{...} + +{pstd} + Likewise, typing + +{com}{...} + . coefplot (est*1, {txt:{it:opts1}} \ est*2, {txt:{it:opts2}} \, {txt:{it:opts3}}) +{txt}{...} + +{pstd} + is equivalent to + +{com}{...} + . coefplot (est11 est21, {txt:{it:opts1}} \ est12 est22, {txt:{it:opts2}} \, {txt:{it:opts3}}) +{txt}{...} + +{pstd} + If a name pattern is specified without parentheses, + the matching models are treated as separate plots. For example, typing + +{com}{...} + . coefplot est1* || est2* +{txt}{...} + +{pstd} + is equivalent to + +{com}{...} + . coefplot est11 est12 est13 || est21 est22 est23 +{txt}{...} + +{pstd} + or + +{com}{...} + . coefplot (est11) (est12) (est13) || (est21) (est22) (est23) +{txt}{...} + +{pstd} + Use global options {helpb coefplot##pnum:p1()}, {helpb coefplot##pnum:p2()}, + etc. 
to provide specific options to the different plots in this case. For + example, typing + +{com}{...} + . coefplot est1*, p1({txt:{it:opts1}}) p2({txt:{it:opts2}}) p3({txt:{it:opts3}}) +{txt}{...} + +{pstd} + is equivalent to + +{com}{...} + . coefplot (est11, {txt:{it:opts1}}) (est12, {txt:{it:opts2}}) (est13, {txt:{it:opts3}}) +{txt}{...} + + +{marker place}{...} +{title:Placement of options} + +{pstd} + {cmd:coefplot} has four levels of options: + +{phang}(1) {help coefplot##modelopts:{it:modelopts}} are options that apply to a single + model (or matrix). They specify the information to be displayed. + +{phang}(2) {help coefplot##plotopts:{it:plotopts}} are options that apply to a single + plot, possibly containing results from multiple models. They affect + the rendition of markers and confidence intervals and provide a label + for the plot. + +{phang}(3) {help coefplot##subgropts:{it:subgropts}} are options that + apply to a single subgraph, possibly containing multiple plots. + +{phang}(4) {help coefplot##globalopts:{it:globalopts}} are options that apply + to the overall graph. + +{pstd} + The levels are nested in the sense that upper level options include all + lower level options. That is, + {help coefplot##globalopts:{it:globalopts}} includes + {help coefplot##subgropts:{it:subgropts}}, + {help coefplot##plotopts:{it:plotopts}}, and + {help coefplot##modelopts:{it:modelopts}}; + {help coefplot##subgropts:{it:subgropts}} includes + {help coefplot##plotopts:{it:plotopts}}, and + {help coefplot##modelopts:{it:modelopts}}; + {help coefplot##plotopts:{it:plotopts}} includes + {help coefplot##modelopts:{it:modelopts}}. However, upper level options + may not be specified at a lower level. + +{pstd} + If lower level options are specified at an upper level, they serve as + defaults for all included lower levels elements. 
For example, if you want + to draw 99% and 95% confidence intervals for all included models, + specify {cmd:levels(99 95)} as global option: + +{com}{...} + . coefplot model1 model2 model3, levels(99 95) +{txt}{...} + +{pstd} + Options specified with an individual element override the defaults set + by upper level options. For example, if you want to draw 99% and 95% + confidence intervals for model 1 and model 2 and 90% confidence intervals + for model 3, you could type: + +{com}{...} + . coefplot model1 model2 (model3, level(90)), levels(99 95) +{txt}{...} + +{pstd} + There are some fine distinctions about the placement of options and how they + are interpreted. For example, if you type + +{com}{...} + . coefplot m1, {txt:{it:opts1}} || m2, {txt:{it:opts2}} {txt:{it:opts3}} +{txt}{...} + +{pstd} + then {it:opts2} and {it:opts3} are interpreted as global options. If you + want to apply {it:opts2} only to {cmd:m2} then type + +{com}{...} + . coefplot m1, {txt:{it:opts1}} || m2, {txt:{it:opts2}} ||, {txt:{it:opts3}} +{txt}{...} + +{pstd} + Similarly, if you type + +{com}{...} + . coefplot (m1, {txt:{it:opts1}} \ m2, {txt:{it:opts2}}) +{txt}{...} + +{pstd} + then {it:opts2} will be applied to both models. To apply {it:opts2} only to + {cmd:m2} type + +{com}{...} + . coefplot (m1, {txt:{it:opts1}} \ m2, {txt:{it:opts2}} \) +{txt}{...} + +{pstd} + or, if you also want to include {it:opts3} to be applied to both models, + type + +{com}{...} + . coefplot (m1, {txt:{it:opts1}} \ m2, {txt:{it:opts2}} \, {txt:{it:opts3}}) +{txt}{...} + +{pstd} + or + +{com}{...} + . coefplot (m1, {txt:{it:opts1}} \ m2, {txt:{it:opts2}} \), {txt:{it:opts3}} +{txt}{...} + +{pstd} + In case of multiple subgraphs there is some ambiguity about where to + specify the plot options (unless global option + {helpb coefplot##norecycle:norecycle} is specified). You can provide plot + options within any of the subgraphs as plot options are collected across + subgraphs. 
However, in case of conflict, the plot options from the rightmost
+ subgraph usually take precedence over earlier plot options. In addition,
+ you can also use global options {helpb coefplot##pnum:p1()},
+ {helpb coefplot##pnum:p2()}, etc. to provide
+ options for specific plots. In case of conflict, options specified within a plot take
+ precedence over options provided via {helpb coefplot##pnum:p1()},
+ {helpb coefplot##pnum:p2()}, etc.
+
+{marker matrix}{...}
+{title:Plotting results from matrices}
+
+{pstd}
+ Use syntax {helpb coefplot##matrix:{ul:m}atrix({it:mspec})} instead of the
+ name of a stored model to plot results from a matrix. {it:mspec} may be:
+
+{p2colset 9 21 23 2}{...}
+{p2col:{it:name}}use first row of matrix {it:name}
+ {p_end}
+{p2col:{it:name}{cmd:[}#{cmd:,.]}}use #th row of
+ matrix {it:name}; may also type {it:name}{cmd:[}#{cmd:,]} or
+ {it:name}{cmd:[}#{cmd:]}
+ {p_end}
+{p2col:{it:name}{cmd:[.,}#{cmd:]}}use #th column of
+ matrix {it:name}; may also type {it:name}{cmd:[,}#{cmd:]}
+ {p_end}
+{p2colreset}{...}
+
+{pstd}
+ If the {cmd:matrix()} syntax is used, then option {helpb coefplot##b:b()} is discarded
+ and names given in {helpb coefplot##at:at()}, {helpb coefplot##ci:ci()},
+ {helpb coefplot##v:v()}, {helpb coefplot##se:se()},
+ {helpb coefplot##df:df()}, and {helpb coefplot##aux:aux()} refer to regular
+ matrices instead of {cmd:e()}-matrices. The matrix name may be omitted in these
+ options if results are to be read from the same matrix; only the
+ relevant row or column numbers have to be provided in this case (whether the
+ numbers are interpreted as row or column numbers
+ depends on how {cmd:matrix()} was specified).
+
+{pstd}
+ For example, to plot medians and their confidence intervals as computed
+ by {helpb centile} you could type:
+
+{com}{...}
+ sysuse auto, clear
+ matrix C = J(3,3,.)
+ matrix rownames C = median ll95 ul95 + matrix colnames C = mpg trunk turn + local i 0 + foreach v of var mpg trunk turn { + local ++ i + centile `v' + matrix C[1,`i'] = r(c_1) \ r(lb_1) \ r(ub_1) + } + matrix list C + coefplot matrix(C), ci((2 3)) +{txt}{...} + +{pstd} + This is equivalent to: + +{com}{...} + coefplot matrix(C[1]), ci((C[2] C[3])) +{txt}{...} + +{pstd} + Note that a single {cmd:coefplot} command can contain both regular syntax + and {cmd:matrix()} syntax. For example, to add means to the graph above + you could type: + +{com}{...} + mean mpg trunk turn + estimates store mean + coefplot (matrix(C), ci((2 3))) (mean) +{txt}{...} + + +{marker tempvar}{...} +{title:Accessing internal temporary variables} + +{pstd} + {cmd:coefplot} maintains a number of internal variables that can be + used within + {helpb coefplot##ifopt:if()}, + {helpb coefplot##weight:weight()}, + {helpb coefplot##transform:transform()}, + {helpb marker_label_options:mlabel()}, + {helpb marker_label_options:mlabvposition()}, and + {helpb addplot_option:addplot()}. 
These + variables are: + +{p2colset 9 21 23 2}{...} +{p2col:{cmd:@b}}point estimates + {p_end} +{p2col:{cmd:@ll}#}lower limits of confidence interval # (may use {cmd:@ll} for {cmd:@ll1}) + {p_end} +{p2col:{cmd:@ul}#}upper limits of confidence interval # (may use {cmd:@ul} for {cmd:@ul1}) + {p_end} +{p2col:{cmd:@V}}variances + {p_end} +{p2col:{cmd:@se}}standard errors + {p_end} +{p2col:{cmd:@t}}t or z statistics, computed as @b/@se + {p_end} +{p2col:{cmd:@df}}degrees of freedom + {p_end} +{p2col:{cmd:@pval}}p-values, computed as (1-normal(|@t|))*2 or ttail(@df,|@t|)*2, depending + on whether df are available + {p_end} +{p2col:{cmd:@at}}plot positions + {p_end} +{p2col:{cmd:@plot}}plot ID (labeled) + {p_end} +{p2col:{cmd:@by}}subgraph ID (labeled) + {p_end} +{p2col:{cmd:@mlbl}}Marker labels set by {helpb coefplot##mlabels:mlabels()} (string variable) + {p_end} +{p2col:{cmd:@mlpos}}Marker label positions set by {helpb coefplot##mlabels:mlabels()} + {p_end} +{p2col:{cmd:@aux}#}auxiliary variables collected by {helpb coefplot##aux:aux()} (may use {cmd:@aux} for {cmd:@aux1}) + {p_end} +{p2colreset}{...} + +{pstd} + The internal variables can be used like other variables in the + dataset. For example, option {cmd:mlabel(@plot)} would add plot labels as marker + labels or option {cmd:addplot(line @at @b)} would draw a connecting line + through all point estimates in the graph. 
+ + +{marker saved_results}{...} +{title:Saved results} + +{pstd} + {cmd:coefplot} returns the following macros and scalars in {cmd:r()}: + +{synoptset 20 tabbed}{...} +{p2col 5 20 24 2: Scalars}{p_end} +{synopt:{cmd:r(n_ci)}}number of confidence intervals{p_end} +{synopt:{cmd:r(n_plot)}}number of plots{p_end} +{synopt:{cmd:r(n_subgr)}}number of subgraphs{p_end} + +{synoptset 20 tabbed}{...} +{p2col 5 20 24 2: Macros}{p_end} +{synopt:{cmd:r(graph)}}copy of graph command{p_end} +{synopt:{cmd:r(labels)}}coefficient labels{p_end} +{synopt:{cmd:r(eqlabels)}}equation labels{p_end} +{synopt:{cmd:r(groups)}}group labels{p_end} +{synopt:{cmd:r(headings)}}headings{p_end} +{synopt:{cmd:r(legend)}}contents of legend option{p_end} + + +{marker author}{...} +{title:Author} + +{pstd} + Ben Jann, University of Bern, ben.jann@unibe.ch + +{pstd} + Thanks for citing this software in one of the following ways: + +{pmore} + Jann, B. (2014). Plotting regression coefficients and other + estimates. The Stata Journal 14(4): 708-737. + +{pmore} + Jann, B. (2013). Plotting regression coefficients and other estimates + in Stata. University of Bern Social Sciences Working Papers + Nr. 1. Available from + {browse "http://ideas.repec.org/p/bss/wpaper/1.html"}. + +{pmore} + Jann, B. (2013). coefplot: Stata module to plot regression coefficients + and other results. Available from + {browse "http://ideas.repec.org/c/boc/bocode/s457686.html"}. + + diff --git a/110/replication_package/replication/ado/plus/c/confirmdir.ado b/110/replication_package/replication/ado/plus/c/confirmdir.ado new file mode 100644 index 0000000000000000000000000000000000000000..5ada2617a93299a2aa8afa134d59eee151d58196 --- /dev/null +++ b/110/replication_package/replication/ado/plus/c/confirmdir.ado @@ -0,0 +1,19 @@ +*! comfirmdir Version 1.1 dan_blanchette@unc.edu 22Jan2009 +*! 
the carolina population center, unc-ch
+* Center of Entrepreneurship and Innovation Duke University's Fuqua School of Business
+* confirmdir Version 1.1 dan_blanchette@unc.edu 17Jan2008
+* research computing, unc-ch
+* - made it handle long directory names
+** confirmdir Version 1.0 dan_blanchette@unc.edu 05Oct2003
+** the carolina population center, unc-ch
+
+program define confirmdir, rclass
+ version 8
+
+ local cwd `"`c(pwd)'"'
+ quietly capture cd `"`1'"'
+ local confirmdir=_rc
+ quietly cd `"`cwd'"'
+ return local confirmdir `"`confirmdir'"'
+
+end
diff --git a/110/replication_package/replication/ado/plus/c/confirmdir.hlp b/110/replication_package/replication/ado/plus/c/confirmdir.hlp
new file mode 100644
index 0000000000000000000000000000000000000000..0e883a5b26ec31fb7734212c176b862955b0b6f2
--- /dev/null
+++ b/110/replication_package/replication/ado/plus/c/confirmdir.hlp
@@ -0,0 +1,54 @@
+{smcl}
+{* 17Jan2008}{...}
+{* 28Oct2004}{...}
+{* 19Nov2003}{...}
+{hline}
+help for {hi:confirmdir} {right:manual: {hi:[R] none}}
+{right:dialog: {hi: none} }
+{hline}
+
+
+{title:Confirms if directory exists}
+
+{p 8 17 2}
+{cmd:confirmdir} {it:full directory name}
+{p_end}
+
+
+{title:Description}
+
+{p 4 4 2}{cmd:confirmdir} is designed for programmers who want to know if a directory exists.
+This is just like {cmd:confirm} command when used to confirm a file. If Stata allowed their
+{cmd:confirm} command to also have the "dir" option, this program would not have been
+written. Stata's {cmd:confirm file} will confirm a directory in UNIX/Linux but not in Windows.
+{cmd:confirmdir} works in all operating systems.{p_end}
+
+{title:Examples}
+
+{p 4 8 2}{cmd:. confirmdir "c:\My Favorite Directory\Where I Keep Stuff\"}{p_end}
+
+{p 4 8 2}{cmd:.
confirmdir /projects/ethiopia/survey2002/data/}{p_end} + + +{title:Saved Results} + +{p 4 8 2}The {cmd:confirmdir} command saves in {cmd:r()}:{p_end} + +{synoptset 20 tabbed}{...} +{p2col 5 20 24 2: Macros}{p_end} +{synopt:{cmd:r(confirmdir)}}return code returned by the {help cd:cd} command.{p_end} + + +{title:Author} + +{p 4 4 2} +Dan Blanchette {break} +The Carolina Population Center {break} +University of North Carolina - Chapel Hill, USA {break} +dan_blanchette@unc.edu{p_end} + + +{title:Also see} + +{p 4 13 2}On-line: {help confirm:confirm} {help cd:cd}, {help tmpdir:tmpdir} (if installed){p_end} + diff --git a/110/replication_package/replication/ado/plus/e/erepost.ado b/110/replication_package/replication/ado/plus/e/erepost.ado new file mode 100644 index 0000000000000000000000000000000000000000..e96f8dc9a50fbd96cd433a110a6f6ba8fe01fafb --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/erepost.ado @@ -0,0 +1,108 @@ +*! version 1.0.2, Ben Jann, 15jun2015 + +prog erepost, eclass + version 8.2 + syntax [anything(equalok)] [, cmd(str) noEsample Esample2(varname) REName /// + Obs(passthru) Dof(passthru) PROPerties(passthru) * ] + if "`esample'"!="" & "`esample2'"!="" { + di as err "only one allowed of noesample and esample()" + exit 198 + } +// parse [b = b] [V = V] + if `"`anything'"'!="" { + tokenize `"`anything'"', parse(" =") + if `"`7'"'!="" error 198 + if `"`1'"'=="b" { + if `"`2'"'=="=" & `"`3'"'!="" { + local b `"`3'"' + confirm matrix `b' + } + else error 198 + if `"`4'"'=="V" { + if `"`5'"'=="=" & `"`6'"'!="" { + local v `"`6'"' + confirm matrix `b' + } + else error 198 + } + else if `"`4'"'!="" error 198 + } + else if `"`1'"'=="V" { + if `"`4'"'!="" error 198 + if `"`2'"'=="=" & `"`3'"'!="" { + local v `"`3'"' + confirm matrix `v' + } + else error 198 + } + else error 198 + } +//backup existing e()'s + if "`esample2'"!="" { + local sample "`esample2'" + } + else if "`esample'"=="" { + tempvar sample + gen byte `sample' = e(sample) + } + 
local emacros: e(macros) + local emacros: subinstr local emacros "_estimates_name" "", word + if `"`properties'"'!="" { + local emacros: subinstr local emacros "properties" "", word + } + foreach emacro of local emacros { + local e_`emacro' `"`e(`emacro')'"' + } + local escalars: e(scalars) + if `"`obs'"'!="" { + local escalars: subinstr local escalars "N" "", word + } + if `"`dof'"'!="" { + local escalars: subinstr local escalars "df_r" "", word + } + foreach escalar of local escalars { + tempname e_`escalar' + scalar `e_`escalar'' = e(`escalar') + } + local ematrices: e(matrices) + if "`b'"=="" & `:list posof "b" in ematrices' { + tempname b + mat `b' = e(b) + } + if "`v'"=="" & `:list posof "V" in ematrices' { + tempname v + mat `v' = e(V) + } + local bV "b V" + local ematrices: list ematrices - bV + foreach ematrix of local ematrices { + tempname e_`ematrix' + matrix `e_`ematrix'' = e(`ematrix') + } +// rename + if "`b'"!="" & "`v'"!="" & "`rename'"!="" { // copy colnames from b + mat `v' = `b' \ `v' + mat `v' = `v'[2..., 1...] + mat `v' = `b'', `v' + mat `v' = `v'[1..., 2...] 
+ } +// post results + if "`esample'"=="" { + eret post `b' `v', esample(`sample') `obs' `dof' `properties' `options' + } + else { + eret post `b' `v', `obs' `dof' `properties' `options' + } + foreach emacro of local emacros { + eret local `emacro' `"`e_`emacro''"' + } + if `"`cmd'"'!="" { + eret local cmd `"`cmd'"' + } + foreach escalar of local escalars { + eret scalar `escalar' = scalar(`e_`escalar'') + } + foreach ematrix of local ematrices { + eret matrix `ematrix' = `e_`ematrix'' + } +end diff --git a/110/replication_package/replication/ado/plus/e/erepost.hlp b/110/replication_package/replication/ado/plus/e/erepost.hlp new file mode 100644 index 0000000000000000000000000000000000000000..ddd9990bcd1173dfe8fe3acc98ed6bffb9689b2d --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/erepost.hlp @@ -0,0 +1,126 @@ +{smcl} +{* 15jun2015}{...} +{cmd:help erepost} +{hline} + +{title:Title} + +{p 4 4 2}{hi:erepost} {hline 2} Repost the estimation results + + +{title:Syntax} + +{p 8 16 2}{cmd:erepost} [{cmd:b =} {it:b}] [{cmd:V =} {it:V}] [{cmd:,} + {cmd:cmd(}{it:string}{cmd:)} + {cmdab:ren:ame} + [{ul:{cmd:no}}]{cmdab:e:sample:}[{cmd:(}{it:varname}{cmd:)}] + {it:ereturn_post_opts} + ] + +{p 4 4 2} +where {it:b} is a 1 x p coefficient vector +(matrix) and {it:V} is a p x p covariance matrix. + + +{title:Description} + +{p 4 4 2} +{cmd:erepost} changes the {cmd:b} or {cmd:V} matrix of the current estimation results +or changes the declared estimation sample. {cmd:erepost} is similar to +{helpb ereturn repost}. However, {cmd:erepost} is allowed +after estimation commands that do not post their results +using {cmd:ereturn post} (e.g. {cmd:logit}) and {cmd:erepost} +can be used outside of {cmd:eclass} programs (see help {helpb program}). + +{p 4 4 2} +Technical note: After applying {cmd:erepost} the original command (or +{cmd:estimates replay}) may not be able to replay the output. 
+ + +{title:Options} + +{p 4 8 2} +{cmd:cmd(}{it:string}{cmd:)} sets the {cmd:e(cmd)} macro. + +{p 4 8 2} +{cmd:rename} causes the names from the coefficient vector to be used +as the labels for both the coefficient vector and the covariance matrix +in case of name conflicts. + +{p 4 8 2} +{cmd:esample(}{it:varname}{cmd:)} gives the name of the 0/1 variable +indicating the observations involved in the estimation. The variable is +removed from the data but is available for use as {hi:e(sample)}. Specify {cmd:noesample} +if you want to remove {cmd:e(sample)} from the estimation results. + +{p 4 8 2} +{it:ereturn_post_opts} are any other options allowed with {cmd:ereturn post} +(see help {helpb ereturn}). + + +{title:Examples} + +{p 4 4 2}Say, you are estimating a regression model that contains all interactions between the +regressors and a binary variable and you want to display the main effects and the +interaction effects in two columns side by side. One approach is to modify the names +in {cmd:e(b)} and {cmd:e(V)} so +that there is a "main equation" and an "interaction equation" and then tabulate +the model using {cmd:estout} or {cmd:esttab} with the {cmd:unstack} option. Example +(you will need the {cmd:estout} package to run this example; type +{net "describe http://fmwww.bc.edu/repec/bocode/e/estout.pkg":ssc describe estout}): + + {com}. sysuse auto + {txt}(1978 Automobile Data) + + {com}. generate foreign_mpg = foreign*mpg + {txt} + {com}. generate foreign_weight = foreign*weight + {txt} + {com}. quietly regress price foreign_mpg foreign_weight foreign mpg weight + {txt} + {com}. matrix b = e(b) + {txt} + {com}. matrix coleq b = foreign foreign foreign main main main + {txt} + {com}. matrix colname b = mpg weight _cons mpg weight _cons + {txt} + {com}. erepost b=b, rename + {txt} + {com}. 
esttab , unstack order(main: foreign:) + {res} + {txt}{hline 44} + {txt} (1) + {txt} price + {txt} main foreign + {txt}{hline 44} + {txt}mpg {res} 237.7 -257.5 {txt} + {res} {ralign 12:{txt:(}1.90{txt:)}} {ralign 12:{txt:(}-1.66{txt:)}} {txt} + + {txt}weight {res} 4.415*** 0.741 {txt} + {res} {ralign 12:{txt:(}5.18{txt:)}} {ralign 12:{txt:(}0.45{txt:)}} {txt} + + {txt}_cons {res} -13285.4* 8219.6 {txt} + {res} {ralign 12:{txt:(}-2.58{txt:)}} {ralign 12:{txt:(}1.13{txt:)}} {txt} + {txt}{hline 44} + {txt}N {res} 74 {txt} + {txt}{hline 44} + {txt}t statistics in parentheses + {txt}* p<0.05, ** p<0.01, *** p<0.001 + + +{title:Author} + +{p 4 4 2}Ben Jann, University of Bern, jann@soz.unibe.ch + +{p 4 4 2}Thanks for citing this software as follows: + +{p 8 8 2}Jann, B. (2007). erepost: Stata module to repost the estimation results. Available from + {browse "http://ideas.repec.org/c/boc/bocode/s456850.html"}. + + +{title:Also see} + +{psee} +Online: {helpb ereturn}, {help estcom}, +{helpb estimates} + diff --git a/110/replication_package/replication/ado/plus/e/estadd.ado b/110/replication_package/replication/ado/plus/e/estadd.ado new file mode 100644 index 0000000000000000000000000000000000000000..6b364f01487fb704764d4360243e521c634ed5ca --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/estadd.ado @@ -0,0 +1,2465 @@ +*! version 2.3.5 05feb2016 Ben Jann +* 1. estadd and helpers +* 2. estadd_local +* 3. estadd_scalar +* 4. estadd_matrix +* 5. estadd_mean +* 6. estadd_sd +* 7. estadd_beta +* 8. estadd_coxsnell +* 9. estadd_nagelkerke +* 10. estadd_ysumm +* 11. estadd_summ +* 12. estadd_vif +* 13. estadd_ebsd +* 14. estadd_expb +* 15. estadd_pcorr +* 16. estadd_lrtest +* 17. estadd_brent +* 18. estadd_fitstat +* 19. estadd_listcoef +* 20. estadd_mlogtest +* 21. estadd_prchange +* 22. estadd_prvalue +* 23. estadd_asprvalue +* 24. estadd_margins +* 99. copy of erepost.ado + +* 1. 
program estadd
    // Main dispatcher: parses -estadd subcmd [, opts] [: namelist]- and runs
    // estadd_<subcmd> on the active estimates and/or each stored estimation
    // set in `namelist' ("." = the active estimates). r()- and e()-results
    // of the caller are preserved via _return hold / _est hold.
    version 8.2
    local caller : di _caller()
    capt _on_colon_parse `macval(0)'
    if !_rc {
        local 0 `"`s(before)'"' // cannot apply macval() here
        local names `"`s(after)'"'
    }
    syntax anything(equalok id="subcommand") [if] [in] [fw aw iw pw] [, * ]
    if regexm(`"`anything'"',"^r\((.*)\)$") { // check -estadd r(name)-
        // bare r(name): decide whether it is a scalar or a matrix and
        // rewrite the call as -estadd scalar/matrix r(name)-
        capt confirm scalar `anything'
        if _rc {
            capt confirm matrix `anything'
            if _rc {
                di as err `"`anything' not found"'
                exit 111
            }
            else {
                local anything `"matrix `anything'"'
            }
        }
        else {
            local anything `"scalar `anything'"'
        }
    }
    gettoken subcommand : anything
    capt confirm name `subcommand'
    if _rc {
        di as err "invalid subcommand"
        exit 198
    }
    if `"`options'"'!="" local options `", `options'"'
    if `"`weight'`exp'"'!="" local wgtexp `"[`weight'`exp']"'

//expand estimates names and backup current estimates if necessary
    tempname rcurrent ecurrent
    capt _return drop `rcurrent'
    _return hold `rcurrent'
    capt noisily {
        local names: list retok names
        if "`names'"=="" {
            local names "."
            local qui
        }
        else local qui quietly
        foreach name of local names {
            if "`name'"=="." {
                capt est_expand "`name'"
                if _rc local enames "`enames'`name' "
                else local enames "`enames'`r(names)' "
            }
            else {
                est_expand "`name'" //=> error if estimates not found
                local enames "`enames'`r(names)' "
            }
        }
        local names: list uniq enames
        if "`names'"=="." local active
        else {
            capt est_expand .
            if _rc local active "."
            else local active "`r(names)'"
            if "`active'"=="." | `:list posof "`active'" in names'==0 {
                // active estimates are unnamed or not among the targets:
                // park them so they can be restored afterwards
                local active
                _est hold `ecurrent', restore estsystem nullok
            }
        }
    }
    if _rc {
        _return restore `rcurrent'
        exit _rc
    }
    _return restore `rcurrent', hold

// cases:
// - if active estimates not stored yet and "`names'"==".": simply execute
//   estadd_subcmd to active estimates
// - else if active estimates not stored yet: backup/restore active estimates
// - else if active estimates stored but not in `names': backup/restore active estimates
// - else if active estimates stored: no backup but restore at end

//loop over estimates names and run subcommand
    nobreak {
        foreach m of local names {
            if "`names'"!="." {
                if "`m'"=="." _est unhold `ecurrent'
                else {
                    capt confirm new var _est_`m' // fix e(sample)
                    if _rc qui replace _est_`m' = 0 if _est_`m' >=.
                    _est unhold `m'
                }
            }
            backup_estimates_name
            capt n break `qui' version `caller': ///
                estadd_`macval(anything)' `if' `in' `wgtexp' `options'
            local rc = _rc
            restore_estimates_name
            if "`names'"!="." {
                if "`m'"=="." _est hold `ecurrent', restore estsystem nullok
                else _est hold `m', estimates varname(_est_`m')
            }
            if `rc' continue, break
        }
        if "`active'"!="" estimates restore `active', noh
    }
    _return restore `rcurrent'
    if `rc' {
        // rc 199 = estadd_<subcmd> does not exist
        if `rc' == 199 di as error "invalid subcommand"
        exit `rc'
    }
end

* Temporarily clear e(_estimates_name) so subcommands see "anonymous" results;
* saved in e(_estadd_estimates_name) for restore_estimates_name.
program define backup_estimates_name, eclass
    ereturn local _estadd_estimates_name `"`e(_estimates_name)'"'
    ereturn local _estimates_name ""
end
program define restore_estimates_name, eclass
    local hold `"`e(_estadd_estimates_name)'"'
    ereturn local _estadd_estimates_name ""
    ereturn local _estimates_name `"`hold'"'
end

* Error out (rc 110) if the named e() result already exists.
program confirm_new_ename
    capture confirm existence `e(`0')'
    if !_rc {
        di as err "e(`0') already defined"
        exit 110
    }
end

* Error out (rc 498) unless e(sample) is set by the current estimates.
program confirm_esample
    local efun: e(functions)
    if `:list posof "sample" in efun'==0 {
        di as err "e(sample) information not available"
        exit 498
    }
end

* Confirm that `var' is a numeric variable, stripping a leading
* time-series operator (e.g. "L.x" -> "x") before checking.
program confirm_numvar
    args var
    local ts = index("`var'",".")
    confirm numeric variable `=substr("`var'",`ts'+1,.)'
end

* Display helpers: report an added e() macro / scalar / matrix.
program define added_macro
    args name
    di as txt %25s `"e(`name') : "' `""{res:`e(`name')'}""' // cannot apply macval() here
end

program define added_scalar
    args name label
    di as txt %25s `"e(`name') = "' " " as res e(`name') _c
    if `"`label'"'!="" {
        di as txt _col(38) `"(`label')"'
    }
    else di ""
end

program define added_matrix
    args name label
    // label "_rown" means: show the matrix's row names as the label
    capture {
        local r = rowsof(e(`name'))
        local c = colsof(e(`name'))
    }
    if _rc {
        // rowsof()/colsof() on e() can fail in old Stata versions; copy first
        tempname tmp
        mat `tmp' = e(`name')
        local r = rowsof(`tmp')
        local c = colsof(`tmp')
    }
    di as txt %25s `"e(`name') : "' " " ///
        as res "`r' x `c'" _c
    if `"`label'"'=="_rown" {
        local thelabel: rownames e(`name')
        local thelabel: list retok thelabel
        if `r'>1 {
            local thelabel: subinstr local thelabel " " ", ", all
        }
        di as txt _col(38) `"(`thelabel')"'
    }
    else if `"`label'"'!="" {
        di as txt _col(38) `"(`label')"'
    }
    else di ""
end

* 2.

* -estadd- subroutine: add local
* Stores a user-supplied macro in e(): -estadd local name [...] text-.
* estadd_loc / estadd_loca are abbreviation stubs forwarding to estadd_local.
program estadd_loc
    estadd_local `macval(0)'
end
program estadd_loca
    estadd_local `macval(0)'
end
program estadd_local, eclass
    version 8.2
    syntax anything(equalok) [, Prefix(name) Replace Quietly ]
    // split off the macro name; `def' keeps the leading "=", ":" or blank
    // so that -ereturn local- sees the original definition syntax
    gettoken name def : anything , parse(" =:")
    if "`replace'"=="" {
        confirm_new_ename `prefix'`name'
    }
    ereturn local `prefix'`name'`macval(def)'
    di _n as txt "added macro:"
    added_macro `prefix'`name'
end

* 3.
* -estadd- subroutine: add scalar
* Stores a scalar in e(): -estadd scalar name [=] exp- or -estadd scalar r(name)-.
* estadd_sca / estadd_scal / estadd_scala are abbreviation stubs.
program estadd_sca
    estadd_scalar `0'
end
program estadd_scal
    estadd_scalar `0'
end
program estadd_scala
    estadd_scalar `0'
end
program estadd_scalar, eclass
    version 8.2
    syntax anything(equalok) [, Prefix(name) Replace Quietly ]
    if regexm("`anything'","^r\((.*)\)$") { // estadd scalar r(name)
        local name = regexs(1)
        capt confirm name `name'
        confirm scalar `anything'
        // NOTE(review): -confirm scalar- above is not captured and aborts on
        // its own; _rc here still reflects -capt confirm name- — verify intended
        if _rc error 198
        local equ "`anything'"
    }
    else {
        local isname 0
        gettoken name equ0: anything, parse(" =")
        capt confirm name `name'
        if _rc error 198
        else if `"`equ0'"'=="" { // estadd scalar name
            local isname 1
            local equ "scalar(`name')"
        }
        else { // estadd scalar name [=] exp
            // drop an optional "=" between name and expression
            gettoken trash equ : equ0, parse(" =")
            if `"`trash'"'!="=" {
                local equ `"`equ0'"'
            }
        }
    }
    if "`replace'"=="" {
        confirm_new_ename `prefix'`name'
    }
    ereturn scalar `prefix'`name' = `equ'
    di _n as txt "added scalar:"
    added_scalar `prefix'`name'
end

* 4.

* -estadd- subroutine: add matrix
* Stores a matrix in e(): -estadd matrix name [=] exp- or -estadd matrix r(name)-.
* estadd_mat / estadd_matr / estadd_matri are abbreviation stubs.
program estadd_mat
    estadd_matrix `0'
end
program estadd_matr
    estadd_matrix `0'
end
program estadd_matri
    estadd_matrix `0'
end
program estadd_matrix, eclass
    version 8.2
    syntax anything(equalok) [, Prefix(name) Replace Quietly ]
    if regexm("`anything'","^r\((.*)\)$") { // estadd matrix r(name)
        local name = regexs(1)
        capt confirm name `name'
        if _rc error 198
        confirm matrix `anything'
        local equ "`anything'"
    }
    else {
        local isname 0
        gettoken name equ0: anything, parse(" =")
        capt confirm name `name'
        if _rc error 198
        else if `"`equ0'"'=="" { // estadd matrix name
            local isname 1
            local equ "`name'"
        }
        else { // estadd matrix name [=] exp
            // drop an optional "=" between name and expression
            gettoken trash equ : equ0, parse(" =")
            if `"`trash'"'!="=" {
                local equ `"`equ0'"'
            }
        }
    }
    if "`replace'"=="" {
        confirm_new_ename `prefix'`name'
    }
    // copy via tempname so `equ' may be an expression, not just a name
    tempname M
    mat `M' = `equ'
    ereturn matrix `prefix'`name' = `M'
    di _n as txt "added matrix:"
    added_matrix `prefix'`name'
end

* 5.
* -estadd- subroutine: means of regressors
* Adds e(mean): a row vector of estimation-sample means of the model's
* regressors (columns of e(b)); non-numeric columns are set to .z.
program define estadd_mean, eclass
    version 8.2
    syntax [, Prefix(name) Replace Quietly ]
//check availability of e(sample)
    confirm_esample
//check e()-names
    if "`replace'"=="" confirm_new_ename `prefix'mean
//use aweights with -summarize-
    local wtype `e(wtype)'
    if "`wtype'"=="pweight" local wtype aweight
//subpop?
    local subpop "`e(subpop)'"
    if "`subpop'"=="" local subpop 1
//copy coefficients matrix and determine varnames
    tempname results
    mat `results' = e(b)
    local vars: colnames `results'
//loop over variables: calculate -mean-
    local j 0
    foreach var of local vars {
        local ++j
        capture confirm_numvar `var'
        if _rc mat `results'[1,`j'] = .z
        else {
            capt su `var' [`wtype'`e(wexp)'] if e(sample) & `subpop', meanonly
            mat `results'[1,`j'] = cond(_rc,.,r(mean))
        }
    }
//return the results
    ereturn matrix `prefix'mean = `results'
    di _n as txt "added matrix:"
    added_matrix `prefix'mean
end

* 6.
* -estadd- subroutine: standard deviations of regressors
* Adds e(sd): estimation-sample standard deviations of the regressors.
* With -nobinary-, 0/1 variables are skipped (set to .z).
program define estadd_sd, eclass
    version 8.2
    syntax [, noBinary Prefix(name) Replace Quietly ]
//check availability of e(sample)
    confirm_esample
//check e()-names
    if "`replace'"=="" confirm_new_ename `prefix'sd
//use aweights with -summarize-
    local wtype `e(wtype)'
    if "`wtype'"=="pweight" local wtype aweight
//subpop?
    local subpop "`e(subpop)'"
    if "`subpop'"=="" local subpop 1
//copy coefficients matrix and determine varnames
    tempname results
    mat `results' = e(b)
    local vars: colnames `results'
//loop over variables: calculate -mean-
    local j 0
    foreach var of local vars {
        local ++j
        capture confirm_numvar `var'
        if _rc mat `results'[1,`j'] = .z
        else {
            // `binary'=="" means -nobinary- was NOT specified: always compute;
            // otherwise compute only if the assert shows the var is not 0/1
            capture assert `var'==0 | `var'==1 if e(sample) & `subpop'
            if _rc | "`binary'"=="" {
                capt su `var' [`wtype'`e(wexp)'] if e(sample) & `subpop'
                mat `results'[1,`j'] = cond(_rc,.,r(sd))
            }
            else mat `results'[1,`j'] = .z
        }
    }
//return the results
    ereturn matrix `prefix'sd = `results'
    di _n as txt "added matrix:"
    added_matrix `prefix'sd
end

* 7.

* -estadd- subroutine: standardized coefficients
* Adds e(beta): b * sd(x) / sd(y), per equation; the dependent variable of
* each equation is taken from the column equation names ("_" = e(depvar)).
program define estadd_beta, eclass
    version 8.2
    syntax [, Prefix(name) Replace Quietly ]
//check availability of e(sample)
    confirm_esample
//check e()-names
    if "`replace'"=="" confirm_new_ename `prefix'beta
//use aweights with -summarize-
    local wtype `e(wtype)'
    if "`wtype'"=="pweight" local wtype aweight
//subpop?
    local subpop "`e(subpop)'"
    if "`subpop'"=="" local subpop 1
//copy coefficients matrix and determine varnames
    tempname results sddep
    mat `results' = e(b)
    local vars: colnames `results'
    local eqs: coleq `results', q
    local depv "`e(depvar)'"
//loop over variables: calculate -beta-
    local j 0
    local lastdepvar
    foreach var of local vars {
        local depvar: word `++j' of `eqs'
        if "`depvar'"=="_" local depvar "`depv'"
        capture confirm_numvar `depvar'
        if _rc mat `results'[1,`j'] = .z
        else {
            // recompute sd(y) only when the equation's depvar changes
            if "`depvar'"!="`lastdepvar'" {
                capt su `depvar' [`wtype'`e(wexp)'] if e(sample) & `subpop'
                scalar `sddep' = cond(_rc,.,r(sd))
            }
            capture confirm_numvar `var'
            if _rc mat `results'[1,`j'] = .z
            else {
                capt su `var' [`wtype'`e(wexp)'] if e(sample) & `subpop'
                mat `results'[1,`j'] = cond(_rc,.,`results'[1,`j'] * r(sd) / `sddep')
            }
        }
        local lastdepvar "`depvar'"
    }
//return the results
    ereturn matrix `prefix'beta = `results'
    di _n as txt "added matrix:"
    added_matrix `prefix'beta
end

* 8.
* -estadd- subroutine: Cox & Snell Pseudo R-Squared
* Adds e(coxsnell) from e(ll_0), e(ll), and e(N).
program define estadd_coxsnell, eclass
    version 8.2
    syntax [, Prefix(name) Replace Quietly ]
//check e()-names
    if "`replace'"=="" confirm_new_ename `prefix'coxsnell
//compute statistic
    tempname results
    scalar `results' = 1 - exp((e(ll_0)-e(ll))*2/e(N)) // = 1 - exp(e(ll_0)-e(ll))^(2/e(N))
//return the results
    *di as txt "Cox & Snell Pseudo R2 = " as res `results'
    ereturn scalar `prefix'coxsnell = `results'
    di _n as txt "added scalar:"
    added_scalar `prefix'coxsnell
end

* 9.

* -estadd- subroutine: Nagelkerke Pseudo R-Squared
* Adds e(nagelkerke): Cox & Snell R2 scaled by its maximum attainable value.
program define estadd_nagelkerke, eclass
    version 8.2
    syntax [, Prefix(name) Replace Quietly ]
//check e()-names
    if "`replace'"=="" confirm_new_ename `prefix'nagelkerke
//compute statistic
    tempname results
    scalar `results' = (1 - exp((e(ll_0)-e(ll))*2/e(N))) / (1 - exp(e(ll_0)*2/e(N)))
    // = (1 - exp(e(ll_0)-e(ll))^(2/e(N))) / (1 - exp(e(ll_0))^(2/e(N)))
//return the results
    *di as txt "Nagelkerke Pseudo R2 = " as res `results'
    ereturn scalar `prefix'nagelkerke = `results'
    di _n as txt "added scalar:"
    added_scalar `prefix'nagelkerke
end

* 10.
* -estadd- subroutine: summary statistics for dependent variable
* Adds one e() scalar per requested statistic (default prefix "y", e.g.
* e(ymean), e(ysd), ...) computed for the first e(depvar) on e(sample).
* The cheapest sufficient -summarize- mode (meanonly/plain/detail) is chosen.
program define estadd_ysumm, eclass
    version 8.2
    syntax [, MEan SUm MIn MAx RAnge sd Var cv SEMean SKewness ///
        Kurtosis MEDian p1 p5 p10 p25 p50 p75 p90 p95 p99 iqr q all ///
        Prefix(passthru) Replace Quietly ]
//check availability of e(sample)
    confirm_esample
//default prefix
    if `"`prefix'"'=="" local prefix y
    else {
        // re-parse the passthru option to extract the bare prefix name
        local 0 ", `prefix'"
        syntax [, prefix(name) ]
    }
//use aweights with -summarize-
    local wtype `e(wtype)'
    if "`wtype'"=="pweight" local wtype aweight
//subpop?
    local subpop "`e(subpop)'"
    if "`subpop'"=="" local subpop 1
//determine list of stats
    tempname results
    local Stats p99 p95 p90 p75 p50 p25 p10 p5 p1 kurtosis ///
        skewness var sd max min sum mean
    if "`all'"!="" {
        local stats `Stats'
        local range range
        local cv cv
        local semean semean
        local iqr iqr
        local sumtype detail
    }
    else {
        // -q- is shorthand for the three quartiles; -median- for p50
        if "`q'"!="" {
            local p25 p25
            local p50 p50
            local p75 p75
        }
        if "`median'"!="" local p50 p50
        foreach stat of local Stats {
            if "``stat''"!="" {
                local stats: list stats | stat
            }
        }
        if "`stats'"=="" & "`range'"=="" & "`cv'"=="" & ///
            "`semean'"=="" & "`iqr'"=="" local stats sd max min mean
        // pick the cheapest -summarize- mode that returns all needed r()'s
        local sumtype sum mean min max
        if "`:list stats - sumtype'"=="" & "`cv'"=="" & ///
            "`semean'"=="" & "`iqr'"=="" local sumtype meanonly
        else {
            local sumtype `sumtype' Var sd
            if "`:list stats - sumtype'"=="" & "`iqr'"=="" local sumtype
            else local sumtype detail
        }
    }
    // r(Var) is capitalized in -summarize- returns, hence the rename
    local Stats: subinstr local stats "var" "Var"
    local nstats: word count `iqr' `semean' `cv' `range' `stats'
    if "`replace'"=="" {
        foreach stat in `iqr' `semean' `cv' `range' `stats' {
            confirm_new_ename `prefix'`=lower("`stat'")'
        }
    }
//calculate stats
    local var: word 1 of `e(depvar)'
    mat `results' = J(`nstats',1,.z)
    qui su `var' [`wtype'`e(wexp)'] if e(sample) & `subpop', `sumtype'
    local i 0
    if "`iqr'"!="" {
        mat `results'[`++i',1] = r(p75) - r(p25)
    }
    if "`semean'"!="" {
        mat `results'[`++i',1] = r(sd) / sqrt(r(N))
    }
    if "`cv'"!="" {
        mat `results'[`++i',1] = r(sd) / r(mean)
    }
    if "`range'"!="" {
        mat `results'[`++i',1] = r(max) - r(min)
    }
    foreach stat of local Stats {
        mat `results'[`++i',1] = r(`stat')
    }
//return the results
    local i 0
    di as txt _n "added scalars:"
    foreach stat in `iqr' `semean' `cv' `range' `stats' {
        local sname = lower("`stat'")
        ereturn scalar `prefix'`sname' = `results'[`++i',1]
        added_scalar `prefix'`sname'
    }
end

* 11.

* -estadd- subroutine: various summary statistics
* Like estadd_ysumm, but per REGRESSOR: adds one e() matrix per requested
* statistic (e.g. e(mean), e(p50), ...), each a row vector conformable with
* e(b); non-numeric columns get .z.
program define estadd_summ, eclass
    version 8.2
    syntax [, MEan SUm MIn MAx RAnge sd Var cv SEMean SKewness ///
        Kurtosis MEDian p1 p5 p10 p25 p50 p75 p90 p95 p99 iqr q all ///
        Prefix(name) Replace Quietly ]
//check availability of e(sample)
    confirm_esample
//use aweights with -summarize-
    local wtype `e(wtype)'
    if "`wtype'"=="pweight" local wtype aweight
//subpop?
    local subpop "`e(subpop)'"
    if "`subpop'"=="" local subpop 1
//determine list of stats
    tempname results results2
    local Stats p99 p95 p90 p75 p50 p25 p10 p5 p1 kurtosis ///
        skewness var sd max min sum mean
    if "`all'"!="" {
        local stats `Stats'
        local range range
        local cv cv
        local semean semean
        local iqr iqr
        local sumtype detail
    }
    else {
        // -q- is shorthand for the three quartiles; -median- for p50
        if "`q'"!="" {
            local p25 p25
            local p50 p50
            local p75 p75
        }
        if "`median'"!="" local p50 p50
        foreach stat of local Stats {
            if "``stat''"!="" {
                local stats: list stats | stat
            }
        }
        if "`stats'"=="" & "`range'"=="" & "`cv'"=="" & ///
            "`semean'"=="" & "`iqr'"=="" local stats sd max min mean
        // pick the cheapest -summarize- mode that returns all needed r()'s
        local sumtype sum mean min max
        if "`:list stats - sumtype'"=="" & "`cv'"=="" & ///
            "`semean'"=="" & "`iqr'"=="" local sumtype meanonly
        else {
            local sumtype `sumtype' Var sd
            if "`:list stats - sumtype'"=="" & "`iqr'"=="" local sumtype
            else local sumtype detail
        }
    }
    // r(Var) is capitalized in -summarize- returns, hence the rename
    local Stats: subinstr local stats "var" "Var"
    local nstats: word count `iqr' `semean' `cv' `range' `stats'
    if "`replace'"=="" {
        foreach stat in `iqr' `semean' `cv' `range' `stats' {
            confirm_new_ename `prefix'`=lower("`stat'")'
        }
    }
//copy coefficients matrix and determine varnames
    mat `results' = e(b)
    local vars: colnames `results'
    // one row per statistic, initialized to .z (= not applicable)
    if `nstats'>1 {
        mat `results' = `results' \ J(`nstats'-1,colsof(`results'),.z)
    }
//loop over variables: calculate stats
    local j 0
    foreach var of local vars {
        local ++j
        capture confirm_numvar `var'
        if _rc mat `results'[1,`j'] = .z
        else {
            capt su `var' [`wtype'`e(wexp)'] if e(sample) & `subpop', `sumtype'
            local i 0
            if "`iqr'"!="" {
                mat `results'[`++i',`j'] = cond(_rc,.,r(p75) - r(p25))
            }
            if "`semean'"!="" {
                mat `results'[`++i',`j'] = cond(_rc,.,r(sd) / sqrt(r(N)))
            }
            if "`cv'"!="" {
                mat `results'[`++i',`j'] = cond(_rc,.,r(sd) / r(mean))
            }
            if "`range'"!="" {
                mat `results'[`++i',`j'] = cond(_rc,.,r(max) - r(min))
            }
            foreach stat of local Stats {
                mat `results'[`++i',`j'] = cond(_rc,.,r(`stat'))
            }
        }
    }
//return the results
    local i 0
    di as txt _n "added matrices:"
    foreach stat in `iqr' `semean' `cv' `range' `stats' {
        local sname = lower("`stat'")
        mat `results2' = `results'[`++i',1...]
        ereturn matrix `prefix'`sname' = `results2'
        added_matrix `prefix'`sname'
    }
end

* 12.
* -estadd- subroutine: variance inflation factors
* Adds e(vif) (and optionally e(tolerance)=1/VIF and e(sqrvif)=sqrt(VIF))
* from official -vif-; only valid after -regress- (rc 301 otherwise).
program define estadd_vif, eclass
    version 8.2
    local caller : di _caller()
    syntax [, TOLerance SQRvif Prefix(name) Replace Quietly ]
//check availability of e(sample)
    confirm_esample
//check e()-names
    if "`replace'"=="" {
        confirm_new_ename `prefix'vif
        if "`tolerance'"!="" confirm_new_ename `prefix'tolerance
        if "`sqrvif'"!="" confirm_new_ename `prefix'sqrvif
    }
//copy coefficients matrix and set to .z
    tempname results results2 results3
    matrix `results' = e(b)
    forv j = 1/`=colsof(`results')' {
        mat `results'[1,`j'] = .z
    }
    if "`tolerance'"!="" mat `results2' = `results'
    if "`sqrvif'"!="" mat `results3' = `results'
//compute VIF and add to results vector
    capt n `quietly' version `caller': vif
    if _rc {
        if _rc == 301 di as err "-estadd:vif- can only be used after -regress-"
        exit _rc
    }
    // -vif- returns parallel lists r(name_i)/r(vif_i); match by column name
    local i 0
    local name "`r(name_`++i')'"
    while "`name'"!="" {
        local j = colnumb(`results',"`name'")
        if `j'<. {
            matrix `results'[1,`j'] = r(vif_`i')
            if "`tolerance'"!="" matrix `results2'[1,`j'] = 1 / r(vif_`i')
            if "`sqrvif'"!="" matrix `results3'[1,`j'] = sqrt( r(vif_`i') )
        }
        local name "`r(name_`++i')'"
    }
//return the results
    if "`sqrvif'"!="" | "`tolerance'"!="" di as txt _n "added matrices:"
    else di as txt _n "added matrix:"
    if "`sqrvif'"!="" {
        ereturn matrix `prefix'sqrvif = `results3'
        added_matrix `prefix'sqrvif
    }
    if "`tolerance'"!="" {
        ereturn matrix `prefix'tolerance = `results2'
        added_matrix `prefix'tolerance
    }
    ereturn matrix `prefix'vif = `results'
    added_matrix `prefix'vif
end

* 13.
* -estadd- subroutine: standardized factor change coefficients
* Adds e(ebsd): exp(b * sd(x)) per regressor, i.e. factor change in the
* outcome per standard-deviation change in x.
program define estadd_ebsd, eclass
    version 8.2
    syntax [, Prefix(name) Replace Quietly ]
//check availability of e(sample)
    confirm_esample
//check e()-names
    if "`replace'"=="" confirm_new_ename `prefix'ebsd
//use aweights with -summarize-
    local wtype `e(wtype)'
    if "`wtype'"=="pweight" local wtype aweight
//subpop?
    local subpop "`e(subpop)'"
    if "`subpop'"=="" local subpop 1
//copy coefficients matrix and determine varnames
    tempname results
    mat `results' = e(b)
    local vars: colnames `results'
//loop over variables: calculate -mean-
    local j 0
    foreach var of local vars {
        local ++j
        capture confirm_numvar `var'
        if _rc mat `results'[1,`j'] = .z
        else {
            capt su `var' [`wtype'`e(wexp)'] if e(sample) & `subpop'
            mat `results'[1,`j'] = cond(_rc,.,exp( `results'[1,`j'] * r(sd)))
        }
    }
//return the results
    ereturn matrix `prefix'ebsd = `results'
    di _n as txt "added matrix:"
    added_matrix `prefix'ebsd
end

* 14.

* -estadd- subroutine: exponentiated coefficients
* Adds e(expb): exp(b) for each coefficient; with -noconstant- the _cons
* entries are set to .z instead.
program define estadd_expb, eclass
    version 8.2
    syntax [, noCONStant Prefix(name) Replace Quietly ]
//check e()-names
    if "`replace'"=="" confirm_new_ename `prefix'expb
//copy coefficients matrix and determine names of coefficients
    tempname results
    mat `results' = e(b)
    local coefs: colnames `results'
//loop over coefficients
    local j 0
    foreach coef of local coefs {
        local ++j
        if `"`constant'"'!="" & `"`coef'"'=="_cons" {
            mat `results'[1,`j'] = .z
        }
        else {
            mat `results'[1,`j'] = exp(`results'[1,`j'])
        }
    }
//return the results
    ereturn matrix `prefix'expb = `results'
    di _n as txt "added matrix:"
    added_matrix `prefix'expb
end

* 15.
* -estadd- subroutine: partial and semi-partial correlations
* Adds e(pcorr) (and with -semi- also e(spcorr)) for the numeric regressors
* of the FIRST equation, computed from an auxiliary -regress- on e(sample).
program define estadd_pcorr, eclass
    version 8.2
    syntax [, semi Prefix(name) Replace Quietly ]
//check availability of e(sample)
    confirm_esample
//check e()-names
    if "`replace'"=="" {
        if "`semi'"!="" confirm_new_ename `prefix'spcorr
        confirm_new_ename `prefix'pcorr
    }
//copy coefficients matrix and set to .z
    tempname results results2
    matrix `results' = e(b)
    forv j = 1/`=colsof(`results')' {
        mat `results'[1,`j'] = .z
    }
    local eqs: coleq `results', quoted
    local eq: word 1 of `eqs'
    mat `results2' = `results'[1,"`eq':"]
    local vars: colnames `results2'
    // keep only existing numeric variables (drops _cons, string/factor terms)
    foreach var of local vars {
        capt confirm numeric var `var'
        if !_rc local temp "`temp'`var' "
    }
    local vars "`temp'"
    if "`semi'"!="" mat `results2' = `results'
    else {
        mat drop `results2'
        local results2
    }
    local depv: word 1 of `e(depvar)'
//compute statistics and add to results vector
    local wtype `e(wtype)'
    if inlist("`wtype'","pweight","iweight") local wtype aweight
    _estadd_pcorr_compute `depv' `vars' [`wtype'`e(wexp)'] if e(sample), ///
        eq(`eq') results(`results') results2(`results2')
//return the results
    if "`semi'"!="" {
        di as txt _n "added matrices:"
        ereturn matrix `prefix'spcorr = `results2'
        added_matrix `prefix'spcorr
    }
    else di as txt _n "added matrix:"
    ereturn matrix `prefix'pcorr = `results'
    added_matrix `prefix'pcorr
end
* Worker: fills `results' (partial) and `results2' (semi-partial) in place
* from t-tests of an auxiliary regression; current estimates are protected
* with _est hold.
program define _estadd_pcorr_compute // based on pcorr.ado by StataCorp
                                     // and pcorr2.ado by Richard Williams
    syntax varlist(min=1) [aw fw] [if], eq(str) results(str) [ results2(str) ]
    marksample touse
    tempname hcurrent
    _est hold `hcurrent', restore
    quietly reg `varlist' [`weight'`exp'] if `touse'
    if (e(N)==0 | e(N)>=.) error 2000
    local NmK = e(df_r)
    local R2 = e(r2)
    gettoken depv varlist: varlist
    foreach var of local varlist {
        quietly test `var'
        if r(F)<. {
            // sign of the correlation follows the sign of the coefficient
            local s "1"
            if _b[`var']<0 local s "-1"
            local c = colnumb(`results',"`eq':`var'")
            mat `results'[1,`c'] = `s' * sqrt(r(F)/(r(F)+`NmK'))
            if "`results2'"!="" {
                mat `results2'[1,`c'] = `s' * sqrt(r(F)*((1-`R2')/`NmK'))
            }
        }
    }
end

* 16.
* -estadd- subroutine: Likelihood-ratio test
* Runs -lrtest- against the given model and adds e(<name>p), e(<name>chi2),
* e(<name>df); default name stub is "lrtest_".
program define estadd_lrtest, eclass
    version 8.2
    local caller : di _caller()
    syntax anything(id="model") [, Name(name) Prefix(name) Replace Quietly * ]
    if "`name'"=="" local name lrtest_
//check e()-names
    if "`replace'"=="" {
        confirm_new_ename `prefix'`name'p
        confirm_new_ename `prefix'`name'chi2
        confirm_new_ename `prefix'`name'df
    }
//compute statistics
    `quietly' version `caller': lrtest `anything', `options'
//return the results
    ereturn scalar `prefix'`name'p = r(p)
    ereturn scalar `prefix'`name'chi2 = r(chi2)
    ereturn scalar `prefix'`name'df = r(df)
    di _n as txt "added scalars:"
    added_scalar `prefix'`name'p
    added_scalar `prefix'`name'chi2
    added_scalar `prefix'`name'df
end

* 17.

* -estadd- subroutine: support for -brant- by Long and Freese
* (see http://www.indiana.edu/~jslsoc/spost.htm)
* Runs -brant- (Brant test of parallel regression after ordinal models) and
* adds e(brant_chi2), e(brant_df), e(brant_p) plus the per-variable test
* matrix e(brant) (transposed r(ivtests), variables as columns).
program define estadd_brant, eclass
    version 8.2
    local caller : di _caller()
    syntax [ , Prefix(name) Replace Quietly * ]
    capt findfile brant.ado
    if _rc {
        // fix: the missing file is brant.ado, not fitstat.ado (message was
        // copy-pasted from estadd_fitstat)
        di as error "brant.ado from the -spost9_ado- package by Long and Freese required"
        di as error `"type {stata "net from http://www.indiana.edu/~jslsoc/stata"}"'
        error 499
    }
// check names
    if "`replace'"=="" {
        foreach name in brant_chi2 brant_df brant_p brant {
            confirm_new_ename `prefix'`name'
        }
    }
// compute and return the results
    `quietly' version `caller': brant, `options'
    di as txt _n "added scalars:"
    foreach stat in chi2 df p {
        ereturn scalar `prefix'brant_`stat' = r(`stat')
        added_scalar `prefix'brant_`stat'
    }
    tempname mat
    matrix `mat' = r(ivtests)
    matrix `mat' = `mat''
    ereturn matrix `prefix'brant = `mat'
    di as txt _n "added matrix:"
    added_matrix `prefix'brant _rown
end

* 18.

* -estadd- subroutine: support for -fitstat- by Long and Freese
* (see http://www.indiana.edu/~jslsoc/spost.htm)
* Runs -fitstat- and copies its returned scalars into e(); r(bic)/r(aic)
* are stored as e(bic0)/e(aic0) to avoid clobbering official e(bic)/e(aic).
program define estadd_fitstat, eclass
    version 8.2
    local caller : di _caller()
    syntax [ , Prefix(name) Replace Quietly Bic * ]
    capt findfile fitstat.ado
    if _rc {
        di as error "fitstat.ado from the -spost9_ado- package by Long and Freese required"
        di as error `"type {stata "net from http://www.indiana.edu/~jslsoc/stata"}"'
        error 499
    }
    `quietly' version `caller': fitstat, `bic' `options'
    // keep only the known fitstat scalars that were actually returned
    local stats: r(scalars)
    local allstats ///
        dev dev_df lrx2 lrx2_df lrx2_p r2_adj r2_mf r2_mfadj r2_ml ///
        r2_cu r2_mz r2_ef v_ystar v_error r2_ct r2_ctadj aic aic_n ///
        bic bic_p statabic stataaic n_rhs n_parm
    local stats: list allstats & stats
    if "`bic'"!="" {
        // with -bic- restrict output to the information criteria
        local bic aic aic_n bic bic_p statabic stataaic
        local stats: list bic & stats
    }


// check names
    if "`replace'"=="" {
        foreach stat of local stats {
            if inlist("`stat'", "bic", "aic") local rname `stat'0
            else local rname `stat'
            confirm_new_ename `prefix'`rname'
        }
    }

// return the results
    di as txt _n "added scalars:"
    foreach stat of local stats {
        if inlist("`stat'", "bic", "aic") local rname `stat'0
        else local rname `stat'
        ereturn scalar `prefix'`rname' = r(`stat')
        added_scalar `prefix'`rname'
    }
end

* 19.
+* -estadd- subroutine: support for -listcoef- by Long and Freese +* (see http://www.indiana.edu/~jslsoc/spost.htm) +program define estadd_listcoef, eclass + version 8.2 + local caller : di _caller() + syntax [anything] [ , Prefix(name) Replace Quietly /// + nosd gt lt ADJacent Matrix EXpand * ] + +// handle some options and look for e(sample) + if `"`matrix'"'!="" { + local matrix matrix + } + if `"`e(cmd)'"'=="slogit" & "`expand'"!="" { + di as err "-expand- option not supported" + exit 198 + } + confirm_esample + +// set some constants + local listcoef_matrices "xs ys std fact facts pct pcts" + if "`sd'"=="" local listcoef_matrices "`listcoef_matrices' sdx" + +// run listcoef + capt findfile listcoef.ado + if _rc { + di as error "-listcoef- from the -spost9_ado- package by Long and Freese required" + di as error `"type {stata "net from http://www.indiana.edu/~jslsoc/stata"}"' + error 499 + } + `quietly' version `caller': listcoef `anything' , matrix `gt' `lt' `adjacent' `options' + +// check existing e()'s + if "`replace'"=="" { + confirm_new_ename `prefix'pvalue + foreach matrix of local listcoef_matrices { + _estadd_listcoef_ChkEName b_`matrix', prefix(`prefix') + } + } + +// grab r()-results and post in e() + di as txt _n "added matrices:" + if inlist(`"`e(cmd)'"',"mlogit","mprobit") { + _estadd_listcoef_AddResToNomModl `listcoef_matrices', prefix(`prefix') `gt' `lt' `adjacent' + } + else { + foreach matrix of local listcoef_matrices { + _estadd_listcoef_AddMatToE `matrix', prefix(`prefix') + } + } +end +program define _estadd_listcoef_ChkEName + syntax name [, prefix(str) ] + capt confirm matrix r(`namelist') + if _rc exit + confirm_new_ename `prefix'`namelist' +end +program define _estadd_listcoef_AddMatToE, eclass + syntax name [, prefix(str) ] + capt confirm matrix r(b_`namelist') + if _rc exit + tempname tmp + matrix `tmp' = r(b_`namelist') + capt confirm matrix r(b2_`namelist') + if _rc==0 { + local eqnames: coleq e(b), quoted + local eqnames: list uniq 
eqnames + local eqname: word 1 of `eqnames' + mat coleq `tmp' = `"`eqname'"' + tempname tmp2 + matrix `tmp2' = r(b2_`namelist') + local eqname: word 2 of `eqnames' + mat coleq `tmp2' = `"`eqname'"' + mat `tmp' = `tmp' , `tmp2' + mat drop `tmp2' + } + ereturn matrix `prefix'b_`namelist' = `tmp' + added_matrix `prefix'b_`namelist' _rown +end +program define _estadd_listcoef_AddResToNomModl, eclass + syntax anything(name=listcoef_matrices) [, prefix(str) gt lt ADJacent ] + if "`lt'"=="" & "`gt'"=="" { + local lt lt + local gt gt + } + local adjacent = "`adjacent'"!="" + local lt = "`lt'"!="" + local gt = "`gt'"!="" + +// outcomes and labels + tempname outcomes + if `"`e(cmd)'"'=="mlogit" { + if c(stata_version) < 9 local type cat + else local type out + mat `outcomes' = e(`type') + local noutcomes = colsof(`outcomes') + local eqnames `"`e(eqnames)'"' + if (`:list sizeof eqnames'<`noutcomes') { + local ibase = e(ibase`type') + } + else local ibase 0 + forv i = 1/`noutcomes' { + if `i'==`ibase' { + local outcomelab`i' `"`e(baselab)'"' + } + else { + gettoken eq eqnames : eqnames + local outcomelab`i' `"`eq'"' + } + if `"`outcomelab`i''"'=="" { + local outcomelab`i': di `outcomes'[1,`i'] + } + } + } + else if `"`e(cmd)'"'=="mprobit" { + mat `outcomes' = e(outcomes)' + local noutcomes = colsof(`outcomes') + forv i = 1/`noutcomes' { + local outcomelab`i' `"`e(out`i')'"' + } + } + else { + di as err `"`e(cmd)' not supported"' + exit 499 + } + +// collect vectors + tempname stats + mat `stats' = r(b) \ r(b_z) \ r(b_z) \ r(b_p) + forv i = 1/`=colsof(`stats')' { + mat `stats'[2,`i'] = `stats'[1,`i'] / `stats'[3,`i'] + } + mat rown `stats' = "b" "se" "z" "P>|z|" + local enames "b_raw b_se b_z b_p" + foreach matrix of local listcoef_matrices { + capt confirm matrix r(b_`matrix') + if _rc continue + mat `stats' = `stats' \ r(b_`matrix') + local enames `"`enames' b_`matrix'"' + } + +// select/reorder contrasts of interest + local contrast "r(contrast)" + local ncontrast = 
colsof(`contrast') + tempname stats0 temp + matrix rename `stats' `stats0' + forv i = 1/`noutcomes' { + local out1 = `outcomes'[1, `i'] + local j 0 + forv j = 1/`noutcomes' { + local out2 = `outcomes'[1, `j'] + if `out1'==`out2' continue + if `adjacent' & abs(`i'-`j')>1 continue + if `lt'==0 & `out1'<`out2' continue + if `gt'==0 & `out1'>`out2' continue + forv l = 1/`ncontrast' { + if el(`contrast',1,`l')!=`out1' continue + if el(`contrast',2,`l')!=`out2' continue + mat `temp' = `stats0'[1..., `l'] + mat coleq `temp' = `"`outcomelab`i''-`outcomelab`j''"' + mat `stats' = nullmat(`stats'), `temp' + } + } + } + capt mat drop `stats0' + +// post rows to e() + local i 0 + foreach ename of local enames { + local ++i + mat `temp' = `stats'[`i', 1...] + ereturn matrix `prefix'`ename' = `temp' + added_matrix `prefix'`ename' _rown + } +end + +* 20. +* -estadd- subroutine: support for -mlogtest- by Long and Freese +* (see http://www.indiana.edu/~jslsoc/spost.htm) +program define estadd_mlogtest, eclass + version 8.2 + local caller : di _caller() + syntax [anything] [ , Prefix(name) Replace Quietly set(passthru) * ] + `quietly' version `caller': mlogtest `anything' , `set' `options' + local rmat: r(matrices) + + // check names + if `"`replace'"'=="" { + foreach m in combine lrcomb { + if `:list m in rmat'==0 continue + forv r = 1/`=rowsof(r(`m'))' { + local cat1 = el(r(`m'),`r',1) + local cat2 = el(r(`m'),`r',2) + confirm_new_ename `prefix'`m'_`cat1'_`cat2'_chi2 + confirm_new_ename `prefix'`m'_`cat1'_`cat2'_df + confirm_new_ename `prefix'`m'_`cat1'_`cat2'_p + } + } + foreach m in hausman suest smhsiao { + if `:list m in rmat'==0 continue + forv r = 1/`=rowsof(r(`m'))' { + local cat = el(r(`m'),`r',1) + confirm_new_ename `prefix'`m'_`cat'_chi2 + confirm_new_ename `prefix'`m'_`cat'_df + confirm_new_ename `prefix'`m'_`cat'_p + } + } + if `"`set'"'!="" { + foreach m in wald lrtest { + if `:list m in rmat'==0 continue + local i 0 + local r = rownumb(r(`m'),"set_`++i'") + 
while(`r'<.) { + confirm_new_ename `prefix'`m'_set`i'_chi2 + confirm_new_ename `prefix'`m'_set`i'_df + confirm_new_ename `prefix'`m'_set`i'_p + local r = rownumb(r(`m'),"set_`++i'") + } + } + } + foreach m in wald lrtest { + if `:list m in rmat'==0 continue + local r . + if `"`set'"'!="" local r = rownumb(r(`m'),"set_1")-1 + if `r'<1 continue + confirm_new_ename `prefix'`m' + } + } + + local di_added_scalars `"di _n as txt "added scalars:"' + // combine + foreach m in combine lrcomb { + if `:list m in rmat'==0 continue + `di_added_scalars' + local di_added_scalars + forv r = 1/`=rowsof(r(`m'))' { + local cat1 = el(r(`m'),`r',1) + local cat2 = el(r(`m'),`r',2) + eret scalar `prefix'`m'_`cat1'_`cat2'_chi2 = el(r(`m'),`r',3) + added_scalar `prefix'`m'_`cat1'_`cat2'_chi2 + eret scalar `prefix'`m'_`cat1'_`cat2'_df = el(r(`m'),`r',4) + added_scalar `prefix'`m'_`cat1'_`cat2'_df + eret scalar `prefix'`m'_`cat1'_`cat2'_p = el(r(`m'),`r',5) + added_scalar `prefix'`m'_`cat1'_`cat2'_p + } + } + // iia + foreach m in hausman suest smhsiao { + if `:list m in rmat'==0 continue + `di_added_scalars' + local di_added_scalars + if "`m'"=="smhsiao" local skip 2 + else local skip 0 + forv r = 1/`=rowsof(r(`m'))' { + local cat = el(r(`m'),`r',1) + eret scalar `prefix'`m'_`cat'_chi2 = el(r(`m'),`r',2+`skip') + added_scalar `prefix'`m'_`cat'_chi2 + eret scalar `prefix'`m'_`cat'_df = el(r(`m'),`r',3+`skip') + added_scalar `prefix'`m'_`cat'_df + eret scalar `prefix'`m'_`cat'_p = el(r(`m'),`r',4+`skip') + added_scalar `prefix'`m'_`cat'_p + } + } + + // wald/lrtest + tempname tmp + if `"`set'"'!="" { + foreach m in wald lrtest { + if `:list m in rmat'==0 continue + local i 0 + local r = rownumb(r(`m'),"set_`++i'") + if `r'>=. continue + `di_added_scalars' + local di_added_scalars + while(`r'<.) 
{ + eret scalar `prefix'`m'_set`i'_chi2 = el(r(`m'),`r',1) + added_scalar `prefix'`m'_set`i'_chi2 + eret scalar `prefix'`m'_set`i'_df = el(r(`m'),`r',2) + added_scalar `prefix'`m'_set`i'_df + eret scalar `prefix'`m'_set`i'_p = el(r(`m'),`r',3) + added_scalar `prefix'`m'_set`i'_p + local r = rownumb(r(`m'),"set_`++i'") + } + } + } + local di_added_matrices `"di _n as txt "added matrices:"' + foreach m in wald lrtest { + if `:list m in rmat'==0 continue + local r . + if `"`set'"'!="" local r = rownumb(r(`m'),"set_1")-1 + if `r'<1 continue + `di_added_matrices' + local di_added_matrices + mat `tmp' = r(`m') + mat `tmp' = `tmp'[1..`r',1...]' + eret mat `prefix'`m' = `tmp' + added_matrix `prefix'`m' _rown + } + +end + + +* 21. +* -estadd- subroutine: support for -prchange- by Long and Freese +* (see http://www.indiana.edu/~jslsoc/spost.htm) +program define estadd_prchange + version 8.2 + local caller : di _caller() + syntax [anything] [if] [in] [ , Prefix(name) Replace Quietly /// + PAttern(str) Binary(str) Continuous(str) NOAvg Avg split SPLIT2(name) /// + adapt /// old syntax; now works as synonym for noavg + Outcome(passthru) Fromto noBAse * ] + +// handle some options + if `"`split2'"'!="" local split split + if "`split'"!="" & `"`outcome'"'!="" { + di as err "split and outcome() not both allowed" + exit 198 + } + if "`split'"!="" & `"`avg'`noavg'"'!="" { + di as err "split and avg not both allowed" + exit 198 + } + if "`avg'"!="" & `"`outcome'"'!="" { + di as err "avg and outcome not both allowed" + exit 198 + } + if "`avg'"!="" & "`noavg'"!="" { + di as err "avg and noavg not both allowed" + exit 198 + } + if `"`adapt'"'!="" local noavg noavg + if `:list sizeof binary'>1 | `:list sizeof continuous'>1 error 198 + estadd_prchange_ExpandType binary `"`binary'"' + estadd_prchange_ExpandType continuous `"`continuous'"' + if `"`binary'"'=="" local binary 2 + if `"`continuous'"'=="" local continuous 4 + if `"`pattern'"'!="" { + estadd_prchange_ExpandType pattern 
`"`pattern'"' + } + +// check e(sample) + confirm_esample + +// run prchange + capt findfile prchange.ado + if _rc { + di as error "-prchange- from the -spost9_ado- package by Long and Freese required" + di as error `"type {stata "net from http://www.indiana.edu/~jslsoc/stata"}"' + error 499 + } + `quietly' version `caller': prchange `anything' `if' `in', `base' `outcome' `fromto' `options' + +// determine type of model (ordinal: nomord = 1; nominal: nomord = 2) + local nomord = (r(modeltype)=="typical nomord") + if inlist(`"`e(cmd)'"',"mlogit","mprobit") local nomord = 2 + if "`avg'`noavg'"!="" { + if `nomord'==0 { + di as err "avg not allowed with this model" + exit 198 + } + } + if !`nomord' & "`split'"!="" { + di as err "split not allowed with this model" + exit 198 + } + +// determine outcome number (in prchange-returns) + if `"`outcome'"'!="" { + if `nomord' { + forv i = 1/`=colsof(r(catval))' { + if el(r(catval), 1, `i') == r(outcome) { + local outcomenum `i' + continue, break + } + } + if "`outcomenum'"=="" { // should never happen + di as err `"outcome `outcome' not found"' + exit 499 + } + } + else { + local outcomenum = colnumb(r(predval), `"`r(outcome)'"') + } + } + +// check names + if "`replace'"=="" { + if `"`outcome'"'!="" | "`split'"!="" | `nomord'==0 { + confirm_new_ename `prefix'predval + if `"`outcome'"'!="" | "`split'"!="" { + confirm_new_ename `prefix'outcome + } + } + else { + forv i = 1/`=colsof(r(catval))' { + local theoutcome: di el(r(catval),1,`i') + confirm_new_ename `prefix'predval`theoutcome' + } + } + confirm_new_ename `prefix'delta + confirm_new_ename `prefix'centered + confirm_new_ename `prefix'dc + if "`fromto'"!="" { + confirm_new_ename `prefix'dcfrom + confirm_new_ename `prefix'dcto + } + if "`nobase'"=="" { + confirm_new_ename `prefix'X + } + } + +// grab r()-results and post in e() + if "`split'"!="" { + if `"`split2'"'=="" { + local split2 `"`e(_estadd_estimates_name)'"' + if `"`split2'"'=="" { + local split2 `"`e(cmd)'"' + } 
+ local split2 `"`split2'_"' + } + _estadd_prchange_StoreEachOutc `split2' , nomord(`nomord') /// + pattern(`pattern') binary(`binary') continuous(`continuous') /// + `base' `fromto' prefix(`prefix') + } + else { + _estadd_prchange_AddStuffToE, nomord(`nomord') outcome(`outcomenum') /// + pattern(`pattern') binary(`binary') continuous(`continuous') /// + `avg' `noavg' `base' `fromto' prefix(`prefix') + } +end +program estadd_prchange_ExpandType + args name list + foreach l of local list { + local w = length(`"`l'"') + if `"`l'"'==substr("minmax",1,max(2,`w')) local type 1 + else if `"`l'"'==substr("01",1,max(1,`w')) local type 2 + else if `"`l'"'==substr("delta",1,max(1,`w')) local type 3 + else if `"`l'"'==substr("sd",1,max(1,`w')) local type 4 + else if `"`l'"'==substr("margefct",1,max(1,`w')) local type 5 + else { + di as err `"'`l'' not allowed"' + exit 198 + } + local newlist `newlist' `type' + } + c_local `name' `newlist' +end +program define _estadd_prchange_AddStuffToE, eclass +// input add +// ========================= ======================================== +// outcome() nomord opt change changenm change# predval outcome +// no 0 - x last +// yes 0 - x x x +// no 1/2 - x all all +// yes 1/2 - x x x +// no 1/2 avg x all +// no 1/2 noavg all all +// nobase=="" => add X, SD, Min, Max +// all models => add centered, delta + syntax , nomord(str) [ pattern(passthru) binary(passthru) continuous(passthru) /// + outcome(str) NOAVG avg nobase fromto prefix(str) split ] // +// prepare predval and determine value of outcome + if `"`outcome'"'!="" { + tempname predv + mat `predv' = r(predval) + mat `predv' = `predv'[1...,`outcome'] + if `nomord' { + local theoutcome: di el(r(catval),1,`outcome') + } + else { + local theoutcome: colnames `predv' + } + } +// add scalars + di _n as txt "added scalars:" +// - predval and outcome + local cpredval = colsof(r(predval)) + if `"`outcome'"'!="" { + ereturn scalar `prefix'predval = `predv'[1,1] + added_scalar `prefix'predval 
`"`lab_predval'"' + ereturn scalar `prefix'outcome = `theoutcome' + added_scalar `prefix'outcome + } + else if `nomord' { // add all + forv i=1/`cpredval' { + local theoutcome: di el(r(catval),1,`i') + ereturn scalar `prefix'predval`theoutcome' = el(r(predval),1,`i') + added_scalar `prefix'predval`theoutcome' + } + } + else { // add last + ereturn scalar `prefix'predval = el(r(predval),1,`cpredval') + added_scalar `prefix'predval + } +// - delta and centered + ereturn scalar `prefix'delta = r(delta) + added_scalar `prefix'delta + ereturn scalar `prefix'centered = r(centered) + added_scalar `prefix'centered +// add matrices + di _n as txt "added matrices:" + if `nomord'==0 { + if r(modeltype)=="twoeq count" & "`test'"=="" { + local eq: coleq e(b) + local eq: word 1 of `eq' + } + _estadd_prchange_PostMat r(change), prefix(`prefix') /// + name(dc) `pattern' `binary' `continuous' `fromto' eq(`eq') + } + else { + if `"`outcome'"'=="" { + if "`avg'"!="" local nomordmat "r(changemn)" + else { + tempname nomordmat + _estadd_prchange_GatherNomChMat `nomordmat' `noavg' + } + _estadd_prchange_PostMat `nomordmat', prefix(`prefix') /// + name(dc) `pattern' `binary' `continuous' `fromto' + } + else { + if `nomord'==2 { + _estadd_prchange_GetEqnmNomModl `theoutcome' + } + if `"`split'"'!="" { + _estadd_prchange_PostMat r(change`theoutcome'), prefix(`prefix') /// + name(dc) `pattern' `binary' `continuous' `fromto' eq(`eq') + } + else { + _estadd_prchange_PostMat r(change), prefix(`prefix') /// + name(dc) `pattern' `binary' `continuous' `fromto' eq(`eq') + } + } + } + if `"`base'"'=="" { + _estadd_prchange_PostMat r(baseval), prefix(`prefix') name(X) + } + if `"`pattern'"'=="" { + _estadd_prchange_dcNote, prefix(`prefix') name(dc) `binary' `continuous' + } +end +program define _estadd_prchange_dcNote + syntax [ , prefix(str) name(str) binary(str) continuous(str) ] + local res `""{res:minmax} change" "{res:01} change" "{res:delta} change" "{res:sd} change" "{res:margefct}""' + local 
bres: word `binary' of `res' + local cres: word `continuous' of `res' + di _n as txt `"first row in e(dc) contains:"' + di _n `" `bres' for binary variables"' + di `" `cres' for continuous variables"' +end +program define _estadd_prchange_PostMat, eclass + syntax anything, name(str) [ Fromto eq(str) prefix(str) /// + pattern(passthru) binary(passthru) continuous(passthru) ] + capt confirm matrix `anything' + if _rc exit + tempname tmp1 + local nmlist "`name'" + matrix `tmp1' = `anything' + if `"`eq'"'!="" { + mat coleq `tmp1' = `"`eq'"' + } + if `"`pattern'`binary'`continuous'"'!="" { + tempname pattmat + _estadd_prchange_Merge `tmp1', pattmat(`pattmat') `pattern' `binary' `continuous' `fromto' + } + if "`fromto'"!="" { + local nmlist "`nmlist' `name'from `name'to" + tempname tmp tmp2 tmp3 + mat rename `tmp1' `tmp' + local r = rowsof(`tmp') + local i = 1 + while (`i'<=`r') { + if (`r'-`i')>=2 { + mat `tmp2' = nullmat(`tmp2') \ `tmp'[`i++',1...] // from + mat `tmp3' = nullmat(`tmp3') \ `tmp'[`i++',1...] // to + } + mat `tmp1' = nullmat(`tmp1') \ `tmp'[`i++',1...] + } + mat drop `tmp' + } + local i 0 + foreach nm of local nmlist { + local ++i + local rown: rown `tmp`i'' + mat rown `tmp`i'' = `rown' // fix problem with leading blanks in equations + ereturn matrix `prefix'`nm' = `tmp`i'' + added_matrix `prefix'`nm' _rown + } + if `"`pattmat'"'!="" { + ereturn matrix `prefix'pattern = `pattmat' + added_matrix `prefix'pattern + } +end +program define _estadd_prchange_Merge + syntax name(name=tmp1) [, pattmat(str) pattern(str) binary(str) continuous(str) fromto ] + tempname tmp + mat rename `tmp1' `tmp' + local r = cond("`fromto'"!="", 3, 1) + mat `tmp1' = `tmp'[1..`r',1...]*. + mat `pattmat' = `tmp'[1,1...]*. 
+ local rtot = rowsof(`tmp') + mat rown `tmp1' = main + mat rown `pattmat' = :type + local vars: colnames `tmp1' + local eqs: coleq `tmp1', quoted + local j 0 + foreach var of local vars { + local ++j + gettoken eq eqs : eqs + if `"`eq'"'!=`"`lasteq'"' gettoken type rest : pattern + else gettoken type rest : rest + local lasteq `"`eq'"' + if `"`type'"'=="" { + capt assert `var'==0|`var'==1 if e(sample) & `var'<. + if _rc local type `continuous' + else local type `binary' + } + local ii = (`type'-1)*`r'+1 + forv i = 1/`r' { + if `r'>1 & `i'<3 & `ii'>=`rtot' { + mat `tmp1'[`i',`j'] = .z + } + else { + mat `tmp1'[`i',`j'] = `tmp'[`ii++',`j'] + } + } + mat `pattmat'[1,`j'] = `type' + } + mat `tmp1' = `tmp1' \ `tmp' +end +program define _estadd_prchange_GatherNomChMat + args mat noavg + local cmd `"`e(cmd)'"' + tempname tmpmat + if `"`noavg'"'=="" { + mat `tmpmat' = r(changemn) + mat coleq `tmpmat' = `"Avg|Chg|"' + mat `mat' = `tmpmat' + } + if `"`cmd'"'=="mlogit" { + if c(stata_version) < 9 local outcat cat + else local outcat out + local k_cat = e(k_`outcat') + local eqnames `"`e(eqnames)'"' + if `k_cat'>`:list sizeof eqnames' { // no base equation + local ibase = e(ibase`outcat') + local baselab `"`e(baselab)'"' + if `"`baselab'"'=="" { + local baselab `"`e(base`outcat')'"' + } + forv i = 1/`k_cat' { + if `i'==`ibase' { + local eq `"`"`baselab'"'"' + } + else gettoken eq eqnames : eqnames, quotes + local temp `"`temp' `eq'"' + } + local eqnames: list retok temp + } + local i 0 + foreach eq of local eqnames { + local ++i + local theoutcome: di el(e(`outcat'),1,`i') + mat `tmpmat' = r(change`theoutcome') + mat coleq `tmpmat' = `"`eq'"' + mat `mat' = nullmat(`mat'), `tmpmat' + } + } + else if `"`cmd'"'=="mprobit" { + local eqnames `"`e(outeqs)'"' + local i 0 + foreach eq of local eqnames { + local ++i + local theoutcome: di el(e(outcomes),`i',1) + mat `tmpmat' = r(change`theoutcome') + mat coleq `tmpmat' = `"`eq'"' + mat `mat' = nullmat(`mat'), `tmpmat' + } + } + else { 
// ordered models + local eqnames : colnames r(catval) + local i 0 + foreach eq of local eqnames { + local ++i + local theoutcome: di el(r(catval),1,`i') + mat `tmpmat' = r(change`theoutcome') + mat coleq `tmpmat' = `"`eq'"' + mat `mat' = nullmat(`mat'), `tmpmat' + } + } +end +program define _estadd_prchange_GetEqnmNomModl + args theoutcome + local cmd `"`e(cmd)'"' + if `"`cmd'"'=="mlogit" { + if c(stata_version) < 9 local outcat cat + else local outcat out + local k_cat = e(k_`outcat') + local eqnames `"`e(eqnames)'"' + local nobase = (`k_cat'>`:list sizeof eqnames') + if `nobase' { + local ibase = e(ibase`outcat') + local baselab `"`e(baselab)'"' + } + forv i = 1/`k_cat' { + if `nobase' { + if `i'==`ibase' { + local eq `"`baselab'"' + } + else gettoken eq eqnames : eqnames + } + else gettoken eq eqnames : eqnames + if el(e(`outcat'),1,`i')==`theoutcome' { + local value `"`eq'"' + continue, break + } + } + } + else if `"`cmd'"'=="mprobit" { + local eqnames `"`e(outeqs)'"' + local i 0 + foreach eq of local eqnames { + if el(e(outcomes),`++i',1)==`theoutcome' { + local value `"`eq'"' + continue, break + } + } + } + if `"`value'"'=="" local value `theoutcome' + c_local eq `"`value'"' +end +program define _estadd_prchange_StoreEachOutc // only for nomord models + syntax anything [, nomord(str) nobase fromto prefix(passthru) /// + pattern(passthru) binary(passthru) continuous(passthru) ] +// backup estimates + tempname hcurrent + _est hold `hcurrent', copy restore estsystem + if `"`nomord'"'=="2" { // backup b and V + tempname b bi V Vi + mat `b' = e(b) + mat `V' = e(V) + } +// cycle through categories + local k_kat = colsof(r(predval)) + tempname catval catvali + mat `catval' = r(catval) + forv i=1/`k_kat' { + mat `catvali' = `catval'[1...,`i'] + local catlabi: colnames `catvali' + local catnumi: di `catvali'[1,1] + if `"`nomord'"'=="2" { + _estadd_prchange_GetEqnmNomModl `catnumi' + if colnumb(`b', `"`eq':"')<. 
{ + mat `bi' = `b'[1...,`"`eq':"'] + mat `Vi' = `V'[`"`eq':"',`"`eq':"'] + } + else { // base outcome; get first eq and set zero + local tmp : coleq `b', q + gettoken tmp : tmp + mat `bi' = `b'[1...,`"`tmp':"'] * 0 + mat `Vi' = `V'[`"`tmp':"',`"`tmp':"'] * 0 + } + mat coleq `bi' = "" + mat coleq `Vi' = "" + mat roweq `Vi' = "" + erepost b=`bi' V=`Vi' + } + `qui' _estadd_prchange_AddStuffToE, split nomord(1) outcome(`i') /// + `base' `fromto' `pattern' `binary' `continuous' `prefix' + `qui' di "" + local qui qui + _eststo `anything'`catnumi', title(`"`catlabi'"') // store without e(sample) + di as txt "results for outcome " as res `catnumi' /// + as txt " stored as " as res "`anything'`catnumi'" + } +// retore estimates + _est unhold `hcurrent' +end + +* 22. +* -estadd- subroutine: support for -prvalue- by Long and Freese +* (see http://www.indiana.edu/~jslsoc/spost.htm) +program define estadd_prvalue, eclass + version 9.2 + local caller : di _caller() + syntax [anything] [if] [in] [ , Prefix(passthru) Replace Quietly /// + LABel(str) Title(passthru) swap Diff * ] + +// post + if `"`anything'"'!="" { + gettoken post post2 : anything + if `"`post'"'!="post" { + di as err `"`post' not allowed"' + exit 198 + } + else if `"`label'"'!="" { + di as err "label() not allowed" + exit 198 + } + _estadd_prvalue_Post `post2' `if' `in', `prefix' `replace' `quietly' /// + `title' `swap' `diff' `options' + exit + } + else if `"`title'"'!="" { + di as err "title() not allowed" + exit 198 + } + else if "`swap'"!="" { + di as err "swap not allowed" + exit 198 + } + +// look for e(sample) + confirm_esample + +// run prvalue + capt findfile prvalue.ado + if _rc { + di as error "-prvalue- from the -spost9_ado- package by Long and Freese required" + di as error `"type {stata "net from http://www.indiana.edu/~jslsoc/stata"}"' + error 499 + } + `quietly' version `caller': prvalue `if' `in', `diff' `options' + +// append? 
+ capture confirm existence `e(_estadd_prvalue)' + local append = (_rc==0) & ("`replace'"=="") + tempname prvalue prvalue_x prvalue_x2 + if `append' { + mat `prvalue' = e(_estadd_prvalue) + mat `prvalue_x' = e(_estadd_prvalue_x) + capt mat `prvalue_x2' = e(_estadd_prvalue_x2) + local ires = rowsof(`prvalue') + 1 + } + else local ires 1 + if `"`label'"'=="" { + local label "pred`ires'" + } + else { + local label = substr(`"`label'"', 1, 30) // 30 characters max + local problemchars `": . `"""'"' + foreach char of local problemchars { + local label: subinstr local label `"`char'"' "_", all + } + } + +// collect results + tempname pred + mat `pred' = r(pred) + if `"`diff'"'!="" { + _estadd_prvalue_GetRidOfD `pred' + } + _estadd_prvalue_ReshapePred `pred', label(`label') + _estadd_prvalue_AddPred `prvalue' `pred' `append' + _estadd_prvalue_AddX `prvalue_x', label(`label') + capture confirm matrix r(x2) + local hasx2 = _rc==0 + if `hasx2' { + _estadd_prvalue_AddX `prvalue_x2', label(`label') two + } + +// post in e() + di as txt _n cond(`append',"updated","added") " matrices:" + ereturn matrix _estadd_prvalue = `prvalue' + added_matrix _estadd_prvalue + ereturn matrix _estadd_prvalue_x = `prvalue_x' + added_matrix _estadd_prvalue_x + if `hasx2' { + ereturn matrix _estadd_prvalue_x2 = `prvalue_x2' + added_matrix _estadd_prvalue_x2 + } +end +program _estadd_prvalue_GetRidOfD + args pred + local coln: colnames `pred' + local firstcol: word 1 of `coln' + local nfirstcol = substr("`firstcol'",2,.) + local coln : subinstr local coln "`firstcol'" "`nfirstcol'" , word + mat coln `pred' = `coln' +end +program _estadd_prvalue_ReshapePred + syntax anything, label(str) + tempname tmp res + local r = rowsof(`anything') + forv i=1/`r' { + mat `tmp' = `anything'[`i',1...] 
+ local nm: rownames `tmp' + mat coleq `tmp' = `"`nm'"' + mat `res' = nullmat(`res'), `tmp' + } + mat rown `res' = `"`label'"' + mat `anything' = `res' +end +program _estadd_prvalue_AddPred + args prvalue pred append + if `append' { + local coln1: colfullnames `prvalue' + local coln2: colfullnames `pred' + if `"`coln1'"'!=`"`coln2'"' { + di as err "incompatible prvalue results" + exit 498 + } + } + mat `prvalue' = nullmat(`prvalue') \ `pred' +end +program _estadd_prvalue_AddX + syntax anything, label(str) [ two ] + if "`two'"!="" local two 2 + tempname tmp + mat `tmp' = r(x`two') + mat rown `tmp' = `"`label'"' + mat `anything' = nullmat(`anything') \ `tmp' +end +program _estadd_prvalue_Post, eclass + syntax [name(name=post2)] [ , Prefix(name) Replace Quietly /// + Title(passthru) swap ] + capture confirm matrix e(_estadd_prvalue) + if _rc { + di as err "prvalue results not found" + exit 498 + } +// backup estimates + tempname hcurrent + _est hold `hcurrent', copy restore estsystem + local cmd = e(cmd) + local depvar = e(depvar) + local N = e(N) + local estname `"`e(_estadd_estimates_name)'"' + +// get results + tempname prvalue prvalue_x prvalue_x2 + mat `prvalue' = e(_estadd_prvalue) + mat `prvalue_x' = e(_estadd_prvalue_x) + capture confirm matrix e(_estadd_prvalue_x2) + local hasx2 = _rc==0 + if `hasx2' { + mat `prvalue_x2' = e(_estadd_prvalue_x2) + } + +// return prvalues + tempname tmp tmp2 b se + if "`swap'"=="" { + local eqs: coleq `prvalue', q + local eqs: list uniq eqs + foreach eq of local eqs { + mat `tmp' = `prvalue'[1...,`"`eq':"'] + mat `tmp2' = `tmp'[1...,1]' + mat coleq `tmp2' = `"`eq'"' + mat roweq `tmp2' = "" + mat `b' = nullmat(`b'), `tmp2' + mat `tmp2' = `tmp'[1...,`"`eq':SE"']' + mat coleq `tmp2' = `"`eq'"' + mat roweq `tmp2' = "" + mat `se' = nullmat(`se'), `tmp2' + } + mat drop `tmp' `tmp2' + } + else { + local r = rowsof(`prvalue') + local c = colsof(`prvalue') + local coln: colnames `prvalue' + local eqs: coleq `prvalue', q + mat coln 
`prvalue' = `eqs' + mat coleq `prvalue' = `coln' + local coln: list uniq coln + local ncol: list sizeof coln + local icol: list posof "SE" in coln + forv i=1/`r' { + mat `tmp' = `prvalue'[`i',1...] + local labl : rownames `tmp' + forv j=1(`ncol')`c' { + mat `tmp2' = nullmat(`tmp2'), `tmp'[1...,`j'] + } + mat coleq `tmp2' = `"`labl'"' + mat `b' = nullmat(`b'), `tmp2' + mat drop `tmp2' + forv j=`icol'(`ncol')`c' { + mat `tmp2' = nullmat(`tmp2'), `tmp'[1...,`j'] + } + mat coleq `tmp2' = `"`labl'"' + mat `se' = nullmat(`se'), `tmp2' + mat drop `tmp2' + } + mat drop `tmp' + } + ereturn post `b', obs(`N') + ereturn local model "`cmd'" + ereturn local cmd "estadd_prvalue" + ereturn local depvar "`depvar'" + di as txt _n "scalars:" + added_scalar N + di as txt _n "macros:" + added_macro depvar + added_macro cmd + added_macro model + added_macro properties + di as txt _n "matrices:" + added_matrix b "predictions" + ereturn matrix se = `se' + added_matrix se "standard errors" + local istat 0 + foreach stat in LB UB Category Cond { + local elabel: word `++istat' of "lower CI bounds" "upper CI bounds" /// + "outcome values" "conditional predictions" + if "`swap'"=="" { + foreach eq of local eqs { + local colnumb = colnumb(`prvalue',`"`eq':`stat'"') + if `colnumb'>=. continue + mat `tmp2' = `prvalue'[1...,`colnumb']' + mat coleq `tmp2' = `"`eq'"' + mat roweq `tmp2' = "" + mat `tmp' = nullmat(`tmp'), `tmp2' + } + } + else { + local icol: list posof "`stat'" in coln + if `icol'==0 continue + forv i=1/`r' { + mat `tmp2' = `prvalue'[`i',1...] 
+ local labl : rownames `tmp2' + mat coleq `tmp2' = `"`labl'"' + forv j=`icol'(`ncol')`c' { + mat `tmp' = nullmat(`tmp'), `tmp2'[1...,`j'] + } + } + mat drop `tmp2' + } + capt confirm matrix `tmp' + if _rc==0 { + ereturn matrix `prefix'`stat' = `tmp' + added_matrix `prefix'`stat' "`elabel'" + } + } + +// return x-values + matrix `prvalue_x' = `prvalue_x'' + ereturn matrix `prefix'X = `prvalue_x' + added_matrix `prefix'X _rown + if `hasx2' { + matrix `prvalue_x2' = `prvalue_x2'' + ereturn matrix `prefix'X2 = `prvalue_x2' + added_matrix `prefix'X2 _rown + } + +// store + if "`post2'"!="" { + _eststo `estname'`post2', `title' + di as txt _n "results stored as " as res "`estname'`post2'" + } + else if `"`title'"'!="" { + estimates change ., `title' + } + +// retore estimates + if "`post2'"!="" { + _est unhold `hcurrent' + } + else { + _est unhold `hcurrent', not + } +end + +* 23. +* -estadd- subroutine: support for -asprvalue- by Long and Freese +* (see http://www.indiana.edu/~jslsoc/spost.htm) +program define estadd_asprvalue, eclass + version 9.2 + local caller : di _caller() + syntax [anything] [ , Prefix(passthru) Replace Quietly /// + LABel(str) Title(passthru) swap * ] + +// post + if `"`anything'"'!="" { + gettoken post post2 : anything + if `"`post'"'!="post" { + di as err `"`post' not allowed"' + exit 198 + } + else if `"`label'"'!="" { + di as err "label() not allowed" + exit 198 + } + _estadd_asprvalue_Post `post2' , `prefix' `replace' `quietly' /// + `title' `swap' `options' + exit + } + else if `"`title'"'!="" { + di as err "title() not allowed" + exit 198 + } + else if "`swap'"!="" { + di as err "swap not allowed" + exit 198 + } + +// look for e(sample) + confirm_esample + +// run prvalue + capt findfile asprvalue.ado + if _rc { + di as error "-asprvalue- from the -spost9_ado- package by Long and Freese required" + di as error `"type {stata "net from http://www.indiana.edu/~jslsoc/stata"}"' + error 499 + } + `quietly' version `caller': asprvalue , 
`options' + +// append? + capture confirm existence `e(_estadd_asprval)' + local append = (_rc==0) & ("`replace'"=="") + tempname asprval asprval_asv asprval_csv + if `append' { + mat `asprval' = e(_estadd_asprval) + capt mat `asprval_asv' = e(_estadd_asprval_asv) + capt mat `asprval_csv' = e(_estadd_asprval_csv) + local ires = rowsof(`asprval') + 1 + } + else local ires 1 + if `"`label'"'=="" { + local label "pred`ires'" + } + else { + local label = substr(`"`label'"', 1, 30) // 30 characters max + local problemchars `": . `"""'"' + foreach char of local problemchars { + local label: subinstr local label `"`char'"' "_", all + } + } + +// collect results + tempname res + mat `res' = r(p) + _estadd_asprvalue_Reshape `res', label(`label') + _estadd_asprvalue_Add `asprval' `res' `append' + capture confirm matrix r(asv) + local hasasv = _rc==0 + if `hasasv' { + mat `res' = r(asv) + _estadd_asprvalue_Reshape `res', label(`label') + _estadd_asprvalue_Add `asprval_asv' `res' `append' + } + capture confirm matrix r(csv) + local hascsv = _rc==0 + if `hascsv' { + _estadd_asprvalue_AddCsv `asprval_csv', label(`label') + } + +// post in e() + di as txt _n cond(`append',"updated","added") " matrices:" + ereturn matrix _estadd_asprval = `asprval' + added_matrix _estadd_asprval + if `hasasv' { + ereturn matrix _estadd_asprval_asv = `asprval_asv' + added_matrix _estadd_asprval_asv + } + if `hascsv' { + ereturn matrix _estadd_asprval_csv = `asprval_csv' + added_matrix _estadd_asprval_csv + } +end +program _estadd_asprvalue_Reshape + syntax anything, label(str) + tempname tmp res + local r = rowsof(`anything') + forv i=1/`r' { + mat `tmp' = `anything'[`i',1...] 
+ local nm: rownames `tmp' + mat coleq `tmp' = `"`nm'"' + mat `res' = nullmat(`res'), `tmp' + } + mat rown `res' = `"`label'"' + mat `anything' = `res' +end +program _estadd_asprvalue_Add + args master using append + if `append' { + local coln1: colfullnames `master' + local coln2: colfullnames `using' + if `"`coln1'"'!=`"`coln2'"' { + di as err "incompatible asprvalue results" + exit 498 + } + } + mat `master' = nullmat(`master') \ `using' +end +program _estadd_asprvalue_AddCsv + syntax anything, label(str) + tempname tmp + mat `tmp' = r(csv) + mat rown `tmp' = `"`label'"' + mat `anything' = nullmat(`anything') \ `tmp' +end +program _estadd_asprvalue_Post, eclass + syntax [name(name=post2)] [ , Prefix(name) Replace Quietly /// + Title(passthru) swap ] + capture confirm matrix e(_estadd_asprval) + if _rc { + di as err "asprvalue results not found" + exit 498 + } + +// backup estimates + tempname hcurrent + _est hold `hcurrent', copy restore estsystem + local cmd = e(cmd) + local depvar = e(depvar) + local N = e(N) + local estname `"`e(_estadd_estimates_name)'"' + +// get results + tempname asprval asprval_asv asprval_csv + mat `asprval' = e(_estadd_asprval) + capture confirm matrix e(_estadd_asprval_asv) + local hasasv = _rc==0 + if `hasasv' { + mat `asprval_asv' = e(_estadd_asprval_asv) + } + capture confirm matrix e(_estadd_asprval_csv) + local hascsv = _rc==0 + if `hascsv' { + mat `asprval_csv' = e(_estadd_asprval_csv) + } + +// return predictions + tempname tmp tmp2 b + if "`swap'"=="" { + local eqs: coleq `asprval', q + local eqs: list uniq eqs + foreach eq of local eqs { + mat `tmp' = `asprval'[1...,`"`eq':"'] + mat `tmp2' = `tmp'[1...,1]' + mat coleq `tmp2' = `"`eq'"' + mat roweq `tmp2' = "" + mat `b' = nullmat(`b'), `tmp2' + } + mat drop `tmp' `tmp2' + } + else { + local r = rowsof(`asprval') + local coln: colnames `asprval' + local eqs: coleq `asprval', q + mat coln `asprval' = `eqs' + forv i=1/`r' { + mat `tmp' = `asprval'[`i',1...] 
+ local labl : rownames `tmp' + mat coleq `tmp' = `"`labl'"' + mat `b' = nullmat(`b'), `tmp' + } + mat drop `tmp' + } + ereturn post `b', obs(`N') + ereturn local model "`cmd'" + ereturn local cmd "estadd_asprvalue" + ereturn local depvar "`depvar'" + di as txt _n "scalars:" + added_scalar N + di as txt _n "macros:" + added_macro depvar + added_macro cmd + added_macro model + added_macro properties + di as txt _n "matrices:" + added_matrix b "predictions" + +// return asv-values + if `hasasv' { + if "`swap'"=="" { + local vars: coleq `asprval_asv' + local vars: list uniq vars + local cats: colnames `asprval_asv' + local cats: list uniq cats + foreach var of local vars { + foreach cat of local cats { + mat `tmp2' = `asprval_asv'[1...,`"`var':`cat'"']' + mat coleq `tmp2' = `"`cat'"' + mat roweq `tmp2' = "" + mat `tmp' = nullmat(`tmp'), `tmp2' + } + mat rown `tmp' = `"`var'"' + mat `b' = nullmat(`b') \ `tmp' + mat drop `tmp' + } + } + else { + local r = rowsof(`asprval_asv') + local vars: coleq `asprval_asv' + local vars: list uniq vars + forv i=1/`r' { + foreach var of local vars { + mat `tmp2' = `asprval_asv'[`i',`"`var':"'] + local lbl: rownames `tmp2' + mat coleq `tmp2' = `"`lbl'"' + mat rown `tmp2' = `"`var'"' + mat `tmp' = nullmat(`tmp') \ `tmp2' + } + mat `b' = nullmat(`b') , `tmp' + mat drop `tmp' + } + } + ereturn matrix `prefix'asv = `b' + added_matrix `prefix'asv _rown + } +// return csv-values + if `hascsv' { + matrix `asprval_csv' = `asprval_csv'' + ereturn matrix `prefix'csv = `asprval_csv' + added_matrix `prefix'csv _rown + } + +// store + if "`post2'"!="" { + _eststo `estname'`post2', `title' + di as txt _n "results stored as " as res "`estname'`post2'" + } + else if `"`title'"'!="" { + estimates change ., `title' + } + +// retore estimates + if "`post2'"!="" { + _est unhold `hcurrent' + } + else { + _est unhold `hcurrent', not + } +end + +* 24. 
estadd_margins +program define estadd_margins, eclass + version 11.0 + local caller : di _caller() + syntax [ anything(everything equalok)] [fw aw iw pw] [, Prefix(name) Replace Quietly * ] + +// set default prefix + if "`prefix'"=="" local prefix "margins_" + +// compute and return the results + if `"`weight'`exp'"'!="" local wgtexp `"[`weight'`exp']"' + `quietly' version `caller': margins `anything' `wgtexp', `options' + +// check names + local rscalars: r(scalars) + local rmacros: r(macros) + local rmatrices: r(matrices) + local rmatrices: subinstr local rmatrices "V" "se", word + if "`replace'"=="" { + foreach nmlist in rscalars rmacros rmatrices { + foreach name of local `nmlist' { + confirm_new_ename `prefix'`name' + } + } + } + +// add results + di as txt _n "added scalars:" + foreach name of local rscalars { + ereturn scalar `prefix'`name' = r(`name') + added_scalar `prefix'`name' + } + di as txt _n "added macros:" + foreach name of local rmacros { + ereturn local `prefix'`name' `"`r(`name')'"' + added_macro `prefix'`name' + } + di as txt _n "added matrices:" + tempname tmpmat + foreach name of local rmatrices { + if "`name'"=="se" { + mat `tmpmat' = vecdiag(r(V)) + forv i = 1/`=colsof(`tmpmat')' { + mat `tmpmat'[1,`i'] = sqrt(`tmpmat'[1,`i']) + } + } + else { + mat `tmpmat' = r(`name') + } + eret matrix `prefix'`name' = `tmpmat' + added_matrix `prefix'`name' + } +end + +* 99. 
+* copy of erepost.ado, version 1.0.1, Ben Jann, 30jul2007 +* used by estadd_listcoef and estadd_prchange +prog erepost, eclass + version 8.2 + syntax [anything(equalok)] [, cmd(str) noEsample Esample2(varname) REName /// + Obs(passthru) Dof(passthru) PROPerties(passthru) * ] + if "`esample'"!="" & "`esample2'"!="" { + di as err "only one allowed of noesample and esample()" + exit 198 + } +// parse [b = b] [V = V] + if `"`anything'"'!="" { + tokenize `"`anything'"', parse(" =") + if `"`7'"'!="" error 198 + if `"`1'"'=="b" { + if `"`2'"'=="=" & `"`3'"'!="" { + local b `"`3'"' + confirm matrix `b' + } + else error 198 + if `"`4'"'=="V" { + if `"`5'"'=="=" & `"`6'"'!="" { + local v `"`6'"' + confirm matrix `b' + } + else error 198 + } + else if `"`4'"'!="" error 198 + } + else if `"`1'"'=="V" { + if `"`4'"'!="" error 198 + if `"`2'"'=="=" & `"`3'"'!="" { + local v `"`3'"' + confirm matrix `v' + } + else error 198 + } + else error 198 + } +//backup existing e()'s + if "`esample2'"!="" { + local sample "`esample2'" + } + else if "`esample'"=="" { + tempvar sample + gen byte `sample' = e(sample) + } + local emacros: e(macros) + if `"`properties'"'!="" { + local emacros: subinstr local emacros "properties" "", word + } + foreach emacro of local emacros { + local e_`emacro' `"`e(`emacro')'"' + } + local escalars: e(scalars) + if `"`obs'"'!="" { + local escalars: subinstr local escalars "N" "", word + } + if `"`dof'"'!="" { + local escalars: subinstr local escalars "df_r" "", word + } + foreach escalar of local escalars { + tempname e_`escalar' + scalar `e_`escalar'' = e(`escalar') + } + local ematrices: e(matrices) + if "`b'"=="" & `:list posof "b" in ematrices' { + tempname b + mat `b' = e(b) + } + if "`v'"=="" & `:list posof "V" in ematrices' { + tempname v + mat `v' = e(V) + } + local bV "b V" + local ematrices: list ematrices - bV + foreach ematrix of local ematrices { + tempname e_`ematrix' + matrix `e_`ematrix'' = e(`ematrix') + } +// rename + if "`b'"!="" & 
"`v'"!="" & "`rename'"!="" { + local eqnames: coleq `b', q + local vnames: colnames `b' + mat coleq `v' = `eqnames' + mat coln `v' = `vnames' + mat roweq `v' = `eqnames' + mat rown `v' = `vnames' + } +// post results + if "`esample'"=="" { + eret post `b' `v', esample(`sample') `obs' `dof' `properties' `options' + } + else { + eret post `b' `v', `obs' `dof' `properties' `options' + } + foreach emacro of local emacros { + eret local `emacro' `"`e_`emacro''"' + } + if `"`cmd'"'!="" { + eret local cmd `"`cmd'"' + } + foreach escalar of local escalars { + eret scalar `escalar' = scalar(`e_`escalar'') + } + foreach ematrix of local ematrices { + eret matrix `ematrix' = `e_`ematrix'' + } +end diff --git a/110/replication_package/replication/ado/plus/e/estadd.hlp b/110/replication_package/replication/ado/plus/e/estadd.hlp new file mode 100644 index 0000000000000000000000000000000000000000..2294b55341cc741c24a3cc01280907db93ee4af9 --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/estadd.hlp @@ -0,0 +1,939 @@ +{smcl} +{* 01feb2017}{...} +{hi:help estadd}{right:also see: {helpb esttab}, {helpb estout}, {helpb eststo}, {helpb estpost}} +{right: {browse "http://repec.sowi.unibe.ch/stata/estout/"}} +{hline} + +{title:Title} + +{p 4 4 2}{hi:estadd} {hline 2} Add results to (stored) estimates + + +{title:Syntax} + +{p 8 15 2} +{cmd:estadd} {it:{help estadd##subcommands:subcommand}} [{cmd:,} +{it:{help estadd##opts:options}} ] [ {cmd::} {it:namelist} ] + + + where {it:namelist} is {cmd:_all} | {cmd:*} | {it:name} [{it:name} ...] 
+ +{marker subcommands} + {it:subcommands}{col 26}description + {hline 65} + Elementary + {helpb estadd##local:{ul:loc}al} {it:name ...}{col 26}{...} +add a macro + {helpb estadd##scalar:{ul:sca}lar} {it:name} {cmd:=} {it:exp}{col 26}{...} +add a scalar + {helpb estadd##matrix:{ul:mat}rix} {it:name} {cmd:=} {it:mat}{col 26}{...} +add a matrix + {helpb estadd##rreturn:r({it:name})}{col 26}{...} +add contents of {cmd:r(}{it:name}{cmd:)} (matrix or scalar) + + Statistics for each + coefficient + {helpb estadd##beta:beta}{col 26}{...} +standardized coefficients + {helpb estadd##vif:vif}{col 26}{...} +variance inflation factors (after {cmd:regress}) + {helpb estadd##pcorr:pcorr}{col 26}{...} +partial (and semi-partial) correlations + {helpb estadd##expb:expb}{col 26}{...} +exponentiated coefficients + {helpb estadd##ebsd:ebsd}{col 26}{...} +standardized factor change coefficients + {helpb estadd##mean:mean}{col 26}{...} +means of regressors + {helpb estadd##sd:sd}{col 26}{...} +standard deviations of regressors + {helpb estadd##summ:summ}{col 26}{...} +various descriptives of the regressors + + Summary statistics + {helpb estadd##coxsnell:coxsnell}{col 26}{...} +Cox & Snell's pseudo R-squared + {helpb estadd##nagelkerke:nagelkerke}{col 26}{...} +Nagelkerke's pseudo R-squared + {helpb estadd##lrtest:lrtest} {it:model}{col 26}{...} +likelihood-ratio test + {helpb estadd##ysumm:ysumm}{col 26}{...} +descriptives of the dependent variable + + Other + {helpb estadd##margins:margins}{col 26}{...} +add results from {cmd:margins} (Stata 11 or newer) + + {help estadd##spost:SPost9} + {helpb estadd##brant:brant}{col 26}{...} +add results from {cmd:brant} (if installed) + {helpb estadd##fitstat:fitstat}{col 26}{...} +add results from {cmd:fitstat} (if installed) + {helpb estadd##listcoef:listcoef}{col 26}{...} +add results from {cmd:listcoef} (if installed) + {helpb estadd##mlogtest:mlogtest}{col 26}{...} +add results from {cmd:mlogtest} (if installed) + {helpb 
estadd##prchange:prchange}{col 26}{...} +add results from {cmd:prchange} (if installed) + {helpb estadd##prvalue:prvalue}{col 26}{...} +add results from {cmd:prvalue} (if installed) + {helpb estadd##asprvalue:asprvalue}{col 26}{...} +add results from {cmd:asprvalue} (if installed) + {hline 65} + +{marker opts} + {it:{help estadd##options:options}}{col 26}description + {hline 65} + {cmdab:r:eplace}{col 26}{...} +permit overwriting existing {cmd:e()}'s + {cmdab:p:refix(}{it:string}{cmd:)}{col 26}{...} +specify prefix for names of added results + {cmdab:q:uietly}{col 26}{...} +suppress output from subcommand (if any) + {it:subcmdopts}{col 26}{...} +subcommand specific options + {hline 65} + + +{title:Description} + +{p 4 4 2} +{cmd:estadd} adds additional results to the {cmd:e()}-returns of an +estimation command (see help {help estcom}, help {helpb ereturn}). If no +{it:namelist} is provided, then the results are added to the +currently active estimates (i.e. the model fit last). If these +estimates have been previously stored, the stored copy of the +estimates will also be modified. Alternatively, if {it:namelist} is +provided after the colon, results are added to all indicated sets of +stored estimates (see help {helpb estimates store} or help +{helpb eststo}). You may use the {cmd:*} and {cmd:?} +wildcards in {it:namelist}. Execution is silent if {it:namelist} is +provided. + +{p 4 4 2} +Adding additional results to the {cmd:e()}-returns is useful, for example, +if the estimates be tabulated by commands such as {helpb estout} +or {helpb esttab}. See the {help estadd##examples:Examples} section below for +illustration of the usage of {cmd:estadd}. + +{p 4 4 2}Technical note: Some of the subcommands below make use of the +information contained in {cmd:e(sample)} to determine estimation sample. +These subcommands return error if the estimates do not contain +{cmd:e(sample)}. 
+ + +{title:Subcommands} + +{dlgtab:Elementary} +{marker local} +{p 4 8 2} +{cmd:estadd} {cmdab:loc:al} {it:name ...} + +{p 8 8 2} +adds in macro {cmd:e(}{it:name}{cmd:)} the specified contents (also +see help {helpb ereturn}). + +{marker scalar} +{p 4 8 2} +{cmd:estadd} {cmdab:sca:lar} {it:name} {cmd:=} {it:exp} + +{p 8 8 2} +adds in scalar {cmd:e(}{it:name}{cmd:)} the evaluation of {it:exp} +(also see help {helpb ereturn}). {it:name} must not be {cmd:b} or {cmd:V}. + +{p 4 8 2} +{cmd:estadd} {cmdab:sca:lar} {cmd:r(}{it:name}{cmd:)} + +{p 8 8 2} +adds in scalar {cmd:e(}{it:name}{cmd:)} the value of scalar +{cmd:r(}{it:name}{cmd:)}. {it:name} must not be {cmd:b} or {cmd:V}. + +{p 4 8 2} +{cmd:estadd} {cmdab:sca:lar} {it:name} + +{p 8 8 2} +adds in scalar {cmd:e(}{it:name}{cmd:)} the the value of scalar +{it:name}. {it:name} must not be {cmd:b} or {cmd:V}. + +{marker matrix} +{p 4 8 2} +{cmd:estadd} {cmdab:mat:rix} {it:name} {cmd:=} {it:matrix_expression} + +{p 8 8 2} +adds in matrix {cmd:e(}{it:name}{cmd:)} the evaluation of {it:matrix_expression} +(also see help {helpb matrix define}). {it:name} must not be {cmd:b} or {cmd:V}. + +{p 4 8 2} +{cmd:estadd} {cmdab:mat:rix} {cmd:r(}{it:name}{cmd:)} + +{p 8 8 2} +adds in matrix {cmd:e(}{it:name}{cmd:)} a copy of matrix +{cmd:r(}{it:name}{cmd:)}. {it:name} must not be {cmd:b} or {cmd:V}. + +{p 4 8 2} +{cmd:estadd} {cmdab:mat:rix} {it:name} + +{p 8 8 2} +adds in matrix {cmd:e(}{it:name}{cmd:)} a copy of matrix {it:name}. {it:name} +must not be {cmd:b} or {cmd:V}. + +{marker rreturn} +{p 4 8 2} +{cmd:estadd} {cmd:r(}{it:name}{cmd:)} + +{p 8 8 2} +adds in {cmd:e(}{it:name}{cmd:)} the value of scalar {cmd:r(}{it:name}{cmd:)} +or a copy of matrix {cmd:r(}{it:name}{cmd:)}, depending on the nature of +{cmd:r(}{it:name}{cmd:)}. {it:name} must not be {cmd:b} or {cmd:V}. 
+ + +{dlgtab:Statistics for each coefficient} +{marker beta} +{p 4 8 2} +{cmd:estadd} {cmd:beta} + +{p 8 8 2} +adds in {cmd:e(beta)} the standardized beta coefficients. + +{marker vif} +{p 4 8 2} +{cmd:estadd} {cmd:vif} [{cmd:,} {cmdab:tol:erance} {cmdab:sqr:vif} ] + +{p 8 8 2} +adds in {cmd:e(vif)} the variance inflation factors (VIFs) for the +regressors (see help {helpb vif}). Note that {cmd:vif} only works +with estimates produced by {helpb regress}. {cmd:tolerance} +additionally adds the tolerances (1/VIF) in {cmd:e(tolerance)}. +{cmd:sqrvif} additionally adds the square roots of the VIFs in +{cmd:e(sqrvif)}. + +{marker pcorr} +{p 4 8 2} +{cmd:estadd} {cmd:pcorr} [{cmd:, semi} ] + +{p 8 8 2} +adds the partial correlations (see help {helpb pcorr}) and, +optionally, the semi-partial correlations between the dependent +variable and the individual regressors (see, e.g., the {cmd:pcorr2} +package from the SSC Archive). In the case of multiple-equations +models, the results are computed for the first equation only. The +partial correlations will be returned in {cmd:e(pcorr)} and, if +{cmd:semi} is specified, the semi-partial correlations will be +returned in {cmd:e(spcorr)}. + +{marker expb} +{p 4 8 2} +{cmd:estadd} {cmd:expb} [{cmd:,} {cmdab:nocons:tant} ] + +{p 8 8 2} +adds in {cmd:e(expb)} the exponentiated coefficients (see the help +{it:{help eform_option}}). {cmd:noconstant} excludes the constant +from the added results. + +{marker ebsd} +{p 4 8 2} +{cmd:estadd} {cmd:ebsd} + +{p 8 8 2} +adds in {cmd:e(ebsd)} the standardized factor change coefficients, +i.e. exp(b_jS_j), where b_j is the raw coefficient and S_j is the +standard deviation of regressor j, that are sometimes reported for +logistic regression (see Long 1997). + +{marker mean} +{p 4 8 2} +{cmd:estadd} {cmd:mean} + +{p 8 8 2} +adds in {cmd:e(mean)} the means of the regressors. 
+ +{marker sd} +{p 4 8 2} +{cmd:estadd} {cmd:sd} [{cmd:,} {cmdab:nob:inary} ] + +{p 8 8 2} +adds in {cmd:e(sd)} the standard deviations of the regressors. +{cmd:nobinary} suppresses the computation of the standard deviation +for 0/1 variables. + +{marker summ} +{p 4 8 2} +{cmd:estadd} {cmd:summ} [{cmd:,} {it:stats} ] + +{p 8 8 2} +adds vectors of the regressors' descriptive statistics to the +estimates. The following {it:stats} are available: +{p_end} +{marker stats} + {it:stats}{col 26}description + {hline 59} + {cmdab:me:an}{col 26}mean + {cmdab:su:m}{col 26}sum + {cmdab:mi:n}{col 26}minimum + {cmdab:ma:x}{col 26}maximum + {cmdab:ra:nge}{col 26}range = max - min + {cmd:sd}{col 26}standard deviation + {cmdab:v:ar}{col 26}variance + {cmd:cv}{col 26}coefficient of variation (sd/mean) + {cmdab:sem:ean}{col 26}standard error of mean = sd/sqrt(n) + {cmdab:sk:ewness}{col 26}skewness + {cmdab:k:urtosis}{col 26}kurtosis + {cmd:p1}{col 26}1st percentile + {cmd:p5}{col 26}5th percentile + {cmd:p10}{col 26}10th percentile + {cmd:p25}{col 26}25th percentile + {cmd:p50}{col 26}50th percentile + {cmd:p75}{col 26}75th percentile + {cmd:p90}{col 26}90th percentile + {cmd:p95}{col 26}95th percentile + {cmd:p99}{col 26}99th percentile + {cmd:iqr}{col 26}interquartile range = p75 - p25 + {cmd:all}{col 26}all of the above + {cmdab:med:ian}{col 26}equivalent to specifying "{cmd:p50}" + {cmd:q}{col 26}equivalent to specifying "{cmd:p25 p50 p75}" + {hline 59} + +{p 8 8 2} +The default is {cmd:mean sd min max}. Alternatively, indicate the +desired statistics. For example, to add information on the +regressors' skewness and kurtosis, type + + {inp:. estadd summ, skewness kurtosis} + +{p 8 8 2} +The statistics names are used as the names for the returned {cmd:e()} +matrices. For example, {cmd:estadd summ, mean} will store the means +of the regressors in {cmd:e(mean)}. 
+ + +{dlgtab:Summary statistics} +{marker coxsnell} +{p 4 8 2} +{cmd:estadd} {cmd:coxsnell} + +{p 8 8 2} +adds in {cmd:e(coxsnell)} the Cox & Snell pseudo R-squared, which is +defined as + +{p 12 12 2} +r2_coxsnell = 1 - ( L0 / L1 )^(2/N) + +{p 8 8 2} +where L0 is the likelihood of the model without regressors, L1 the +likelihood of the full model, and N is the sample size. + +{marker nagelkerke} +{p 4 8 2} +{cmd:estadd} {cmd:nagelkerke} + +{p 8 8 2} +adds in {cmd:e(nagelkerke)} the Nagelkerke pseudo R-squared (or Cragg +& Uhler pseudo R-squared), which is defined as + +{p 12 12 2} +r2_nagelkerke = r2_coxsnell / (1 - L0^(2/N)) + +{marker lrtest} +{p 4 8 2} +{cmd:estadd} {cmd:lrtest} {it:model} [{cmd:,} {cmdab:n:ame:(}{it:string}{cmd:)} +{it:{help lrtest:lrtest_options}} ] + +{p 8 8 2} +adds the results from a likelihood-ratio test, where {it:model} is +the comparison model (see help {helpb lrtest}). Added are +{cmd:e(lrtest_chi2)}, {cmd:e(lrtest_df)}, and {cmd:e(lrtest_p)}. The +names may be modified using the {cmd:name()} option. Specify +{cmd:name(}{it:myname}{cmd:)} to add {cmd:e(}{it:myname}{cmd:chi2)}, +{cmd:e(}{it:myname}{cmd:df)}, and {cmd:e(}{it:myname}{cmd:p)}. See +help {helpb lrtest} for the {it:lrtest_options}. + +{marker ysumm} +{p 4 8 2} +{cmd:estadd} {cmd:ysumm} [{cmd:,} {it:stats} ] + +{p 8 8 2} +adds descriptive statistics of the dependent variable. See the +{helpb estadd##summ:summ} subcommand above for a list of the available +{it:stats}. The default is {cmd:mean sd min max}. The default prefix +for the names of the added scalars is {cmd:y} (e.g. the mean of the +dependent variable will be returned in {cmd:e(ymean)}). Use +{cmd:estadd}'s {cmd:prefix()} option to change the prefix. If a model +has multiple dependent variables, results for the first variable will +be added. 
+ +{dlgtab:Other} +{marker margins} +{p 4 8 2} +{cmd:estadd} {cmd:margins} [{it:marginlist}] [{it:if}] [{it:in}] [{it:weight}] [, {it:options} ] + +{p 8 8 2} +adds results from the {cmd:margins} command, which was introduced +in Stata 11. See help {helpb margins} for options. All results returned by +{cmd:margins} except {cmd:e(V)} are added using "{cmd:margins_}" as a default +prefix. For example, the margins are added in {cmd:e(margins_b)}. The +standard errors are added in {cmd:e(margins_se)}. Use the {helpb estadd##opts:prefix()} +option to change the default prefix. + +{marker spost} +{dlgtab:SPost9} + +{p 4 4 2} The following subcommands are wrappers for +commands from Long and Freese's {helpb SPost9} package (see +{browse "http://www.indiana.edu/~jslsoc/spost9.htm"}). Type + + . {net "from http://www.indiana.edu/~jslsoc/stata":net from http://www.indiana.edu/~jslsoc/stata} + +{p 4 4 2} +to obtain the {cmd:SPost9} package (spost9_ado). {cmd:SPost} for Stata 8 (spostado) is not +supported. + +{p 4 4 2}For examples on using the subcommands see +{browse "http://repec.sowi.unibe.ch/stata/estout/spost.html"}. + +{marker brant} +{p 4 8 2} +{cmd:estadd brant} [{cmd:,} {it:{help brant:brant_options}} ] + +{p 8 8 2} +applies {helpb brant} from Long and +Freese's {helpb SPost} package and adds the returned results to +{cmd:e()}. You may specify {it:brant_options} as described in +help {helpb brant}. 
The following results are added: + + {cmd:e(}{it:...}{cmd:)} Contents + {hline 60} + Scalars + {cmd:brant_chi2} Chi-squared of overall Brant test + {cmd:brant_df} Degrees of freedom of overall Brant test + {cmd:brant_p} P-value of overall Brant test + + Matrix + {cmd:brant} Test results for individual regressors + (rows: chi2, p LR or Wald X2 + {cmd:r2_adj} Adjusted R2 + {cmd:r2_mf} McFadden's R2 + {cmd:r2_mfadj} McFadden's Adj R2 + {cmd:r2_ml} ML (Cox-Snell) R2 + {cmd:r2_cu} Cragg-Uhler(Nagelkerke) R2 + {cmd:r2_mz} McKelvey & Zavoina's R2 + {cmd:r2_ef} Efron's R2 + {cmd:v_ystar} Variance of y* + {cmd:v_error} Variance of error + {cmd:r2_ct} Count R2 + {cmd:r2_ctadj} Adj Count R2 + {cmd:aic0} AIC + {cmd:aic_n} AIC*n + {cmd:bic0} BIC + {cmd:bic_p} BIC' + {cmd:statabic} BIC used by Stata + {cmd:stataaic} AIC used by Stata + {cmd:n_rhs} Number of rhs variables + {cmd:n_parm} Number of parameters + {hline 60} + +{marker listcoef} +{p 4 8 2} +{cmd:estadd listcoef} [{it:varlist}] [{cmd:,} {cmd:nosd} {it:{help listcoef:listcoef_options}} ] + +{p 8 8 2} +applies {helpb listcoef} from Long and +Freese's {helpb SPost} package and adds the returned results to +{cmd:e()}. You may specify {it:listcoef_options} as described in +help {helpb listcoef}. Furthermore, option {cmd:nosd} suppresses +adding the standard deviations of the variables in {cmd:e(b_sdx)}. 
+ +{p 8 8 2}Depending on the estimation command and options, several of the +following matrices are added: + + {cmd:e(}{it:...}{cmd:)} Contents + {hline 60} + {cmd:b_xs} x-standardized coefficients + {cmd:b_ys} y-standardized coefficients + {cmd:b_std} Fully standardized coefficients + {cmd:b_fact} Factor change coefficients + {cmd:b_facts} Standardized factor change coefficients + {cmd:b_pct} Percent change coefficients + {cmd:b_pcts} Standardized percent change coefficients + {cmd:b_sdx} Standard deviation of the Xs + {hline 60} + +{p 8 8 2}For nominal models ({helpb mlogit}, {helpb mprobit}) the +original parametrization of {cmd:e(b)} may not match the contrasts +computed by {cmd:listcoef}. To be able to tabulate standardized +coefficients along with the raw coefficients for the requested +contrasts, the following additional matrices are added for +these models: + + {cmd:e(}{it:...}{cmd:)} Contents + {hline 60} + {cmd:b_raw} raw coefficients + {cmd:b_se} standard errors of raw coefficients + {cmd:b_z} z statistics + {cmd:b_p} p-values + {hline 60} + +{marker mlogtest} +{p 4 8 2} +{cmd:estadd mlogtest} [{it:varlist}] [{cmd:,} {it:{help mlogtest:mlogtest_options}} ] + +{p 8 8 2} +applies {helpb mlogtest} from Long and +Freese's {helpb SPost} package and adds the returned results to +{cmd:e()}. You may specify {it:mlogtest_options} as described in +help {helpb mlogtest}. 
+ +{p 8 8 2}Depending on the specified options, a selection of the following +returns are added: + + {cmd:e(}{it:...}{cmd:)} Contents + {hline 60} + Scalars + {cmd:hausman_set}{it:#}{cmd:_chi2} Hausman IIA tests using {helpb hausman} + {cmd:hausman_set}{it:#}{cmd:_df} + {cmd:hausman_set}{it:#}{cmd:_p} + + {cmd:suest_set}{it:#}{cmd:_chi2} Hausman IIA tests using {helpb suest} + {cmd:suest_set}{it:#}{cmd:_df} + {cmd:suest_set}{it:#}{cmd:_p} + + {cmd:smhsiao_set}{it:#}{cmd:_chi2} Small-Hsiao IIA tests + {cmd:smhsiao_set}{it:#}{cmd:_df} + {cmd:smhsiao_set}{it:#}{cmd:_p} + + {cmd:combine_}{it:#1}{cmd:_}{it:#2}{cmd:_chi2} Wald tests for combination of outcomes + {cmd:combine_}{it:#1}{cmd:_}{it:#2}{cmd:_df} + {cmd:combine_}{it:#1}{cmd:_}{it:#2}{cmd:_p} + + {cmd:lrcomb_}{it:#1}{cmd:_}{it:#2}{cmd:_chi2} LR tests for combination of outcomes + {cmd:lrcomb_}{it:#1}{cmd:_}{it:#2}{cmd:_df} + {cmd:lrcomb_}{it:#1}{cmd:_}{it:#2}{cmd:_p} + + {cmd:wald_set}{it:#}{cmd:_chi2} Wald tests for sets of independent + {cmd:wald_set}{it:#}{cmd:_df} variables + {cmd:wald_set}{it:#}{cmd:_p} + + {cmd:lrtest_set}{it:#}{cmd:_chi2} LR tests for sets of independent + {cmd:lrtest_set}{it:#}{cmd:_df} variables + {cmd:lrtest_set}{it:#}{cmd:_p} + + Matrices + {cmd:wald} Wald tests for individual variables + (rows: chi2, df, p) + {cmd:lrtest} LR tests for individual variables + (rows: chi2, df, p) + {hline 60} + +{p 4 4 2}To address the rows of {cmd:e(wald)} and {cmd:e(lrtest)} in {helpb estout}'s +{cmd:cells()} option type the row names in brackets, for example, {cmd:wald[p]} or +{cmd:lrtest[chi2]}. 
+ +{marker prchange} +{p 4 8 2} +{cmd:estadd prchange} [{it:varlist}] [{cmd:if} {it:exp}] [{cmd:in} {it:range}] [{cmd:,} + {cmdab:pa:ttern(}{it:typepattern}{cmd:)} {cmdab:b:inary(}{it:type}{cmd:)} {cmdab:c:ontinuous(}{it:type}{cmd:)} + [{cmd:no}]{cmdab:a:vg} {cmd:split}[{cmd:(}{it:prefix}{cmd:)}] {it:{help prchange:prchange_options}} ] + +{p 8 8 2} +applies {helpb prchange} from Long and +Freese's {helpb SPost} package and adds the returned results to +{cmd:e()}. You may specify {it:prchange_options} as described in +help {helpb prchange}. In particular, the {cmd:outcome()} option may be +used with models for count, ordered, or nominal outcomes +to request results for a specific outcome. Further options are: + +{p 8 12 2}{cmd:pattern(}{it:typepattern}{cmd:)}, {cmd:binary(}{it:type}{cmd:)}, and +{cmd:continuous(}{it:type}{cmd:)} to determine which types of discrete change +effects are added as the main results. The default is to add the 0 to 1 +change effect for binary variables and the standard deviation change effect +for continuous variables. Use {cmd:binary(}{it:type}{cmd:)} and +{cmd:continuous(}{it:type}{cmd:)} to change these defaults. Available +types are: + + {it:type} Description + {hline 48} + {cmdab:mi:nmax} minimum to maximum change effect + {cmdab:0:1} 0 to 1 change effect + {cmdab:d:elta} {cmd:delta()} change effect + {cmdab:s:d} standard deviation change effect + {cmdab:m:argefct} marginal effect (some models only) + {hline 48} + +{p 12 12 2}Use {cmd:pattern(}{it:typepattern}{cmd:)} if you want to determine the +type of the added effects individually for each regressor. For example, +{bind:{cmd:pattern(minmax sd delta)}} would add {cmd:minmax} for the first regressor, +{cmd:sd} for the second, and {cmd:delta} for the third, and then proceed +using the defaults for the remaining variables. 
+ +{p 8 12 2}{cmd:avg} to request that only the average results over +all outcomes are added if applied to ordered +or nominal models ({helpb ologit}, {helpb oprobit}, {helpb slogit}, {helpb mlogit}, {helpb mprobit}). The +default is to add the average results as well as the individual results for +the different outcomes (unless {helpb prchange}'s {cmd:outcome()} option is +specified, in which case only results for the indicated outcome are +added). Furthermore, specify {cmd:noavg} to suppress the average results +and only add the outcome-specific results. {cmd:avg} cannot be combined with {cmd:split} +or {cmd:outcome()}. + +{p 8 12 2}{cmd:split}[{cmd:(}{it:prefix}{cmd:)}] to save +each outcome's results in a separate estimation set if applied to ordered +or nominal models ({helpb ologit}, {helpb oprobit}, {helpb slogit}, {helpb mlogit}, +{helpb mprobit}). The estimation sets are named +{it:prefix}{it:#}, where {it:#} is the value of the outcome at hand. If no +{it:prefix} is provided, the name of the estimation set followed by an +underscore is used as the prefix. If the estimation set has no name +(because it has not been stored yet) the name of the estimation command +followed by an underscore is used as the prefix (e.g. {cmd:ologit_}). The +estimation sets stored by the {cmd:split} option are intended for +tabulation only and should not be used with other post-estimation +commands. 
+ +{p 8 8 2}Depending on model and options, several of the following matrices +and scalars are added: + + {cmd:e(}{it:...}{cmd:)} Contents + {hline 60} + Scalars + {cmd:centered} {cmd:1} if effects are centered, {cmd:0} else + {cmd:delta} Value of {cmd:delta()} + {cmd:predval}[{it:#}] Prediction(s) at the base values + {cmd:outcome} Outcome value ({cmd:outcome()}/{cmd:split} only) + + Matrices + {cmd:dc} Discrete change effects (rows: main, minmax, + 01, delta, sd [, margefct]) + {cmd:pattern} Types of effects in the main row of {cmd:e(dc)} + {cmd:X} Base values and descriptive statistics + (rows: X, SD, Min, Max) + {hline 60} + +{p 8 8 2}The {cmd:e(dc)} and {cmd:e(X)} matrices have multiple rows. The +{cmd:e(dc)} matrix contains the main results as determined by +{cmd:pattern()}, {cmd:binary()}, and {cmd:continuous()} in the first row. +The second and following rows contain the separate results for each type of +effect using the labels provided by {cmd:prchange} as row names. Type +{cmd:dc[}{it:#}{cmd:]} or {cmd:dc[}{it:rowname}{cmd:]} to address the rows +in {helpb estout}'s {cmd:cells()} option, where {it:#} is the row number +or {it:rowname} is the +row name. For example, type {cmd:dc[-+sd/2]} to address the centered +standard deviation change effects. To tabulate the main results (1st row), +simply type {cmd:dc}. {cmd:e(pattern)} indicates the types of effects +contained in the main row of {cmd:e(dc)} using numeric codes. The codes are 1 +for the minimum to maximum change effect, 2 for the 0 to 1 change effect, 3 +for the {cmd:delta()} change effect, 4 for the standard deviation change +effect, and 5 for the marginal effect. {cmd:e(X)} has four rows +containing the base values, standard deviations, minimums, and maximums. If +the {cmd:fromto} option is specified, two additional matrices, +{cmd:e(dcfrom)} and {cmd:e(dcto)} are added. 
+ +{marker prvalue} +{p 4 8 2} +{cmd:estadd prvalue} [{cmd:if} {it:exp}] [{cmd:in} {it:range}] [{cmd:,} {cmdab:lab:el:(}{it:string}{cmd:)} +{it:{help prvalue:prvalue_options}} ] + +{p 4 8 2} +{cmd:estadd prvalue} {cmd:post} [{it:name}] [{cmd:,} {cmdab:t:itle:(}{it:string}{cmd:)} {cmd:swap} ] + +{p 8 8 2} applies {helpb prvalue} from Long and Freese's {helpb SPost} +package and adds the returned results to {cmd:e()}. The procedure is to +first collect a series of predictions by repeated calls to +{cmd:estadd prvalue} and then apply {cmd:estadd prvalue post} to prepare the results +for tabulation as in the following example: + + {com}. logit lfp k5 k618 age wc hc lwg inc + . estadd prvalue, x(inc 10) label(low inc) + . estadd prvalue, x(inc 20) label(med inc) + . estadd prvalue, x(inc 30) label(high inc) + . estadd prvalue post + . estout{txt} + +{p 8 8 2} You may specify {it:prvalue_options} with {cmd:estadd prvalue} as +described in help {helpb prvalue}. For example, use {cmd:x()} and +{cmd:rest()} to set the values of the independent variables. Use +{cmd:label()} to label the single calls. "pred#" is used as label if +{cmd:label()} is omitted, where # is the number of the call. Labels may +contain spaces but they will be trimmed to a maximum +length of 30 characters and some characters ({cmd::}, +{cmd:.}, {cmd:"}) will be replaced by underscore. The results +from the single calls are collected in matrix {cmd:e(_estadd_prvalue)} +(predictions) and matrix {cmd:e(_estadd_prvalue_x)} (x-values). Specify +{cmd:replace} to drop results from previous calls. + +{p 8 8 2} +{cmd:estadd prvalue post} posts the collected predictions in {cmd:e(b)} +so that they can be tabulated. 
The following results are saved: + + {cmd:e(}{it:...}{cmd:)} Contents + {hline 60} + Scalars + {cmd:N} number of observations + + Macros + {cmd:depvar} name of dependent variable + {cmd:cmd} {cmd:estadd_prvalue} + {cmd:model} model estimation command + {cmd:properties} {cmd:b} + + Matrices + {cmd:b} predictions + {cmd:se} standard errors + {cmd:LB} lower confidence interval bounds + {cmd:UB} upper confidence interval bounds + {cmd:Category} outcome values + {cmd:Cond} conditional predictions (some models only) + {cmd:X} values of predictors (for each prediction) + {cmd:X2} second equation predictors (some models only) + {hline 60} + +{p 8 8 2} {cmd:estadd prvalue post} replaces the current model unless +{it:name} is specified, in which case the results are stored under {it:name} and the model +remains active. However, if the model has a name +(because it has been stored), the name of the model is used as a prefix. +If, for example, the model has been stored as {cmd:model1}, then +{cmd:estadd prvalue post} stores its results under {cmd:model1}{it:name}. +Use {cmd:title()} to specify a title for the stored results. + +{p 8 8 2}The default for {cmd:estadd prvalue post} is to arrange +{cmd:e(b)} in a way so that predictions are grouped by outcome (i.e. outcome labels are used +as equations). Alternatively, specify {cmd:swap} to group predictions by +{cmd:prvalue} calls (i.e. to use the prediction labels as equations). + +{p 8 8 2}{cmd:e(X)} contains one row for each independent variable. To address the rows in +{helpb estout}'s {cmd:cells()} option type {cmd:X[}{it:varname}{cmd:]}, where {it:varname} is +the name of the variable of interest. {cmd:e(X2)}, if provided, is analogous to {cmd:e(X)}. 
+ +{marker asprvalue} +{p 4 8 2} +{cmd:estadd asprvalue} [{cmd:,} {cmdab:lab:el:(}{it:string}{cmd:)} +{it:{help asprvalue:asprvalue_options}} ] + +{p 4 8 2} +{cmd:estadd asprvalue} {cmd:post} [{it:name}] [{cmd:,} {cmdab:t:itle:(}{it:string}{cmd:)} {cmd:swap} ] + +{p 8 8 2} applies {helpb asprvalue} from Long and Freese's {helpb SPost} +package and adds the returned results to {cmd:e()}. The procedure is to +first collect a series of predictions by repeated calls to +{cmd:estadd asprvalue} and then apply {cmd:estadd asprvalue post} to prepare the results +for tabulation as in the following example: + + {com}. clogit choice train bus time invc, group(id) + . estadd asprvalue, cat(train bus) label(at means) + . estadd asprvalue, cat(train bus) rest(asmean) label(at asmeans) + . estadd asprvalue post + . estout{txt} + +{p 8 8 2} You may specify {it:asprvalue_options} with {cmd:estadd asprvalue} as +described in help {helpb asprvalue}. For example, use {cmd:x()} and +{cmd:rest()} to set the values of the independent variables. Use +{cmd:label()} to label the single calls. "pred#" is used as label if +{cmd:label()} is omitted, where # is the number of the call. Labels may +contain spaces but they will be trimmed to a maximum +length of 30 characters and some characters ({cmd::}, +{cmd:.}, {cmd:"}) will be replaced by underscore. The results +from the single calls are collected in matrices {cmd:e(_estadd_asprval)} +(predictions), {cmd:e(_estadd_asprval_asv)} (values of alternative-specific +variables), and {cmd:e(_estadd_asprval_csv)} (values of case-specific +variables). Specify {cmd:replace} to drop results from previous calls. + +{p 8 8 2} +{cmd:estadd asprvalue post} posts the collected predictions in {cmd:e(b)} +so that they can be tabulated. 
The following results are saved: + + {cmd:e(}{it:...}{cmd:)} Contents + {hline 60} + Scalars + {cmd:N} number of observations + + Macros + {cmd:depvar} name of dependent variable + {cmd:cmd} {cmd:estadd_asprvalue} + {cmd:model} model estimation command + {cmd:properties} {cmd:b} + + Matrices + {cmd:b} predictions + {cmd:asv} alternative-specific variables (if available) + {cmd:csv} case-specific variables (if available) + {hline 60} + +{p 8 8 2} {cmd:estadd asprvalue post} replaces the current model unless +{it:name} is specified, in which case the results are stored under +{it:name} and the model remains active. However, if the model has a name +(because it has been stored), the name of the model is used as a prefix. +If, for example, the model has been stored as {cmd:model1}, then +{cmd:estadd asprvalue post} stores its results under {cmd:model1}{it:name}. +Use {cmd:title()} to specify a title for the stored results. + +{p 8 8 2}The default for {cmd:estadd asprvalue post} is to arrange +{cmd:e(b)} in a way so that predictions are grouped by outcome (i.e. outcome labels are used +as equations). Alternatively, specify {cmd:swap} to group predictions by +{cmd:prvalue} calls (i.e. to use the prediction labels as equations). + +{p 8 8 2}{cmd:e(asv)} and {cmd:e(csv)} contain one row for each variable. +To address the rows in {helpb estout}'s {cmd:cells()} option type +{cmd:asv[}{it:varname}{cmd:]} or {cmd:csv[}{it:varname}{cmd:]}, where +{it:varname} is the name of the variable of interest. + +{marker options} +{title:Options} + +{p 4 8 2} +{cmd:replace} permits {cmd:estadd} to overwrite existing {cmd:e()} +macros, scalars, or matrices. + +{p 4 8 2} +{cmd:prefix(}{it:string}{cmd:)} denotes a prefix for the names of the +added results. The default prefix is an empty string. For example, if +{cmd:prefix(}{it:string}{cmd:)} is specified, the {cmd:beta} +subcommand will return the matrix {cmd:e(}{it:string}{cmd:beta)}. 
+ +{p 4 8 2}{cmd:quietly} suppresses the output from the called subcommand and displays only +the list of added results. Note that many of {cmd:estadd}'s subcommands do not generate +output, in which case {cmd:quietly} has no effect. + +{p 4 8 2} +{it:subcmdopts} are subcommand specific options. See the descriptions +of the subcommands above. + +{marker examples} +{title:Examples} + +{p 4 4 2}Example 1: Add {cmd:r()}-returns from other programs to the +current estimates + + {com}. sysuse auto + {txt}(1978 Automobile Data) + + {com}. quietly regress price mpg weight + {txt} + {com}. test mpg=weight + + {txt} ( 1) {res}mpg - weight = 0 + + {txt} F( 1, 71) ={res} 0.36 + {txt}{col 13}Prob > F ={res} 0.5514 + {txt} + {com}. estadd scalar p_diff = r(p) + + {txt}added scalar: + e(p_diff) = {res}.55138216 + {txt} + {com}. estout, stats(p_diff) + {res} + {txt}{hline 25} + {txt} b + {txt}{hline 25} + {txt}mpg {res} -49.51222{txt} + {txt}weight {res} 1.746559{txt} + {txt}_cons {res} 1946.069{txt} + {txt}{hline 25} + {txt}p_diff {res} .5513822{txt} + {txt}{hline 25} + + +{p 4 4 2}Example 2: Add means and standard deviations of the model's regressors +to the current estimates + + {com}. quietly logit foreign price mpg + {txt} + {com}. estadd summ, mean sd + + {txt}added matrices: + e(sd) : {res}1 x 3 + {txt}e(mean) : {res}1 x 3 + {txt} + {com}. estout, cells("mean sd") drop(_cons) + {res} + {txt}{hline 38} + {txt} mean sd + {txt}{hline 38} + {txt}price {res} 6165.257 2949.496{txt} + {txt}mpg {res} 21.2973 5.785503{txt} + {txt}{hline 38} + + +{p 4 4 2} +Example 3: Add standardized beta coefficients to stored estimates + + {com}. eststo: quietly regress price mpg + {txt}({res}est1{txt} stored) + + {com}. eststo: quietly regress price mpg foreign + {txt}({res}est2{txt} stored) + + {com}. estadd beta: * + {txt} + {com}. 
estout, cells(beta) drop(_cons) + {res} + {txt}{hline 38} + {txt} est1 est2 + {txt} beta beta + {txt}{hline 38} + {txt}mpg {res} -.4685967 -.5770712{txt} + {txt}foreign {res} .2757378{txt} + {txt}{hline 38} + + +{p 4 4 2}See +{browse "http://repec.sowi.unibe.ch/stata/estout/"} +for additional examples. + + +{title:Writing one's own subcommands} + +{p 4 4 2} +A program providing a new {cmd:estadd} subcommand should be called +{cmd:estadd_}{it:mysubcommand} (see help {helpb program} for advice +on defining programs). {it:mysubcommand} will be available to {cmd:estadd} as a new +{it:subcommand} after the program definition has been executed or +saved to a file called "estadd_{it:mysubcommand}.ado" in either the +current directory or somewhere else in the {cmd:adopath} +(see help {helpb sysdir}). + +{p 4 4 2} +Use the subcommands provided within "estadd.ado" as a starting +point for writing new subcommands. See +{browse "http://repec.sowi.unibe.ch/stata/estout/estadd.html#007"} +for an example. 
+ + +{title:Author} + +{p 4 4 2} Ben Jann, Institute of Sociology, University of Bern, jann@soz.unibe.ch + + +{title:Also see} + + Manual: {hi:[R] estimates} + +{p 4 13 2}Online: help for + {helpb estimates}, + {helpb ereturn}, + {helpb program}, + {helpb esttab}, + {helpb estout}, + {helpb eststo}, + {helpb estpost} +{p_end} diff --git a/110/replication_package/replication/ado/plus/e/estfe.ado b/110/replication_package/replication/ado/plus/e/estfe.ado new file mode 100644 index 0000000000000000000000000000000000000000..b3f876266b71e44b1f9443959e4819590d9961e3 --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/estfe.ado @@ -0,0 +1,203 @@ +* ESTFE - Allow easy FE rows with estout +* See example at the end + +capture program drop estfe +program define estfe + syntax [anything(id="stored estimates" name=est_list)], [restore labels(string asis)] + if ("`restore'"!="") Restore `est_list' + else Add `est_list', labels(`labels') +end + +capture program drop Add +program define Add, rclass + syntax [anything(id="stored estimates" name=est_list)], [labels(string asis)] + local dot . + local hasdot : list dot in est_list + local est_list : list est_list - dot + + if ("`est_list'"!="") { + qui estimates dir `est_list' + local models "`r(names)'" + } + + if (`hasdot') { + tempname hold + estimates store `hold', nocopy + local models `hold' `models' + } + + foreach model of local models { + AddOne `model' // injected `absvars' + local fe_list `fe_list' `absvars' + } + local fe_list : list uniq fe_list + + if (`hasdot') { + qui estimates restore `hold' + estimates drop `hold' + } + + local indicate_fe // This will contain our answer + while (`"`labels'"'!="") { + gettoken lhs labels : labels + gettoken rhs labels : labels + if "`rhs'"=="" { + di as error "error: odd number of labels" + error 123 + } + + foreach fe of local fe_list { + local fixed_fe : subinstr local fe "0." 
"", all
			if ("`fixed_fe'"=="`lhs'") {
				// user supplied a label for this FE: emit a "label=fe" pair
				// and remove the FE from the pending list
				local indicate_fe `"`indicate_fe' "`rhs'=`fe'" "'
				local fe_list : list fe_list - fe
				continue, break
			}
		}
	}

	* Parse remaining (w/out label)
	foreach fe of local fe_list {
		local fixed_fe : subinstr local fe "0." "", all
		local indicate_fe `"`indicate_fe' "`fixed_fe'=`fe'""'
	}

	return local indicate_fe `"`indicate_fe'"'
end

* AddOne: augment one stored estimate with placeholder (zero) coefficients,
* one per absorbed fixed effect, so -estout-/-esttab- can render FE rows via
* indicate(). Backs up e(b)/e(V) into e(b_backup)/e(V_backup) so Restore can
* undo the change. Passes the detected absvars back to the caller via c_local.
capture program drop AddOne
program define AddOne, eclass
	* From Ben Jann
	* See https://github.com/benjann/estout/issues/6
	* Requires erepost from SSC
	args model

	qui estimates restore `model'

	* Backup e(b) e(V)
	tempname b V new
	matrix `b' = e(b)
	matrix `V' = e(V)
	ereturn matrix b_backup = `b', copy
	ereturn matrix V_backup = `V', copy

	* Augment, reghdfe convention
	local K = colsof(`b')
	local G = e(N_hdfe_extended)
	local absvars "`e(extended_absvars)'"

	* Also allow areg
	// areg leaves e(N_hdfe_extended) missing and stores its single
	// absorbed variable in e(absvar)
	if (`G'==.) local G = 1
	if ("`absvars'"=="") local absvars "`e(absvar)'"
	FixAbsvars `absvars'

	* Allow xtreg_fe, xtivreg_fe, etc
	if ("`absvars'"=="" & "`e(model)'"=="fe") local absvars "`e(ivar)'"

	// append one zero coefficient per absorbed FE; pad e(V) with zeros
	// to keep it conformable with the augmented e(b)
	matrix `new' = J(1, `G', 0)
	matrix colnames `new' = `absvars'
	matrix `b' = `b', `new'
	matrix `V' = (`V' , J(`K', `G', 0)) \ (J(`G', `K', 0), J(`G', `G', 0))

	erepost b=`b' V=`V', rename // Minor problem: removes "hidden" attribute
	estimates store `model', nocopy
	c_local absvars "`absvars'"
end

* FixAbsvars: prefix each simple term (no "#" interaction, no "c." prefix)
* with "0." so the placeholder FE names cannot clash with real coefficient
* names. Returns the rewritten list to the caller via c_local absvars.
capture program drop FixAbsvars
program define FixAbsvars
	while ("`0'"!="") {
		gettoken absvar 0 : 0
		local newabsvar
		while ("`absvar'"!="") {
			gettoken part absvar : absvar, parse("# ")
			if (strpos("`part'", "#")==0 & strpos("`part'", "c.")==0) local part 0.`part'
			local newabsvar `newabsvar'`part'
		}
		local newabsvars `newabsvars' `newabsvar'
	}
	c_local absvars `newabsvars'
end

* Restore: undo AddOne for the given stored estimates ("." refers to the
* currently active estimates), copying e(b_backup)/e(V_backup) back into
* e(b)/e(V) and clearing the backups.
capture program drop Restore
program define Restore, eclass
	syntax [anything(id="stored estimates" name=est_list)]
	local dot .
	local hasdot : list dot in est_list
	local est_list : list est_list - dot

	if ("`est_list'"!="") {
		qui estimates dir `est_list'
		local models "`r(names)'"
	}

	// temporarily store the active estimates so they can be processed
	// like any stored model, then reactivated below
	if (`hasdot') {
		tempname hold
		estimates store `hold', nocopy
		local models `hold' `models'
	}

	foreach model of local models {
		qui estimates restore `model'
		tempname b V
		matrix `b' = e(b_backup)
		matrix `V' = e(V_backup)
		ereturn local b_backup
		ereturn local V_backup
		erepost b=`b' V=`V', rename
		estimates store `model', nocopy
	}

	if (`hasdot') {
		qui estimates restore `hold'
		estimates drop `hold'
	}
end

/*
* Setup
	pr drop _all
	set trace off
	clear all
	set more off
	sysuse auto

	bys turn: gen t = _n
	xtset turn t

* Run and store regressions
	reghdfe price weight, a(turn foreign#trunk##c.gear) keepsing
	estimates store model1, nocopy

	reghdfe price weight length, a(foreign turn) keepsing
	estimates store model2, nocopy

	areg price length, a(turn)
	estimates store model3, nocopy

	regress price weight
	estimates store model4, nocopy

	xtreg price gear, fe
	estimates store model5, nocopy

	xtreg price gear length, re
	*estimates store model6, nocopy

* Prepare estimates for -estout-
	estfe . model*, labels(turn "Turn FE" foreign#trunk "Foreign-Trunk FE" foreign#trunk#c.gear_ratio "Foreign-Trunk Gear Slope")
	return list

* Run estout/esttab
	esttab . model* , indicate("Length Controls=length" `r(indicate_fe)') varwidth(30)

* Return stored estimates to their previous state
	estfe .
model*, restore + +* Verify + areg + estimates dir _all + estimates restore model2 + reghdfe + di e(b_backup) // gives error if not restored +*/ diff --git a/110/replication_package/replication/ado/plus/e/estout.ado b/110/replication_package/replication/ado/plus/e/estout.ado new file mode 100644 index 0000000000000000000000000000000000000000..d3455f625cbb4b2f183b835f0806850aa6a89fd2 --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/estout.ado @@ -0,0 +1,4959 @@ +*! version 3.31 26apr2022 Ben Jann + +program define estout, rclass + version 8.2 + return local cmdline estout `macval(0)' + syntax [anything] [using] [ , /// + Cells(string asis) /// + Drop(string asis) /// + Keep(string asis) /// + Order(string asis) /// + REName(passthru) /// + Indicate(string asis) /// + TRansform(string asis) /// + EQuations(passthru) /// + EFORM2(string) /// + Margin2(string) /// + DIscrete(string asis) /// + MEQs(string) /// + DROPPED2(string) /// + level(numlist max=1 int >=10 <=99) /// + Stats(string asis) /// + STARLevels(string asis) /// + STARKeep(string asis) /// + STARDrop(string asis) /// + VARwidth(numlist max=1 int >=0) /// + MODELwidth(numlist int >=0) /// + EXTRAcols(numlist sort) /// + BEGin(string asis) /// + DELimiter(string asis) /// + INCELLdelimiter(string asis) /// + end(string asis) /// + DMarker(string) /// + MSign(string) /// + SUBstitute(string asis) /// + INTERACTion(string asis) /// + TItle(string) /// + note(string) /// + PREHead(string asis) /// + POSTHead(string asis) /// + PREFoot(string asis) /// + POSTFoot(string asis) /// + HLinechar(string) /// + VARLabels(string asis) /// + REFcat(string asis) /// + MLabels(string asis) /// + NUMbers2(string asis) /// + COLLabels(string asis) /// + EQLabels(string asis) /// + MGRoups(string asis) /// + LABCOL2(string asis) /// + TOPfile(string) /// + BOTtomfile(string) /// + STYle(string) /// + DEFaults(string) /// + * /// + ] + MoreOptions, `options' + if "`style'"!="" local defaults "`style'" + 
+*Matrix mode + tempname B + MatrixMode, `anything' `rename' // resets the cells argument + // and returns r(coefs) etc. and local 'matrixmode' + if (`matrixmode'==1) { + local models `r(names)' + local nmodels = r(nmodels) + local ccols = r(ccols) + if `ccols'>0 { + mat `B' = r(coefs) + } + } + +*Parse suboptions + local elnum 0 + if `"`cells'"'!="none" { + gettoken row rest: cells, bind match(par) qed(qed) + if `"`par'"'=="(" local qed 1 + local cells + while `"`row'"'!="" { + local newrow + gettoken opt row: row, parse(" ([&") + if `"`macval(row)'"'=="" & `qed'==0 { + local row0 + gettoken trash: rest, parse("[") + if `"`trash'"'=="[" { + gettoken trash rest: rest, parse("[") + gettoken mrow rest: rest, parse("]") q + gettoken trash rest: rest, parse("]") + if `"`trash'"'!="]" { + error 198 + } + } + gettoken trash: rest, match(par) + if `"`par'"'=="(" { + gettoken opt2 rest: rest, match(par) + } + else local opt2 + } + else { + gettoken trash: row, parse("[") + if `"`trash'"'=="[" { + gettoken trash row: row, parse("[") + gettoken mrow row: row, parse("]") q + gettoken trash row: row, parse("]") + if `"`trash'"'!="]" { + error 198 + } + } + gettoken trash row0: row, match(par) + gettoken opt2: row, match(par) + } + while "`opt'"!="" { + if "`opt'"!="&" & "`opt'"!="." 
{ + local `opt'_tname "el`++elnum'" + local ``opt'_tname'_ "`opt'" + local newrow `"`newrow' ``opt'_tname'"' + if `"`par'"'!="(" local opt2 + ParseValueSubopts ``opt'_tname' `opt', mrow(`mrow') `macval(opt2)' + local mrow + } + else { + if `"`par'"'=="(" | `"`mrow'"'!="" error 198 + local newrow `"`newrow' `opt'"' + } + if `"`par'"'!="(" { + gettoken opt row: row, parse(" ([&") + } + else { + gettoken opt row: row0, parse(" ([&") + } + gettoken trash: row, parse("[") + if `"`trash'"'=="[" { + gettoken trash row: row, parse("[") + gettoken mrow row: row, parse("]") q + gettoken trash row: row, parse("]") + if `"`trash'"'!="]" { + error 198 + } + } + gettoken trash row0: row, match(par) + gettoken opt2: row, match(par) + } + local newrow: list retok newrow + if `qed' local cells `"`cells'"`newrow'" "' + else local cells `"`cells'`newrow' "' + gettoken row rest: rest, bind match(par) qed(qed) + if `"`par'"'=="(" local qed 1 + } + local cells: list retok cells + } + if "`eform2'"!="" { + local eform "`eform2'" + local eform2 + } + if `"`transform'"'!="" { + ParseTransformSubopts `transform' + } + if "`margin2'"!="" { + local margin "`margin2'" + local margin2 + } + if `"`dropped'"'!="" local dropped "(dropped)" + if `"`macval(dropped2)'"'!="" { + local dropped `"`macval(dropped2)'"' + local dropped2 + } + if `"`macval(stats)'"'!="" { + ParseStatsSubopts `macval(stats)' + if `"`macval(statslabels)'"'!="" { + if trim(`"`statslabels'"')=="none" { + local statslabelsnone none + local statslabels + } + else { + ParseLabelsSubopts statslabels `macval(statslabels)' + } + } + } + foreach opt in mgroups mlabels eqlabels collabels varlabels { + if `"`macval(`opt')'"'!="" { + if trim(`"``opt''"')=="none" { + local `opt'none none + local `opt' + } + else { + ParseLabelsSubopts `opt' `macval(`opt')' + } + } + } + if `"`macval(numbers2)'"'!="" { + local numbers `"`macval(numbers2)'"' + local numbers2 + } + if `"`macval(indicate)'"'!="" { + ParseIndicateOpts `macval(indicate)' + } + 
if `"`macval(refcat)'"'!="" { + ParseRefcatOpts `macval(refcat)' + } + if `"`macval(starlevels)'"'!="" { + ParseStarlevels `macval(starlevels)' + } + if `"`macval(labcol2)'"'!="" { + ParseLabCol2 `macval(labcol2)' + } + +*Process No-Options + foreach opt in unstack eform margin dropped discrete stardetach wrap /// + legend label refcatlabel numbers lz abbrev replace append type showtabs /// + smcltags smclrules smclmidrules smcleqrules asis outfilenoteoff /// + omitted baselevels rtfencode { + if "`no`opt''"!="" local `opt' + } + +*Defaults + if "`defaults'"=="esttab" local defaults "tab" + if "`defaults'"=="" & `"`using'"'=="" local defaults "smcl" + if inlist("`defaults'", "", "smcl", "tab", "fixed", "tex", "html","mmd") { + local varwidthfactor = (1 + ("`eqlabelsmerge'"!="" & "`unstack'"=="")*.5) + if inlist("`defaults'", "", "tab") { + if `"`macval(delimiter)'"'=="" local delimiter _tab + if `"`macval(interaction)'"'=="" local interaction `"" # ""' + } + else if "`defaults'"=="smcl" { + if "`varwidth'"=="" local varwidth = cond("`label'"=="", 12, 20) * `varwidthfactor' + if "`modelwidth'"=="" local modelwidth 12 + if "`noabbrev'"=="" local abbrev abbrev + if `"`macval(delimiter)'"'=="" local delimiter `"" ""' + if "`nosmcltags'"=="" local smcltags smcltags + if "`nosmclrules'"=="" local smclrules smclrules + if "`asis'"=="" local noasis noasis + if `"`macval(interaction)'"'=="" local interaction `"" # ""' + } + else if "`defaults'"=="fixed" { + if "`varwidth'"=="" local varwidth = cond("`label'"=="", 12, 20) * `varwidthfactor' + if "`modelwidth'"=="" local modelwidth 12 + if "`noabbrev'"=="" local abbrev abbrev + if `"`macval(delimiter)'"'=="" local delimiter `"" ""' + if `"`macval(interaction)'"'=="" local interaction `"" # ""' + } + else if "`defaults'"=="tex" { + if "`varwidth'"=="" local varwidth = cond("`label'"=="", 12, 20) * `varwidthfactor' + if "`modelwidth'"=="" local modelwidth 12 + if `"`macval(delimiter)'"'=="" local delimiter & + if 
`"`macval(end)'"'=="" { + local end \\\ + } + if `"`macval(interaction)'"'=="" local interaction `"" $\times$ ""' + } + else if "`defaults'"=="html" { + if "`varwidth'"=="" local varwidth = cond("`label'"=="", 12, 20) * `varwidthfactor' + if "`modelwidth'"=="" local modelwidth 12 + if `"`macval(begin)'"'=="" local begin + if `"`macval(delimiter)'"'=="" local delimiter + if `"`macval(end)'"'=="" local end + if `"`macval(interaction)'"'=="" local interaction `"" # ""' + } + else if "`defaults'"=="mmd" { + if "`varwidth'"=="" local varwidth = cond("`label'"=="", 12, 20) * `varwidthfactor' + if "`modelwidth'"=="" local modelwidth 12 + if `"`macval(begin)'"'=="" local begin "| " + if `"`macval(delimiter)'"'=="" local delimiter " | " + if `"`macval(end)'"'=="" local end " |" + if `"`macval(interaction)'"'=="" local interaction `"" # ""' + } + if "`nostatslabelsfirst'"=="" local statslabelsfirst first + if "`nostatslabelslast'"=="" local statslabelslast last + if "`novarlabelsfirst'"=="" local varlabelsfirst first + if "`novarlabelslast'"=="" local varlabelslast last + if "`noeqlabelsfirst'"=="" local eqlabelsfirst first + if "`noeqlabelslast'"=="" local eqlabelslast last + if "`nolz'"=="" local lz lz + if `"`macval(discrete)'"'=="" & "`nodiscrete'"=="" { + local discrete `"" (d)" for discrete change of dummy variable from 0 to 1"' + } + if `"`macval(indicatelabels)'"'=="" local indicatelabels "Yes No" + if `"`macval(refcatlabel)'"'=="" & "`norefcatlabel'"=="" local refcatlabel "ref." 
if `"`macval(incelldelimiter)'"'=="" local incelldelimiter " "
	if "`noomitted'"=="" local omitted omitted
	if "`nobaselevels'"=="" local baselevels baselevels
	}
	else {
		// user-supplied style: read option defaults from estout_<style>.def
		capture findfile estout_`defaults'.def
		if _rc {
			di as error `"`defaults' style not available "' ///
				`"(file estout_`defaults'.def not found)"'
			exit 601
		}
		else {
			tempname file
			file open `file' using `"`r(fn)'"', read text
			if c(SE) local max 244
			else local max 80
			while 1 {
				ReadLine `max' `file'
				if `"`line'"'=="" continue, break
				gettoken opt line: line
				// BUG FIX: was "else if", but no governing "if" precedes it
				// (the gettoken above intervenes), which is invalid Stata
				// syntax; a plain "if" is intended here.
				// Map a cell-element option name (e.g. "b_fmt") to its
				// internal tempname alias ("el<k>_fmt") if one exists.
				if index(`"`opt'"',"_") {
					gettoken opt0 opt1: opt, parse("_")
					if `"``opt0'_tname'"'!="" {
						local opt `"``opt0'_tname'`opt1'"'
					}
				}
				// only apply the default if the option was not set by the
				// user (neither the option itself nor its no-variant)
				if `"`macval(`opt')'"'=="" & `"`no`opt''"'=="" {
					if `"`opt'"'=="cells" {
						// cells() defaults need the same element-aliasing
						// treatment as user-specified cells()
						local newline
						gettoken row rest: line, match(par) qed(qed)
						if `"`par'"'=="(" local qed 1
						while `"`row'"'!="" {
							local newrow
							gettoken el row: row, parse(" &")
							while `"`el'"'!="" {
								if `"`el'"'!="." & `"`el'"'!="&" {
									local `el'_tname "el`++elnum'"
									local ``el'_tname'_ "`el'"
									local newrow "`newrow' ``el'_tname'"
								}
								else {
									local newrow "`newrow' `el'"
								}
								gettoken el row: row, parse(" &")
							}
							local newrow: list retok newrow
							if `qed' local newline `"`newline'"`newrow'" "'
							else local newline `"`newline'`newrow' "'
							gettoken row rest: rest, match(par) qed(qed)
							if `"`par'"'=="(" local qed 1
						}
						local line `"`newline'"'
					}
					local line: list retok line
					local `opt' `"`macval(line)'"'
				}
			}
			file close `file'
		}
	}
	if "`notype'"=="" & `"`using'"'=="" local type type
	if "`smcltags'"=="" & "`noasis'"=="" local asis asis
	if "`asis'"!="" local asis "_asis"
	if "`smclrules'"!="" & "`nosmclmidrules'"=="" local smclmidrules smclmidrules
	if "`smclmidrules'"!="" & "`nosmcleqrules'"=="" local smcleqrules smcleqrules
	local haslabcol2 = (`"`macval(labcol2)'"'!="")

*title/notes option
	if
`"`macval(prehead)'`macval(posthead)'`macval(prefoot)'`macval(postfoot)'"'=="" { + if `"`macval(title)'"'!="" { + local prehead `"`"`macval(title)'"'"' + } + if `"`macval(note)'"'!="" { + local postfoot `"`"`macval(note)'"'"' + } + } + +*Generate/clean-up cell contents + if `"`:list clean cells'"'=="" { + local cells b + local b_tname "b" + local b_ "b" + } + else if `"`:list clean cells'"'=="none" { + local cells + } + CellsCheck `"`cells'"' + if `:list sizeof incelldelimiter'==1 gettoken incelldelimiter: incelldelimiter + +*Special treatment of confidence intervals + if "`level'"=="" local level $S_level + if `level'<10 | `level'>99 { + di as error "level(`level') invalid" + exit 198 + } + if "`ci_tname'"!="" { + if `"`macval(`ci_tname'_label)'"'=="" { + local `ci_tname'_label "ci`level'" + } + if `"`macval(`ci_tname'_par)'"'=="" { + local `ci_tname'_par `""" , """' + } + gettoken 1 2 : `ci_tname'_par + gettoken 2 3 : 2 + gettoken 3 : 3 + local `ci_tname'_l_par `""`macval(1)'" "`macval(2)'""' + local `ci_tname'_u_par `""" "`macval(3)'""' + } + if "`ci_l_tname'"!="" { + if `"`macval(`ci_l_tname'_label)'"'=="" { + local `ci_l_tname'_label "min`level'" + } + } + if "`ci_u_tname'"!="" { + if `"`macval(`ci_u_tname'_label)'"'=="" { + local `ci_u_tname'_label "max`level'" + } + } + +*Formats + local firstv: word 1 of `values' + if "`firstv'"=="" local firstv "b" + if "``firstv'_fmt'"=="" local `firstv'_fmt %9.0g + foreach v of local values { + if "``v'_fmt'"=="" local `v'_fmt "``firstv'_fmt'" + if `"`macval(`v'_label)'"'=="" { + local `v'_label "``v'_'" + } + } + +*Check margin option / prepare discrete option / prepare dropped option + if "`margin'"!="" { + if !inlist("`margin'","margin","u","c","p") { + di as error "margin(`margin') invalid" + exit 198 + } + if `"`macval(discrete)'"'!="" { + gettoken discrete discrete2: discrete + } + } + else local discrete + local droppedison = (`"`macval(dropped)'"'!="") + +*Formats/labels/stars for statistics + if "`statsfmt'"=="" 
local statsfmt: word 1 of ``firstv'_fmt' + ProcessStatslayout `"`stats'"' `"`statsfmt'"' `"`statsstar'"' /// + `"`statslayout'"' `"`statspchar'"' + local stats: list uniq stats + if "`statsstar'"!="" local p " p" + else local p + +*Significance stars + local tablehasstars 0 + foreach v of local values { + local el "``v'_'" + if "``v'_star'"!="" | inlist("`el'","_star","_sigsign") { + if "``v'_pvalue'"=="" local `v'_pvalue p + local tablehasstars 1 + } + } + +*Check/define starlevels/make levelslegend + if `tablehasstars' | `"`statsstar'"'!="" { + if `"`macval(starlevels)'"'=="" /// + local starlevels "* 0.05 ** 0.01 *** 0.001" + CheckStarvals `"`macval(starlevels)'"' `"`macval(starlevelslabel)'"' /// + `"`macval(starlevelsdelimiter)'"' + } + +*Get coefficients/variances/statistics: _estout_getres +* - prepare transform/eform + if `"`transform'"'=="" { // transform() overwrites eform() + if "`eform'"!="" { + local transform "exp(@) exp(@)" + if "`eform'"!="eform" { + local transformpattern "`eform'" + } + } + } + foreach m of local transformpattern { + if !( "`m'"=="1" | "`m'"=="0" ) { + di as error "invalid pattern in transform(,pattern()) or eform()" + exit 198 + } + } +* - handle pvalue() suboption + if `tablehasstars' { + local temp + foreach v of local values { + local temp: list temp | `v'_pvalue + } + foreach v of local temp { + if `"``v'_tname'"'=="" { + local `v'_tname "el`++elnum'" + local ``v'_tname'_ "`v'" + local values: list values | `v'_tname + } + } + } +* - prepare list of results to get from e()-matrices + if "`ci_tname'"!="" { + local values: subinstr local values "`ci_tname'" "`ci_tname'_l `ci_tname'_u", word + local `ci_tname'_l_ "ci_l" + local ci_l_tname "`ci_tname'_l" + local `ci_tname'_u_ ci_u + local ci_u_tname "`ci_tname'_u" + } + foreach v of local values { + local temp = ("``v'_transpose'"!="") + local values1mrow `"`values1mrow' `"``v'_' `temp' ``v'_mrow'"'"' + } + tempname D St + if `matrixmode'==0 { +* - expand model names + if 
`"`anything'"'=="" {
		// no models specified: fall back to estimates stored by eststo
		capt est_expand $eststo
		if !_rc {
			local anything `"$eststo"'
		}
		// BUG FIX: was `'"`anything'"' (stray "'" inside the compound
		// double quote), which breaks Stata macro quoting and causes a
		// syntax error; `"`anything'"' is the correct form.
		if `"`anything'"'!="" {
			if `"`: e(scalars)'`: e(macros)'`: e(matrices)'`: e(functions)'"'!="" {
				local inlist: list posof `"`e(_estimates_name)'"' in anything
				if `inlist'==0 {
					di as txt "(tabulating estimates stored by eststo;" ///
						`" specify "." to tabulate the active results)"'
				}
			}
		}
	}
	if `"`anything'"'=="" local anything "."
	capt est_expand `"`anything'"'
	if _rc {
		if _rc==301 { // add e(cmd)="." to current estimates if undefined
			if `:list posof "." in anything' & `"`e(cmd)'"'=="" {
				if `"`: e(scalars)'`: e(macros)'`: e(matrices)'`: e(functions)'"'!="" {
					qui estadd local cmd "."
				}
			}
		}
		est_expand `"`anything'"'
	}
	local models `r(names)'
	// could not happen, ...
	if "`models'" == "" {
		exit
	}
* - get results
	local temp names(`models') coefs(`values1mrow') stats(`stats'`p') ///
		`rename' margin(`margin') meqs(`meqs') dropped(`droppedison') level(`level') ///
		transform(`transform') transformpattern(`transformpattern') ///
		`omitted' `baselevels'
	_estout_getres, `equations' `temp'
	local ccols = r(ccols)
	if `"`equations'"'=="" & "`unstack'"=="" & `ccols'>0 { // specify equations("") to deactivate
		TableIsAMess
		if `value' {
			_estout_getres, equations(main=1) `temp'
		}
	}
	mat `St' = r(stats)
	local nmodels = r(nmodels)
	local ccols = r(ccols)
	if `ccols'>0 {
		mat `B' = r(coefs)
	}
	}
	else { // matrix mode
		// define `St' so that code does not break
		if `"`stats'"'!="" {
			mat `St' = J(`:list sizeof stats',1,.z)
			mat coln `St' = `models'
			mat rown `St' = `stats'
		}
	}
	return add
* - process order() option
	if `"`order'"' != "" {
		ExpandEqVarlist `"`order'"' `B' append
		local order `"`value'"'
		Order `B' `"`order'"'
	}
* - process indicate() option
	local nindicate 0
	foreach indi of local indicate {
		local ++nindicate
		ProcessIndicateGrp `nindicate' `B' `nmodels' `ccols' "`unstack'"
/// + `"`macval(indicatelabels)'"' `"`macval(indi)'"' + } +* - process keep() option + if `"`keep'"' != "" { + ExpandEqVarlist `"`keep'"' `B' + DropOrKeep 1 `B' `"`value'"' + } +* - process drop() option + if `"`drop'"' != "" { + ExpandEqVarlist `"`drop'"' `B' + DropOrKeep 0 `B' `"`value'"' + } + +* - names and equations of final set + capt confirm matrix `B' + if _rc { + return local coefs "" // erase r(coefs) + return local ccols "" + local R 0 + local varlist "" + local eqlist "" + local eqs "__" + local fullvarlist "" + } + else { + tempname C + matrix `C' = `B' + RestoreEmptyEqnames `C' // replace equation name "__" by "_" + return matrix coefs = `C' // replace r(coefs) + local R = rowsof(`B') + local C = colsof(`B') + local eqlist: roweq `B', q + local eqlist: list clean eqlist + UniqEqsAndDims `"`eqlist'"' + if "`unstack'"!="" { + // unstack requires equations to be tied together + // RerrangeEqs resets B, eqlist, eqs, eqsdims + RerrangeEqs `B' `"`eqlist'"' `"`eqs'"' + } + QuotedRowNames `B' + local varlist `"`value'"' + MakeQuotedFullnames `"`varlist'"' `"`eqlist'"' + local fullvarlist `"`value'"' +* - dropped coefs + local droppedpos = `ccols' + if "`margin'"!="" { + local droppedpos `droppedpos' - 1 + } +* - 0/1-variable indicators (for marginals) + mat `D' = `B'[1...,1], J(`R',1,0) // so that row names are copied from `B' + mat `D' = `D'[1...,2] + if "`margin'"!="" { + forv i = 1/`R' { // last colum for each model contains _dummy info + forv j = `ccols'(`ccols')`C' { + if `B'[`i',`j']==1 { + mat `D'[`i',1] = 1 + } + } + } + } + } + +*Prepare element specific keep/drop + local dash + tempname tmpmat + foreach v in star `values' { + local temp `"`fullvarlist'"' + if "`unstack'"!="" { + local temp2: list uniq eqs + local `v'`dash'eqdrop: list uniq eqs + } + if `"``v'`dash'keep'"'!="" { + capt mat `tmpmat' = `B' + ExpandEqVarlist `"``v'`dash'keep'"' `tmpmat' + DropOrKeep 1 `tmpmat' `"`value'"' + capt confirm matrix `tmpmat' + if _rc local temp + else { + 
QuotedRowNames `tmpmat' + MakeQuotedFullnames `"`value'"' `"`: roweq `tmpmat', q'"' + local temp: list temp & value + if "`unstack'"!="" { + local value: roweq `tmpmat', q + local value: list uniq value + local temp2: list temp2 & value + } + } + } + if `"``v'`dash'drop'"'!="" { + capt mat `tmpmat' = `B' + ExpandEqVarlist `"``v'`dash'drop'"' `tmpmat' + DropOrKeep 0 `tmpmat' `"`value'"' + capt confirm matrix `tmpmat' + if _rc local temp + else { + QuotedRowNames `tmpmat' + MakeQuotedFullnames `"`value'"' `"`: roweq `tmpmat', q'"' + local temp: list temp & value + if "`unstack'"!="" { + local value: roweq `tmpmat', q + local value: list uniq value + local temp2: list temp2 & value + } + } + } + local `v'`dash'drop: list fullvarlist - temp + if "`unstack'"!="" { + local `v'`dash'eqdrop: list `v'`dash'eqdrop - temp2 + } + local dash "_" + } + capt mat drop `tmpmat' + +*Prepare unstack + if "`unstack'"!="" & `R'>0 { + local varlist: list uniq varlist + GetVarnamesFromOrder `"`order'"' + local temp: list value & varlist + local varlist: list temp | varlist + local cons _cons + if `:list cons in value'==0 { + if `:list cons in varlist' { + local varlist: list varlist - cons + local varlist: list varlist | cons + } + } + local R: word count `varlist' + local eqswide: list uniq eqs + forv i=1/`nindicate' { + ReorderEqsInIndicate `"`nmodels'"' `"`eqswide'"' /// + `"`indicate`i'eqs'"' `"`macval(indicate`i'lbls)'"' + local indicate`i'lbls `"`macval(value)'"' + } + } + else local eqswide "__" + +*Prepare coefs for tabulation + if `R'>0 { + local i 0 + foreach v of local values { + local ++i + tempname _`v' + forv j = 1/`nmodels' { + mat `_`v'' = nullmat(`_`v''), `B'[1..., (`j'-1)*`ccols'+`i'] + } + mat coln `_`v'' = `models' + mat coleq `_`v'' = `models' + if inlist("``v'_'", "t", "z") { + if `"``v'_abs'"'!="" { // absolute t-values + forv r = 1/`R' { + forv j = 1/`nmodels' { + if `_`v''[`r',`j']>=. 
continue + mat `_`v''[`r',`j'] = abs(`_`v''[`r',`j']) + } + } + } + } + } + } + +*Model labels + if "`nomlabelstitles'"=="" & "`label'"!="" local mlabelstitles titles + local tmp: list sizeof mlabels + local i 0 + foreach model of local models { + local ++i + if `i'<=`tmp' continue + local lab + if "`mlabelsdepvars'"!="" { + local var `"`return(m`i'_depname)'"' + if "`label'"!="" { + local temp = index(`"`var'"',".") + local temp2 = substr(`"`var'"',`temp'+1,.) + capture local lab: var l `temp2' + if _rc | `"`lab'"'=="" { + local lab `"`temp2'"' + } + local temp2 = substr(`"`var'"',1,`temp') + local lab `"`temp2'`macval(lab)'"' + } + else local lab `"`var'"' + } + else if "`mlabelstitles'"!="" { + local lab `"`return(m`i'_estimates_title)'"' + if `"`lab'"'=="" local lab "`model'" + } + else { + local lab "`model'" + } + local mlabels `"`macval(mlabels)' `"`macval(lab)'"'"' + } + if "`mlabelsnumbers'"!="" { + NumberMlabels `nmodels' `"`macval(mlabels)'"' + } + +*Equations labels + local eqconssubok = (`"`macval(eqlabels)'"'!=`""""') + local numeqs: list sizeof eqs + local temp: list sizeof eqlabels + if `temp'<`numeqs' { + forv i = `=`temp'+1'/`numeqs' { + local eq: word `i' of `eqs' + local value + if "`label'"!="" { + capture confirm variable `eq' + if !_rc { + local value: var l `eq' + } + } + if `"`value'"'=="" { + if `"`eq'"'=="__" local value "_" + else local value "`eq'" + } + local eqlabels `"`macval(eqlabels)' `"`value'"'"' + } + } + if `eqconssubok' { + if "`eqlabelsnone'"!="" & `numeqs'>1 & "`unstack'"=="" { + EqReplaceCons `"`varlist'"' `"`eqlist'"' `"`eqlabels'"' `"`macval(varlabels)'"' + if `"`macval(value)'"'!="" { + local varlabels `"`macval(value)' `macval(varlabels)'"' + } + } + } + +*Column labels + if `"`macval(collabels)'"'=="" { + forv j = 1/`ncols' { + local temp + forv i = 1/`nrows' { + local v: word `i' of `cells' + local v: word `j' of `v' + local v: subinstr local v "&" " ", all + local v: subinstr local v "." 
"", all + local v: list retok v + foreach vi of local v { + if `"`macval(temp)'"'!="" { + local temp `"`macval(temp)'/"' + } + local temp `"`macval(temp)'`macval(`vi'_label)'"' + } + } + local collabels `"`macval(collabels)'`"`macval(temp)'"' "' + } + } + +*Prepare refcat() + if `"`macval(refcat)'"'!="" { + PrepareRefcat `"`macval(refcat)'"' + } + +*Determine table layout + local m 1 + local starcol 0 + foreach model of local models { + local e 0 + foreach eq of local eqswide { + local stc 0 + local ++e + if "`unstack'"!="" & `R'>0 { + ModelEqCheck `B' `"`eq'"' `m' `ccols' + if !`value' continue + } + local eqsrow "`eqsrow'`e' " + local modelsrow "`modelsrow'`m' " + local k 0 + local something 0 + forv j = 1/`ncols' { + local col + local nocol 1 + local colhasstats 0 + forv i = 1/`nrows' { + local row: word `i' of `cells' + local v: word `j' of `row' + local v: subinstr local v "&" " ", all + foreach vi in `v' { + if "`vi'"=="." continue + local colhasstats 1 + if "`unstack'"!="" { + local inlist: list posof `"`eq'"' in `vi'_eqdrop + if `inlist' continue + } + if "`:word `m' of ``vi'_pattern''"=="0" { + local v: subinstr local v "`vi'" ".`vi'", word + } + else { + local nocol 0 + if `"``vi'_star'"'!="" local starcol 1 + } + } + local v: subinstr local v " " "&", all + if "`v'"=="" local v "." + local col "`col'`v' " + } + if `colhasstats'==0 local nocol 0 + if !`nocol' { + local colsrow "`colsrow'`j' " + if `++k'>1 { + local modelsrow "`modelsrow'`m' " + local eqsrow "`eqsrow'`e' " + } + if `"`: word `++stc' of `statscolstar''"'=="1" local starcol 1 + local starsrow "`starsrow'`starcol' " + local starcol 0 + Add2Vblock `"`vblock'"' "`col'" + local something 1 + } + } + if !`something' { + local col + forv i = 1/`nrows' { + local col "`col'. 
" + } + Add2Vblock `"`vblock'"' "`col'" + local colsrow "`colsrow'1 " + if `"`: word `++stc' of `statscolstar''"'=="1" local starcol 1 + local starsrow "`starsrow'`starcol' " + local starcol 0 + } + } + local ++m + } + CountNofEqs "`modelsrow'" "`eqsrow'" + local neqs `value' + if `"`extracols'"'!="" { + foreach row in model eq col star { + InsertAtCols `"`extracols'"' `"``row'srow'"' + local `row'srow `"`value'"' + } + foreach row of local vblock { + InsertAtCols `"`extracols'"' `"`row'"' + local nvblock `"`nvblock' `"`value'"'"' + } + local vblock: list clean nvblock + } + local ncols = `: word count `starsrow'' + 1 + `haslabcol2' + +*Modelwidth/varwidth/starwidth + if "`modelwidth'"=="" local modelwidth 0 + if "`varwidth'"=="" local varwidth 0 + local nmodelwidth: list sizeof modelwidth + local modelwidthzero: list uniq modelwidth + local modelwidthzero = ("`modelwidth'"=="0") + if "`labcol2width'"=="" local labcol2width `: word 1 of `modelwidth'' + local starwidth 0 + if `modelwidthzero'==0 { + if `tablehasstars' | `"`statsstar'"'!="" { + Starwidth `"`macval(starlevels)'"' + local starwidth `value' + } + } + if `varwidth'<2 local wrap + +* totcharwidth / hline + local totcharwidth `varwidth' + if c(stata_version)>=14 local length udstrlen + else local length length + capture { + local delwidth = `length'(`macval(delimiter)') + } + if _rc { + local delwidth = `length'(`"`macval(delimiter)'"') + } + if `haslabcol2' { + local totcharwidth = `totcharwidth' + `delwidth' + `labcol2width' + } + local j 0 + foreach i of local starsrow { + local modelwidthj: word `=1 + mod(`j++',`nmodelwidth')' of `modelwidth' + local totcharwidth = `totcharwidth' + `delwidth' + `modelwidthj' + if `i' { + if "`stardetach'"!="" { + local ++ncols + local totcharwidth = `totcharwidth' + `delwidth' + } + local totcharwidth = `totcharwidth' + `starwidth' + } + } + IsInString "@hline" `"`0'"' // sets local strcount + if `strcount' { + local hline `totcharwidth' + if `hline'>400 local hline 
400 // _dup(400) is limit + if `"`macval(hlinechar)'"'=="" local hlinechar "-" + local hline: di _dup(`hline') `"`macval(hlinechar)'"' + } + else local hline + +* check begin, delimiter, end + tempfile tfile + tempname file + file open `file' using `"`tfile'"', write text + foreach opt in begin delimiter end { + capture file write `file' `macval(`opt')' + if _rc { + local `opt' `"`"`macval(`opt')'"'"' + } + } + file close `file' + +* RTF support: set macros rtfrowdef, rtfrowdefbrdrt, rtfrowdefbrdrb, rtfemptyrow + local hasrtfbrdr 0 + local rtfbrdron 0 + IsInString "@rtfrowdef" `"`begin'"' // sets local strcount + local hasrtf = `strcount' + if `hasrtf' { + MakeRtfRowdefs `"`macval(begin)'"' `"`starsrow'"' "`stardetach'" /// + `varwidth' "`modelwidth'" `haslabcol2' `labcol2width' + local varwidth 0 + local wrap + local modelwidth 0 + local nmodelwidth 1 + local modelwidthzero 1 + local starwidth 0 + local labcol2width 0 + IsInString "@rtfrowdefbrdr" `"`begin'"' // sets local strcount + if `strcount' { + local hasrtfbrdr 1 + local rtfbeginbak `"`macval(begin)'"' + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdefbrdrt'"' + local rtfbrdron 1 + } + else { + StableSubinstr begin `"`macval(begin)'"' "@rtfrowdef" `"`rtfrowdef'"' + } + } + +* set widths + if `starwidth'>0 local fmt_stw "%-`starwidth's" + if `varwidth'>0 local fmt_v "%-`varwidth's" + if `labcol2width'>0 local fmt_l2 "%~`labcol2width's" + if "`mgroupsspan'`mlabelsspan'`eqlabelsspan'`collabelsspan'"!="" { + if `modelwidthzero'==0 { + file open `file' using `"`tfile'"', write text replace + file write `file' `macval(delimiter)' + file close `file' + file open `file' using `"`tfile'"', read text + file read `file' delwidth + file close `file' + local delwidth = `length'(`"`macval(delwidth)'"') + } + else local delwidth 0 + } + local stardetachon = ("`stardetach'"!="") + if `stardetachon' { + local stardetach `"`macval(delimiter)'"' + } + +*Prepare @-Variables + local atvars2 
`""`nmodels'" "`neqs'" "`totcharwidth'" `"`macval(hline)'"' `hasrtf' `"`rtfrowdefbrdrt'"' `"`rtfrowdefbrdrb'"' `"`rtfrowdef'"' `"`rtfemptyrow'"'"' + local atvars3 `"`"`macval(title)'"' `"`macval(note)'"' `"`macval(discrete)'`macval(discrete2)'"' `"`macval(starlegend)'"'"' + +*Open output file + file open `file' using `"`tfile'"', write text replace + +*Write prehead + if `"`macval(prehead)'"'!="" { + if index(`"`macval(prehead)'"',`"""')==0 { + local prehead `"`"`macval(prehead)'"'"' + } + } + foreach line of local prehead { + if "`smcltags'"!="" file write `file' "{txt}" + InsertAtVariables `"`macval(line)'"' 0 "`ncols'" `macval(atvars2)' `macval(atvars3)' + file write `file' `"`macval(value)'"' _n + } + local hasheader 0 + if "`smcltags'"!="" local thesmclrule "{txt}{hline `totcharwidth'}" + else local thesmclrule "{hline `totcharwidth'}" + if "`smclrules'"!="" { + file write `file' `"`thesmclrule'"' _n + } + +*Labcol2 - title + if `haslabcol2' { + IsInString `"""' `"`macval(labcol2title)'"' // sets local strcount + if `strcount'==0 { + local labcol2chunk `"`macval(labcol2title)'"' + local labcol2rest "" + } + else { + gettoken labcol2chunk labcol2rest : labcol2title + } + } + +*Write head: Models groups + if "`mgroupsnone'"=="" & `"`macval(mgroups)'"'!="" { + local hasheader 1 + if "`smcltags'"!="" file write `file' "{txt}" + InsertAtVariables `"`macval(mgroupsbegin)'"' 2 "`ncols'" `macval(atvars2)' + local mgroupsbegin `"`macval(value)'"' + InsertAtVariables `"`macval(mgroupsend)'"' 2 "`ncols'" `macval(atvars2)' + local mgroupsend `"`macval(value)'"' + local tmpbegin `"`macval(begin)'"' + local tmpend `"`macval(end)'"' + if "`mgroupsreplace'"!="" { + if `"`macval(mgroupsbegin)'"'!="" local tmpbegin + if `"`macval(mgroupsend)'"'!="" local tmpend + } + MgroupsPattern "`modelsrow'" "`mgroupspattern'" + Abbrev `varwidth' `"`macval(mgroupslhs)'"' "`abbrev'" + local value: di `fmt_v' `"`macval(value)'"' + WriteBegin `"`file'"' `"`macval(mgroupsbegin)'"' 
`"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `haslabcol2' { + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + WriteCaption `"`file'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' "`mgroupspattern'" "`mgroupspattern'" /// + `"`macval(mgroups)'"' "`starsrow'" "`mgroupsspan'" "`abbrev'" /// + "`modelwidth'" "`delwidth'" "`starwidth'" /// + `"`macval(mgroupserepeat)'"' `"`macval(mgroupsprefix)'"' /// + `"`macval(mgroupssuffix)'"' "`haslabcol2'" + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(mgroupsend)'"' /// + `"`"`macval(value)'"'"' + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + gettoken labcol2chunk labcol2rest : labcol2rest + } + +*Write head: Models numbers + if `"`macval(numbers)'"'!="" { + local hasheader 1 + if "`smcltags'"!="" file write `file' "{txt}" + if `"`macval(numbers)'"'=="numbers" local numbers "( )" + file write `file' `macval(begin)' `fmt_v' (`""') + if `haslabcol2' { + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + tokenize `"`macval(numbers)'"' + numlist `"1/`nmodels'"' + WriteCaption `"`file'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' "`modelsrow'" "`modelsrow'" /// + "`r(numlist)'" "`starsrow'" "`mlabelsspan'" "`abbrev'" /// + "`modelwidth'" "`delwidth'" "`starwidth'" /// + `""' `"`macval(1)'"' `"`macval(2)'"' "`haslabcol2'" + file write `file' `macval(end)' _n + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + gettoken labcol2chunk labcol2rest : labcol2rest + } + +*Write head: Models captions + if 
"`nomlabelsnone'"=="" & "`models'"=="." & `"`macval(mlabels)'"'=="." local mlabelsnone "none" + if "`mlabelsnone'"=="" { + local hasheader 1 + if "`smcltags'"!="" file write `file' "{txt}" + InsertAtVariables `"`macval(mlabelsbegin)'"' 2 "`ncols'" `macval(atvars2)' + local mlabelsbegin `"`macval(value)'"' + InsertAtVariables `"`macval(mlabelsend)'"' 2 "`ncols'" `macval(atvars2)' + local mlabelsend `"`macval(value)'"' + local tmpbegin `"`macval(begin)'"' + local tmpend `"`macval(end)'"' + if "`mlabelsreplace'"!="" { + if `"`macval(mlabelsbegin)'"'!="" local tmpbegin + if `"`macval(mlabelsend)'"'!="" local tmpend + } + Abbrev `varwidth' `"`macval(mlabelslhs)'"' "`abbrev'" + local value: di `fmt_v' `"`macval(value)'"' + WriteBegin `"`file'"' `"`macval(mlabelsbegin)'"' `"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `haslabcol2' { + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + WriteCaption `"`file'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' "`modelsrow'" "`modelsrow'" /// + `"`macval(mlabels)'"' "`starsrow'" "`mlabelsspan'" "`abbrev'" /// + "`modelwidth'" "`delwidth'" "`starwidth'" /// + `"`macval(mlabelserepeat)'"' `"`macval(mlabelsprefix)'"' /// + `"`macval(mlabelssuffix)'"' "`haslabcol2'" + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(mlabelsend)'"' /// + `"`"`macval(value)'"'"' + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + gettoken labcol2chunk labcol2rest : labcol2rest + } + +*Write head: Equations captions + if "`eqlabelsnone'"=="" { + InsertAtVariables `"`macval(eqlabelsbegin)'"' 2 "`ncols'" `macval(atvars2)' + local eqlabelsbegin `"`macval(value)'"' + InsertAtVariables `"`macval(eqlabelsend)'"' 2 "`ncols'" `macval(atvars2)' + local eqlabelsend `"`macval(value)'"' + } + if 
`"`eqswide'"'!="__" & "`eqlabelsnone'"=="" { + local hasheader 1 + local tmpbegin `"`macval(begin)'"' + local tmpend `"`macval(end)'"' + if "`eqlabelsreplace'"!="" { + if `"`macval(eqlabelsbegin)'"'!="" local tmpbegin + if `"`macval(eqlabelsend)'"'!="" local tmpend + } + if "`smcltags'"!="" file write `file' "{txt}" + Abbrev `varwidth' `"`macval(eqlabelslhs)'"' "`abbrev'" + local value: di `fmt_v' `"`macval(value)'"' + WriteBegin `"`file'"' `"`macval(eqlabelsbegin)'"' `"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `haslabcol2' { + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + WriteCaption `"`file'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' "`eqsrow'" "`modelsrow'" /// + `"`macval(eqlabels)'"' "`starsrow'" "`eqlabelsspan'" "`abbrev'" /// + "`modelwidth'" "`delwidth'" "`starwidth'" /// + `"`macval(eqlabelserepeat)'"' `"`macval(eqlabelsprefix)'"' /// + `"`macval(eqlabelssuffix)'"' "`haslabcol2'" + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(eqlabelsend)'"' /// + `"`"`macval(value)'"'"' + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + gettoken labcol2chunk labcol2rest : labcol2rest + } + +*Write head: Columns captions + if `"`macval(collabels)'"'!="" & "`collabelsnone'"=="" { + local hasheader 1 + if "`smcltags'"!="" file write `file' "{txt}" + InsertAtVariables `"`macval(collabelsbegin)'"' 2 "`ncols'" `macval(atvars2)' + local collabelsbegin `"`macval(value)'"' + InsertAtVariables `"`macval(collabelsend)'"' 2 "`ncols'" `macval(atvars2)' + local collabelsend `"`macval(value)'"' + local tmpbegin `"`macval(begin)'"' + local tmpend `"`macval(end)'"' + if "`collabelsreplace'"!="" { + if `"`macval(collabelsbegin)'"'!="" local tmpbegin + if `"`macval(collabelsend)'"'!="" local tmpend + } + 
Abbrev `varwidth' `"`macval(collabelslhs)'"' "`abbrev'" + local value: di `fmt_v' `"`macval(value)'"' + WriteBegin `"`file'"' `"`macval(collabelsbegin)'"' `"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `haslabcol2' { + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + WriteCaption `"`file'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' "`colsrow'" "" `"`macval(collabels)'"' /// + "`starsrow'" "`collabelsspan'" "`abbrev'" "`modelwidth'" /// + "`delwidth'" "`starwidth'" `"`macval(collabelserepeat)'"' /// + `"`macval(collabelsprefix)'"' `"`macval(collabelssuffix)'"' "`haslabcol2'" + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(collabelsend)'"' /// + `"`"`macval(value)'"'"' + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + gettoken labcol2chunk labcol2rest : labcol2rest + } + +*Write posthead + if `hasheader' & "`smclmidrules'"!="" { + file write `file' `"`thesmclrule'"' _n + } + if `"`macval(posthead)'"'!="" { + if index(`"`macval(posthead)'"',`"""')==0 { + local posthead `"`"`macval(posthead)'"'"' + } + } + foreach line of local posthead { + if "`smcltags'"!="" file write `file' "{txt}" + InsertAtVariables `"`macval(line)'"' 0 "`ncols'" `macval(atvars2)' `macval(atvars3)' + file write `file' `"`macval(value)'"' _n + } + +* Create mmd alignment/divider line + if `"`defaults'"'=="mmd" { + MakeMMDdef "`varwidth'" "`haslabcol2'" "`labcol2width'" /// + "`modelwidth'" "`starsrow'" "`stardetachon'" "`starwidth'" + file write `file' `"`macval(value)'"' _n + } + +*Write body of table +*Loop over table rows + InsertAtVariables `"`macval(varlabelsbegin)'"' 2 "`ncols'" `macval(atvars2)' + local varlabelsbegin `"`macval(value)'"' + InsertAtVariables `"`macval(varlabelsend)'"' 2 "`ncols'" `macval(atvars2)' 
+ local varlabelsend `"`macval(value)'"' + tempname first + if `"`vblock'"'!="" { + local RI = `R' + `nindicate' + local e 0 + local eqdim = `R' + `nindicate' + local weqcnt 0 + local theeqlabel + if `hasrtfbrdr' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdefbrdrt'"' + local rtfbrdron 1 + } + local varlabelsbegin0 `"`macval(varlabelsbegin)'"' + local eqlabelsbegin0 `"`macval(eqlabelsbegin)'"' + if "`eqlabelsfirst'"=="" local eqlabelsbegin0 + forv r = 1/`R' { + local varlabelsend0 `"`macval(varlabelsend)'"' + local var: word `r' of `varlist' + +*Write equation name/label + if "`unstack'"=="" { + local eqvar: word `r' of `fullvarlist' + if `"`eqs'"'!="__" { + local eqrlast `"`eqr'"' + local eqr: word `r' of `eqlist' + if `"`eqr'"'!=`"`eqrlast'"' & "`eqlabelsnone'"=="" { + local value: word `++e' of `macval(eqlabels)' + local eqdim: word `e' of `macval(eqsdims)' + local weqcnt 0 + if `e'==`numeqs' { + if "`eqlabelslast'"=="" local eqlabelsend + local eqdim = `eqdim' + `nindicate' + } + if "`eqlabelsmerge'"!="" { + local theeqlabel `"`macval(eqlabelsprefix)'`macval(value)'`macval(eqlabelssuffix)'"' + } + else { + local tmpbegin `"`macval(begin)'"' + local tmpend `"`macval(end)'"' + if "`eqlabelsreplace'"!="" { + if `"`macval(eqlabelsbegin0)'"'!="" local tmpbegin + if `"`macval(eqlabelsend)'"'!="" local tmpend + } + if `e'>1 & "`smcleqrules'"!="" { + file write `file' `"`thesmclrule'"' _n + } + WriteBegin `"`file'"' `"`macval(eqlabelsbegin0)'"' `"`macval(tmpbegin)'"' + if "`smcltags'"!="" file write `file' "{res}" + WriteEqrow `"`file'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' `"`macval(value)'"' "`starsrow'" /// + "`eqlabelsspan'" "`varwidth'" "`fmt_v'" "`abbrev'" /// + "`modelwidth'" "`delwidth'" "`starwidth'" /// + `"`macval(eqlabelsprefix)'"' `"`macval(eqlabelssuffix)'"' /// + "`haslabcol2'" "`labcol2width'" "`fmt_l2'" + if "`smcltags'"!="" file write `file' "{txt}" + WriteEnd `"`file'"' `"`macval(tmpend)'"' 
`"`macval(eqlabelsend)'"' + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + local eqlabelsbegin0 `"`macval(eqlabelsbegin)'"' + } + } + } + } + local ++weqcnt + if `weqcnt'==1 { + if "`varlabelsfirst'"=="" local varlabelsbegin0 + } + +*Determine rows to be written + local rvblock + foreach row of local vblock { + local c 0 + local skiprow 1 + local rowhasstats 0 + foreach v of local row { + local ++c + if "`unstack'"!="" { + local eqr: word `:word `c' of `eqsrow'' of `eqs' + if `"`eqr'"'!="" local eqvar `"`eqr':`var'"' + else local eqvar "`var'" + } + local v: subinstr local v "&" " ", all + foreach vi of local v { + if "`vi'"=="." continue + if rownumb(`B',`"`eqvar'"')<. { + local rowhasstats 1 + if index("`vi'",".")==1 continue + local inlist: list posof `"`eqvar'"' in `vi'_drop + if `inlist' continue + local skiprow 0 + continue, break + } + } + if `skiprow'==0 continue, break + } + if `rowhasstats'==0 local skiprow 0 + if `"`ferest()'"'=="" & `"`rvblock'"'=="" local skiprow 0 + if `skiprow' continue + local rvblock `"`rvblock'"`row'" "' + } + local nrvblock: list sizeof rvblock + +*Insert refcat() (unless refcatbelow) + if `"`macval(refcat)'"'!="" { + local isref: list posof `"`var'"' in refcatcoefs + if `isref' { + if "`unstack'"=="" { + local temp `"`eqr'"' + if `"`temp'"'=="" local temp "__" + } + else local temp `"`eqswide'"' + GenerateRefcatRow `B' `ccols' "`var'" `"`temp'"' `"`macval(refcatlabel)'"' + local refcatrow `"`macval(value)'"' + } + } + else local isref 0 + if `isref' & `"`refcatbelow'"'=="" { + if "`smcltags'"!="" file write `file' "{txt}" + local tmpbegin `"`macval(begin)'"' + local tmpend `"`macval(end)'"' + if "`varlabelsreplace'"!="" { + if `"`macval(varlabelsbegin0)'"'!="" local tmpbegin + if `"`macval(varlabelsend0)'"'!="" local tmpend + } + if "`varlabelsnone'"=="" { + local value: word `isref' of `macval(refcatnames)' + Abbrev `varwidth' 
`"`macval(value)'"' "`abbrev'" + } + else local value + local value: di `fmt_v' `"`macval(varlabelsprefix)'`macval(value)'`macval(varlabelssuffix)'"' + WriteBegin `"`file'"' `"`macval(varlabelsbegin0)'"' `"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `haslabcol2' { + gettoken labcol2chunk labcol2 : labcol2 + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + if "`smcltags'"!="" file write `file' "{res}" + WriteStrRow `"`file'"' "`modelsrow'" `"`eqsrow'"' `"`: list sizeof eqswide'"' /// + `"`macval(refcatrow)'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' "`starsrow'" "`abbrev'" /// + "`modelwidth'" "`delwidth'" "`starwidth'" + if "`smcltags'"!="" file write `file' "{txt}" + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(varlabelsend0)'"' + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + local varlabelsbegin0 `"`macval(varlabelsbegin)'"' + } + +*Write variable name/label + if "`smcltags'"!="" file write `file' "{txt}" + local tmpbegin `"`macval(begin)'"' + if "`varlabelsnone'"=="" { + VarInList `"`var'"' "`unstack'" `"`eqvar'"' /// + `"`eqr'"' `"`macval(varlabelsblist)'"' + if `"`macval(value)'"'!="" { + IsInString `"""' `"`value'"' // sets local strcount + if `strcount'==0 { + local value `"`"`macval(value)'"'"' + } + InsertAtVariables `"`macval(value)'"' 2 "`ncols'" `macval(atvars2)' + WriteStrLines `"`file'"' `"`macval(value)'"' + if "`varlabelsreplace'"!="" { + local tmpbegin + local varlabelsbegin0 + } + } + if "`label'"!="" { + CompileVarl, vname(`var') interaction(`macval(interaction)') + } + else local varl `var' + VarInList `"`var'"' "`unstack'" `"`eqvar'"' /// + `"`eqr'"' `"`macval(varlabels)'"' + if `"`macval(value)'"'!="" { + local varl `"`macval(value)'"' + } + if 
`"`macval(discrete)'"'!="" { + local temp 0 + if "`unstack'"=="" { + if `D'[`r',1]==1 local temp 1 + } + else { + foreach eqr of local eqswide { + if `D'[rownumb(`D',`"`eqr':`var'"'),1]==1 local temp 1 + } + } + if `temp'==1 & `temp'<. { + local varl `"`macval(varl)'`macval(discrete)'"' + } + } + } + else local varl + if `hasrtfbrdr' & `r'==`RI' & !(`isref' & `"`refcatbelow'"'!="") { + if `nrvblock'==1 { + if `rtfbrdron' { + // special case: still in first physical row of table + // body; this means that the table body only has a single + // physical row => need line at top and bottom + StableSubinstr tmpbegin `"`macval(tmpbegin)'"' /* + */ "\clbrdrt\brdrw10\brdrs" /* + */ "\clbrdrt\brdrw10\brdrs\clbrdrb\brdrw10\brdrs" all + } + else { + StableSubinstr tmpbegin `"`macval(rtfbeginbak)'"' /* + */ "@rtfrowdefbrdr" `"`rtfrowdefbrdrb'"' + } + local rtfbrdron 1 + } + } + if "`varlabelsreplace'"!="" { + if `"`macval(varlabelsbegin0)'"'!="" local tmpbegin + } + if "`wrap'"!="" & `nrvblock'>1 { + local wrap_i 1 + local value: piece `wrap_i' `varwidth' of `"`macval(theeqlabel)'`macval(varl)'"', nobreak + Abbrev `varwidth' `"`macval(value)'"' "`abbrev'" + } + else { + Abbrev `varwidth' `"`macval(theeqlabel)'`macval(varl)'"' "`abbrev'" + } + local value: di `fmt_v' `"`macval(varlabelsprefix)'`macval(value)'`macval(varlabelssuffix)'"' + WriteBegin `"`file'"' `"`macval(varlabelsbegin0)'"' `"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `haslabcol2' { + gettoken labcol2chunk labcol2 : labcol2 + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + local varlabelsbegin0 `"`macval(varlabelsbegin)'"' + +*Write table cells + if "`smcltags'"!="" file write `file' "{res}" + local 
newrow 0 + mat `first'=J(1,`nmodels',1) + foreach row of local rvblock { + if `hasrtfbrdr' & `r'==`RI' & !(`isref' & `"`refcatbelow'"'!="") { + if `"`ferest()'"'=="" { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdefbrdrb'"' + local rtfbrdron 1 + } + } + local c 0 + foreach v of local row { + local m: word `++c' of `modelsrow' + local unstackskipcoef 0 + if "`unstack'"!="" { + capt local eqr: word `:word `c' of `eqsrow'' of `eqs' + local rr = rownumb(`B', `"`eqr':`var'"') + if `"`eqr'"'!="" local eqvar `"`eqr':`var'"' + else local eqvar "`var'" + if `rr'>=. local unstackskipcoef 1 // local v "." + } + else local rr `r' + if `newrow' & `c'==1 { + if "`smcltags'"!="" file write `file' "{txt}" + if "`wrap'"!="" & `nrvblock'>1 { + local value + local space + while (1) { + local temp: piece `++wrap_i' `varwidth' of `"`macval(varl)'"', nobreak + if `"`macval(temp)'"'=="" continue, break + local value `"`macval(value)'`space'`macval(temp)'"' + if `wrap_i'<`nrvblock' continue, break + local space " " + } + Abbrev `varwidth' `"`macval(value)'"' "`abbrev'" + local value: di `fmt_v' `"`macval(varlabelsprefix)'`macval(value)'`macval(varlabelssuffix)'"' + local value `"`"`macval(value)'"'"' + } + else local value "_skip(`varwidth')" + file write `file' `macval(end)' _n `macval(begin)' `value' + if `haslabcol2' { + file write `file' `macval(delimiter)' `fmt_l2' ("") + } + if "`smcltags'"!="" file write `file' "{res}" + } + local v: subinstr local v "&" " ", all + local modelwidthj: word `=1+mod(`c'-1,`nmodelwidth')' of `modelwidth' + if `modelwidthj'>0 local fmt_m "%`modelwidthj's" + else local fmt_m + local thevalue + foreach vi of local v { + if index("`vi'",".")!=1 { + local inlist: list posof `"`eqvar'"' in `vi'_drop + if `inlist' local vi "..`vi'" + else { + local vipar: subinstr local `vi'_par "@modelwidth" "`modelwidthj'", all + } + } + if index("`vi'",".")==1 { + local value + } + else if `unstackskipcoef' { + local value `"``vi'_vacant'"' + 
} + else if `B'[`rr',`m'*`droppedpos']==1 & `droppedison' { + if `first'[1,`m'] { + local value `"`macval(dropped)'"' + mat `first'[1,`m']=0 + } + else local value + } + else if "``vi'_'"=="ci" { + if `_`vi'_l'[`rr',`m']>=.y local value `"``vi'_vacant'"' + else { + local format: word `r' of ``vi'_fmt' + if "`format'"=="" { + local format: word `:word count ``vi'_fmt'' of ``vi'_fmt' + } + local value = `_`vi'_l'[`rr',`m'] + local vipar: subinstr local `vi'_l_par "@modelwidth" "`modelwidthj'", all + vFormat `value' `format' "`lz'" `"`macval(dmarker)'"' /// + `"`macval(msign)'"' `"`macval(vipar)'"' + local temp "`macval(value)'" + local value = `_`vi'_u'[`rr',`m'] + local vipar: subinstr local `vi'_u_par "@modelwidth" "`modelwidthj'", all + vFormat `value' `format' "`lz'" `"`macval(dmarker)'"' /// + `"`macval(msign)'"' `"`macval(vipar)'"' + local value `"`macval(temp)'`macval(value)'"' + } + } + else if `_`vi''[`rr',`m']>=.y local value `"``vi'_vacant'"' + //else if `_`vi''[`rr',`m']>=. local value . 
+ else if "``vi'_'"=="_star" { + CellStars `"`macval(starlevels)'"' `_```vi'_pvalue'_tname''[`rr',`m'] `"`macval(vipar)'"' + } + else if "``vi'_'"=="_sign" { + MakeSign `_`vi''[`rr',`m'] `"`macval(msign)'"' `"`macval(vipar)'"' + } + else if "``vi'_'"=="_sigsign" { + MakeSign `_`vi''[`rr',`m'] `"`macval(msign)'"' `"`macval(vipar)'"' /// + `"`macval(starlevels)'"' `_```vi'_pvalue'_tname''[`rr',`m'] + } + else { + local format: word `r' of ``vi'_fmt' + if "`format'"=="" { + local format: word `:word count ``vi'_fmt'' of ``vi'_fmt' + } + local value = `_`vi''[`rr',`m'] + vFormat `value' `format' "`lz'" `"`macval(dmarker)'"' /// + `"`macval(msign)'"' `"`macval(vipar)'"' + } + local thevalue `"`macval(thevalue)'`macval(value)'"' + if !`stardetachon' & `:word `c' of `starsrow''==1 { + if `modelwidthj'>0 | `starwidth'>0 local fmt_m "%`=`modelwidthj'+`starwidth''s" + local value + if index("`vi'",".")!=1 & `"``vi'_star'"'!="" { + local inlist: list posof `"`eqvar'"' in stardrop + if !`inlist' { + Stars `"`macval(starlevels)'"' `_```vi'_pvalue'_tname''[`rr',`m'] + } + } + if "`ferest()'"=="" { + local value: di `fmt_stw' `"`macval(value)'"' + } + local thevalue `"`macval(thevalue)'`macval(value)'"' + } + if "`ferest()'"!="" & index("`vi'","..")!=1 { + local thevalue `"`macval(thevalue)'`macval(incelldelimiter)'"' + } + } + if `:length local thevalue'<245 { + local thevalue: di `fmt_m' `"`macval(thevalue)'"' + } + file write `file' `macval(delimiter)' `"`macval(thevalue)'"' + if `stardetachon' & `:word `c' of `starsrow''==1 { + local thevalue + foreach vi of local v { + if index("`vi'",".")!=1 { + local inlist: list posof `"`eqvar'"' in `vi'_drop + if `inlist' local vi "..`vi'" + } + if index("`vi'",".")!=1 & `"``vi'_star'"'!="" { + local inlist: list posof `"`eqvar'"' in stardrop + if `inlist' local value + else { + Stars `"`macval(starlevels)'"' `_```vi'_pvalue'_tname''[`rr',`m'] + } + local thevalue `"`macval(thevalue)'`macval(value)'"' + } + if "`ferest()'"!="" & 
index("`vi'","..")!=1 { + local thevalue `"`macval(thevalue)'`macval(incelldelimiter)'"' + } + } + if `:length local thevalue'<245 { + local thevalue: di `fmt_stw' `"`macval(thevalue)'"' + } + file write `file' `macval(stardetach)' `"`macval(thevalue)'"' + } + } + local newrow 1 + } + +*End of table row + if "`smcltags'"!="" file write `file' "{txt}" + if `weqcnt'==`eqdim' & "`varlabelslast'"=="" /// + & !(`isref' & `"`refcatbelow'"'!="") local varlabelsend0 + local tmpend `"`macval(end)'"' + if "`varlabelsreplace'"!="" { + if `"`macval(varlabelsend0)'"'!="" local tmpend + } + VarInList `"`var'"' "`unstack'" `"`eqvar'"' `"`eqr'"' /// + `"`macval(varlabelselist)'"' + if `"`macval(value)'"'!="" { + IsInString `"""' `"`value'"' // sets local strcount + if `strcount'==0 { + local value `"`"`macval(value)'"'"' + } + InsertAtVariables `"`macval(value)'"' 2 "`ncols'" `macval(atvars2)' + if "`varlabelsreplace'"!="" local varlabelsend0 + } + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(varlabelsend0)'"' /// + `"`macval(value)'"' +* insert refcat() (if refcatbelow) + if `isref' & `"`refcatbelow'"'!="" { + if "`smcltags'"!="" file write `file' "{txt}" + if `hasrtfbrdr' & `r'==`RI' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdefbrdrb'"' + local rtfbrdron 1 + } + if `weqcnt'==`eqdim' & "`varlabelslast'"=="" local varlabelsend0 + local tmpbegin `"`macval(begin)'"' + local tmpend `"`macval(end)'"' + if "`varlabelsreplace'"!="" { + if `"`macval(varlabelsbegin0)'"'!="" local tmpbegin + if `"`macval(varlabelsend0)'"'!="" local tmpend + } + if "`varlabelsnone'"=="" { + local value: word `isref' of `macval(refcatnames)' + Abbrev `varwidth' `"`macval(value)'"' "`abbrev'" + } + else local value + local value: di `fmt_v' `"`macval(varlabelsprefix)'`macval(value)'`macval(varlabelssuffix)'"' + WriteBegin `"`file'"' `"`macval(varlabelsbegin0)'"' `"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `haslabcol2' { + gettoken labcol2chunk labcol2 : 
labcol2 + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + if "`smcltags'"!="" file write `file' "{res}" + WriteStrRow `"`file'"' "`modelsrow'" `"`eqsrow'"' `"`: list sizeof eqswide'"' /// + `"`macval(refcatrow)'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' "`starsrow'" "`abbrev'" /// + "`modelwidth'" "`delwidth'" "`starwidth'" + if "`smcltags'"!="" file write `file' "{txt}" + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(varlabelsend0)'"' + } +* end insert refcat() + } + } + +*Write indicator sets + forv i=1/`nindicate' { + if `hasrtfbrdr' & `i'==`nindicate' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdefbrdrb'"' + local rtfbrdron 1 + } + if `i'==`nindicate' & "`varlabelslast'"=="" local varlabelsend + local tmpbegin `"`macval(begin)'"' + local tmpend `"`macval(end)'"' + if "`varlabelsreplace'"!="" { + if `"`macval(varlabelsbegin0)'"'!="" local tmpbegin + if `"`macval(varlabelsend)'"'!="" local tmpend + } + if "`varlabelsnone'"=="" { + Abbrev `varwidth' `"`macval(indicate`i'name)'"' "`abbrev'" + } + else local value + if "`smcltags'"!="" file write `file' "{txt}" + local value: di `fmt_v' `"`macval(varlabelsprefix)'`macval(value)'`macval(varlabelssuffix)'"' + WriteBegin `"`file'"' `"`macval(varlabelsbegin0)'"' `"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `haslabcol2' { + gettoken labcol2chunk labcol2 : labcol2 + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + if "`smcltags'"!="" file write `file' "{res}" + WriteStrRow `"`file'"' "`modelsrow'" `"`eqsrow'"' `"`: list sizeof eqswide'"' /// + `"`macval(indicate`i'lbls)'"' `"`macval(delimiter)'"' /// + `"`macval(stardetach)'"' "`starsrow'" 
"`abbrev'" /// + "`modelwidth'" "`delwidth'" "`starwidth'" + if "`smcltags'"!="" file write `file' "{txt}" + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(varlabelsend)'"' + } + +*Write prefoot + if `"`macval(prefoot)'"'!="" { + if index(`"`macval(prefoot)'"',`"""')==0 { + local prefoot `"`"`macval(prefoot)'"'"' + } + } + foreach line of local prefoot { + if "`smcltags'"!="" file write `file' "{txt}" + InsertAtVariables `"`macval(line)'"' 0 "`ncols'" `macval(atvars2)' `macval(atvars3)' + file write `file' `"`macval(value)'"' _n + } + if ((`"`vblock'"'!="" & `R'>0) | `nindicate'>0) & "`smclmidrules'"!="" { + if `"`macval(statsarray)'"'!="" { + file write `file' `"`thesmclrule'"' _n + } + } + +*Write foot of table (statistics) + InsertAtVariables `"`macval(statslabelsbegin)'"' 2 "`ncols'" `macval(atvars2)' + local statslabelsbegin `"`macval(value)'"' + InsertAtVariables `"`macval(statslabelsend)'"' 2 "`ncols'" `macval(atvars2)' + local statslabelsend `"`macval(value)'"' + local statslabelsbegin0 `"`macval(statslabelsbegin)'"' + local S: list sizeof statsarray + local eqr "__" + if `hasrtfbrdr' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdefbrdrt'"' + local rtfbrdron 1 + } + forv r = 1/`S' { + if `r'==`S' & `hasrtfbrdr' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdefbrdrb'"' + local rtfbrdron 1 + } + local stat: word `r' of `macval(statslabels)' + if `"`stat'"'=="" local stat: word `r' of `statsrowlbls' + if "`statslabelsnone'"!="" local stat + if "`smcltags'"!="" file write `file' "{txt}" + if `r'==1 & "`statslabelsfirst'"=="" local statslabelsbegin0 + local tmpbegin `"`macval(begin)'"' + if "`statslabelsreplace'"!="" { + if `"`macval(statslabelsbegin0)'"'!="" local tmpbegin + } + Abbrev `varwidth' `"`macval(stat)'"' "`abbrev'" + local value: di `fmt_v' `"`macval(statslabelsprefix)'`macval(value)'`macval(statslabelssuffix)'"' + WriteBegin `"`file'"' `"`macval(statslabelsbegin0)'"' 
`"`macval(tmpbegin)'"' /// + `"`"`macval(value)'"'"' + if `r'==1 & "`statslabelsfirst'"=="" { + local statslabelsbegin0 `"`macval(statslabelsbegin)'"' + } + if `haslabcol2' { + gettoken labcol2chunk labcol2 : labcol2 + Abbrev `labcol2width' `"`macval(labcol2chunk)'"' "`abbrev'" + if `:length local value'<245 { + local value: di `fmt_l2' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + } + if "`smcltags'"!="" file write `file' "{res}" + local strow: word `r' of `statsarray' + local strowlay: word `r' of `macval(statslayout)' + local strowfmt: word `r' of `statsrowfmt' + local strowstar: word `r' of `statsrowstar' + local lastm + local lasteq + local c 0 + local mpos 0 + foreach m of local modelsrow { + local ++c + local modelwidthj: word `=1+mod(`c'-1,`nmodelwidth')' of `modelwidth' + if `modelwidthj'>0 local fmt_m "%`modelwidthj's" + else local fmt_m + if "`m'"=="." { + file write `file' `macval(delimiter)' `fmt_m' (`""') + continue + } + local value + local eq: word `:word `c' of `eqsrow'' of `eqs' + if "`m'"!="`lastm'" { + local stc 0 + local hasmestats 0 + } + if "`m'"!="`lastm'" | `"`eq'"'!="`lasteq'" local stc_eq 0 + local usemestats 0 + local ++stc_eq + local stcell: word `++stc' of `strow' + local stcelllay: word `stc' of `macval(strowlay)' + local stcellfmt: word `stc' of `strowfmt' + local stcellstar: word `stc' of `strowstar' + local cellhasstat 0 + foreach stat of local stcell { + gettoken format stcellfmt: stcellfmt + local rr = rownumb(`St',`"`stat'"') + local value = `St'[`rr',`m'] + if `value'==.y { + local value `"`return(m`m'_`stat')'"' + if `"`value'"'!="" { + local cellhasstat 1 + local stcelllay: subinstr local stcelllay `"`statspchar'"' /// + `"`value'"' + } + } + else if `value'==.x { + local hasmestats 1 + } + else if `value'<.x { + local cellhasstat 1 + vFormat `value' "`format'" "`lz'" `"`macval(dmarker)'"' /// + `"`macval(msign)'"' + local stcelllay: subinstr local stcelllay `"`statspchar'"' /// + 
`"`macval(value)'"' + } + } + if `cellhasstat'==0 & `hasmestats' { + local stcell: word `stc_eq' of `strow' + local stcelllay: word `stc_eq' of `macval(strowlay)' + local stcellfmt: word `stc_eq' of `strowfmt' + local stcellstar: word `stc_eq' of `strowstar' + local cellhasstat 0 + foreach stat of local stcell { + gettoken format stcellfmt: stcellfmt + local rr = rownumb(`St',`"`eq':`stat'"') + if `rr'>=. local value .z + else local value = `St'[`rr',`m'] + if `value'!=.z { + local cellhasstat 1 + vFormat `value' "`format'" "`lz'" `"`macval(dmarker)'"' /// + `"`macval(msign)'"' + local stcelllay: subinstr local stcelllay `"`statspchar'"' `"`macval(value)'"' + } + } + if `cellhasstat' local usemestats 1 + } + if `cellhasstat'==0 local stcelllay + if `:length local stcelllay'<245 { + local stcelllay: di `fmt_m' `"`macval(stcelllay)'"' + } + file write `file' `macval(delimiter)' `"`macval(stcelllay)'"' + if `:word `c' of `starsrow''==1 { + if "`stcellstar'"=="1" & `cellhasstat' { + if `usemestats' { + local rr=rownumb(`St',`"`eq':p"') + } + else { + local rr=rownumb(`St',"p") + } + Stars `"`macval(starlevels)'"' `St'[`rr',`m'] + if `:length local value'<245 { + local value: di `fmt_stw' `"`macval(value)'"' + } + file write `file' `macval(stardetach)' `"`macval(value)'"' + } + else { + file write `file' `macval(stardetach)' _skip(`starwidth') + } + } + local lastm "`m'" + local lasteq `"`eq'"' + } + if `r'==`S' & "`statslabelslast'"=="" local statslabelsend + local tmpend `"`macval(end)'"' + if "`statslabelsreplace'"!="" { + if `"`macval(statslabelsend)'"'!="" local tmpend + } + if "`smcltags'"!="" file write `file' "{txt}" + WriteEnd `"`file'"' `"`macval(tmpend)'"' `"`macval(statslabelsend)'"' + if `hasrtfbrdr' & `rtfbrdron' { + StableSubinstr begin `"`macval(rtfbeginbak)'"' "@rtfrowdefbrdr" `"`rtfrowdef'"' + local rtfbrdron 0 + } + } + +*Write postfoot + if "`smclrules'"!="" { + file write `file' `"`thesmclrule'"' _n + } + local discrete: list retok discrete + if 
`"`macval(postfoot)'"'!="" { + if index(`"`macval(postfoot)'"',`"""')==0 { + local postfoot `"`"`macval(postfoot)'"'"' + } + } + foreach line of local postfoot { + if "`smcltags'"!="" file write `file' "{txt}" + InsertAtVariables `"`macval(line)'"' 0 "`ncols'" `macval(atvars2)' `macval(atvars3)' + file write `file' `"`macval(value)'"' _n + } + +*Write legend (starlevels, marginals) + if "`legend'"!="" { + if `"`macval(discrete2)'"'!="" { + mat `D' = `D''*`D' + if `D'[1,1]!=0 { + if "`smcltags'"!="" file write `file' "{txt}" + file write `file' `"`macval(discrete)'`macval(discrete2)'"' _n + } + } + if `"`macval(starlegend)'"'!="" { + if "`smcltags'"!="" file write `file' "{txt}" + file write `file' `"`macval(starlegend)'"' _n + } + } + +*Finish: copy tempfile to user file / type to screen + file close `file' + local rtfenc = ("`nortfencode'"=="") & (`hasrtf'!=0) & (c(stata_version)>=14) + local S: word count `macval(substitute)' + if `"`topfile'"'!="" { + confirm file `"`topfile'"' + } + if `"`bottomfile'"'!="" { + confirm file `"`bottomfile'"' + } + if `"`using'"'!="" { + tempname file2 + file open `file2' `using', write text `replace' `append' + } + if "`type'"!="" di as res "" + if `"`topfile'"'!="" { + file open `file' using `"`topfile'"', read text + file read `file' temp + while r(eof)==0 { + if `"`using'"'!="" { + file write `file2' `"`macval(temp)'"' _n + } + if "`type'"!="" { + if "`showtabs'"!="" { + local temp: subinstr local temp "`=char(9)'" "", all + } + di `asis' `"`macval(temp)'"' + } + file read `file' temp + } + file close `file' + } + file open `file' using `"`tfile'"', read text + file read `file' temp + while r(eof)==0 { + forv s = 1(2)`S' { + local from: word `s' of `macval(substitute)' + local to: word `=`s'+1' of `macval(substitute)' + if `"`macval(from)'`macval(to)'"'!="" { + local temp: subinstr local temp `"`macval(from)'"' `"`macval(to)'"', all + } + } + if `rtfenc' { + mata: estout_rtfencode("temp") + } + if `"`using'"'!="" { + file 
write `file2' `"`macval(temp)'"' _n + } + if "`type'"!="" { + if "`showtabs'"!="" { + local temp: subinstr local temp "`=char(9)'" "", all + } + di `asis' `"`macval(temp)'"' + } + file read `file' temp + } + file close `file' + if `"`bottomfile'"'!="" { + file open `file' using `"`bottomfile'"', read text + file read `file' temp + while r(eof)==0 { + if `"`using'"'!="" { + file write `file2' `"`macval(temp)'"' _n + } + if "`type'"!="" { + if "`showtabs'"!="" { + local temp: subinstr local temp "`=char(9)'" "", all + } + di `asis' `"`macval(temp)'"' + } + file read `file' temp + } + file close `file' + } + if `"`using'"'!="" { + file close `file2' + gettoken junk using0 : using + return local fn `using0' + if "`outfilenoteoff'"=="" { + di as txt `"(output written to {browse `using0'})"' + } + } +end + +program MoreOptions +// estout has more options than -syntax- can handle; a subroutine is used +// here (rather than a second syntax call) to preserve the 'using' macro +// from the first syntax call +// MoreOptions is intended for options without arguments only + local theoptions /// + NOOMITted OMITted /// + NOBASElevels BASElevels /// + NOEFORM eform /// + NOMargin Margin /// + NODIscrete /// + NODROPPED dropped /// + NOSTARDetach STARDetach /// + NOABbrev ABbrev /// + NOUNStack UNStack /// + NOLZ lz /// + NOLabel Label /// + NOLEgend LEgend /// + NONUMbers NUMbers /// + NOReplace Replace /// + NOAppend Append /// + NOTYpe TYpe /// + NOSHOWTABS showtabs /// + NOASIS asis /// + NOWRAP wrap /// + NOSMCLTags SMCLTags /// + NOSMCLRules SMCLRules /// + NOSMCLMIDRules SMCLMIDRules /// + NOSMCLEQRules SMCLEQRules /// + NOOUTFILENOTEOFF outfilenoteoff /// + NORTFENCODE rtfencode + syntax [, `theoptions' ] + foreach opt of local theoptions { + local opt = lower("`opt'") + c_local `opt' "``opt''" + } + c_local options +end + +program ParseValueSubopts + syntax anything [ , mrow(string asis) NOTranspose Transpose /// + NOStar Star PVALue(string) Fmt(string) Label(string) 
Vacant(string) /// + NOPAR par PAR2(string asis) Keep(string asis) Drop(string asis) /// + PATtern(string) NOABS abs ] + local el: word 1 of `anything' + local elname: word 2 of `anything' + CheckPattern `"`pattern'"' "`elname'" + if `"`macval(par2)'"'!="" { + local par `"`macval(par2)'"' + } + else if "`par'"!="" { + if "`elname'"=="ci" local par "[ , ]" + else if "`elname'"=="ci_l" local par `"[ """' + else if "`elname'"=="ci_u" local par `""" ]"' + else local par "( )" + } + if `"`mrow'"'!="" { + capt confirm integer number `mrow' + if _rc==0 { + if `mrow'>=1 { + if `"`macval(label)'"'=="" { + local label "`elname'[`mrow']" + } + } + else { + local mrow `""`mrow'""' + if `"`macval(label)'"'=="" { + local label `mrow' + } + } + } + else { + gettoken trash : mrow, qed(qed) + if `qed'==0 { + local mrow `"`"`mrow'"'"' + } + if `"`macval(label)'"'=="" { + local label `mrow' + } + } + } + foreach opt in transpose star par abs { + if "`no`opt''"!="" c_local no`el'_`opt' 1 + else c_local `el'_`opt' "``opt''" + } + foreach opt in mrow pvalue fmt label vacant keep drop pattern { + c_local `el'_`opt' `"`macval(`opt')'"' + } +end + +program CheckPattern + args pattern option + foreach p of local pattern { + if !( "`p'"=="1" | "`p'"=="0" ) { + di as error `""`pattern'" invalid in `option'(... 
pattern())"' + exit 198 + } + } +end + +program ParseStatsSubopts + syntax [anything] [ , Fmt(string) Labels(string asis) /// + NOStar Star Star2(string) LAYout(string asis) PChar(string) ] + foreach opt in fmt labels layout pchar { + c_local stats`opt' `"`macval(`opt')'"' + } + if "`nostar'"!="" c_local nostatsstar 1 + else if "`star2'"!="" { + local anything: list anything | star2 + c_local statsstar "`star2'" + } + else if "`star'"!="" { + local star2: word 1 of `anything' + c_local statsstar "`star2'" + } + c_local stats "`anything'" + c_local stats2 +end + +prog ProcessStatslayout // returns statsarray, -rowlbls, -rowfmt, -rowstar, -colstar, -layout + args stats statsfmt statsstar statslayout statspchar + local format "%9.0g" + if `"`statspchar'"'=="" { + local statspchar "@" + c_local statspchar "@" + } + local statsarray + local statsrowlbls + local statsrowfmt + local statsrowstar + local space1 + local i 0 + local wmax 0 + foreach row of local statslayout { + local statsrow + local statsrowlbl + local statsrfmt + local statsrstar + local space2 + local w = 0 + foreach cell of local row { + local ++w + local statscell + local statsclbl `"`cell'"' + local statscfmt + local statscstar 0 + local space3 + local trash: subinstr local cell `"`statspchar'"' "", all count(local cnt) + forv j=1/`cnt' { + local stat: word `++i' of `stats' + local statscell `"`statscell'`space3'`stat'"' + local statsclbl: subinstr local statsclbl `"`statspchar'"' "`stat'" + local tmp: word `i' of `statsfmt' + if `"`tmp'"'!="" local format `"`tmp'"' + local statscfmt `"`statscfmt'`space3'`format'"' + if `:list stat in statsstar' { + local statscstar 1 + local statscol_`w' 1 + } + local space3 " " + } + local statsrow `"`statsrow'`space2'"`statscell'""' + local statsrowlbl `"`statsrowlbl'`space2'`statsclbl'"' + local statsrfmt `"`statsrfmt'`space2'"`statscfmt'""' + local statsrstar "`statsrstar'`space2'`statscstar'" + local space2 " " + } + local statsarray 
`"`statsarray'`space1'`"`statsrow'"'"' + local statsrowlbls `"`statsrowlbls'`space1'`"`statsrowlbl'"'"' + local statsrowfmt `"`statsrowfmt'`space1'`"`statsrfmt'"'"' + local statsrowstar `"`statsrowstar'`space1'`"`statsrstar'"'"' + local space1 " " + local wmax = max(`w',`wmax') + } + while (1) { + local stat: word `++i' of `stats' + if `"`stat'"'=="" continue, break + local tmp: word `i' of `statsfmt' + if `"`tmp'"'!="" local format `"`tmp'"' + local statscstar: list stat in statsstar + if `statscstar' local statscol_1 1 + local statsarray `"`statsarray'`space1'`"`stat'"'"' + local statsrowlbls `"`statsrowlbls'`space1'`"`stat'"'"' + local statsrowfmt `"`statsrowfmt'`space1'`"`format'"'"' + local statsrowstar `"`statsrowstar'`space1'`"`statscstar'"'"' + local statslayout `"`statslayout'`space1'`statspchar'"' + local space1 " " + local wmax = max(1,`wmax') + } + local statscolstar + local space + forv w = 1/`wmax' { + if "`statscol_`w''"=="" local statscol_`w' 0 + local statscolstar "`statscolstar'`space'`statscol_`w''" + local space " " + } + c_local statsarray `"`statsarray'"' + c_local statsrowlbls `"`statsrowlbls'"' + c_local statsrowfmt `"`statsrowfmt'"' + c_local statsrowstar `"`statsrowstar'"' + c_local statscolstar `"`statscolstar'"' + c_local statslayout `"`statslayout'"' +end + +program ParseLabelsSubopts + gettoken type 0: 0 + local lblsubopts + syntax [anything] [ , NONUMbers NUMbers NOTItles TItles NODEPvars DEPvars /// + NONONE NONE NOSPAN span Prefix(string) Suffix(string) Begin(string asis) /// + End(string asis) NOReplace Replace BList(string asis) EList(string asis) /// + ERepeat(string) NOFirst First NOLast Last lhs(string) PATtern(string) /// + NOMerge Merge ] + CheckPattern `"`pattern'"' "`type'" + if "`merge'"!="" & "`nomerge'`macval(suffix)'"=="" local suffix ":" + foreach opt in begin end { + if `"`macval(`opt')'"'!="" { + if index(`"`macval(`opt')'"', `"""')==0 { + local `opt' `"`"`macval(`opt')'"'"' + } + } + } + foreach opt in prefix suffix 
begin end blist elist erepeat lhs pattern { + c_local `type'`opt' `"`macval(`opt')'"' + } + foreach opt in numbers titles depvars span replace none first last merge { + if "`no`opt''"!="" c_local no`type'`opt' 1 + else c_local `type'`opt' "``opt''" + } + c_local `type' `"`macval(anything)'"' +end + +program ReadLine + args max file + local end 0 + file read `file' temp1 + local temp1: subinstr local temp1 "`=char(9)'" " ", all + while r(eof)==0 { + local j 1 + local temp2 + local temp3: piece `j++' `max' of `"`macval(temp1)'"' + if `"`temp3'"'=="" | index(`"`temp3'"',"*")==1 /// + | index(`"`temp3'"',"//")==1 { + file read `file' temp1 + local temp1: subinstr local temp1 "`=char(9)'" " ", all + continue + } + while `"`temp3'"'!="" { + local comment=index(`"`macval(temp3)'"'," ///") + if `comment' { + local temp3=substr(`"`macval(temp3)'"',1,`comment') + local temp2 `"`macval(temp2)'`macval(temp3)'"' + local end 0 + continue, break + } + local comment=index(`"`macval(temp3)'"'," //") + if `comment' { + local temp3=substr(`"`macval(temp3)'"',1,`comment') + local temp2 `"`macval(temp2)'`macval(temp3)'"' + local end 1 + continue, break + } + local temp2 `"`macval(temp2)'`macval(temp3)'"' + local temp3: piece `j++' `max' of `"`macval(temp1)'"' + local end 1 + } + if `end' { + local line `"`macval(line)'`macval(temp2)'"' + continue, break + } + else { + local line `"`macval(line)'`macval(temp2)'"' + file read `file' temp1 + local temp1: subinstr local temp1 "`=char(9)'" " ", all + } + } + c_local line `"`macval(line)'"' +end + +program CellsCheck + args cells + local ncols 0 + local nrows 0 + local cells: subinstr local cells "& " "&", all + local cells: subinstr local cells " &" "&", all + local cells: subinstr local cells `"&""' `"& ""', all + local cells: subinstr local cells `""&"' `"" &"', all + foreach row of local cells { + local newrow + foreach col of local row { + local vals: subinstr local col "&" " ", all + //local vals: list vals - values + local values: 
list values | vals + local vals: list retok vals + local vals: subinstr local vals " " "&", all + //local newrow: list newrow | vals + local newrow `"`newrow'`vals' "' + } + local newrow: list retok newrow + if "`newrow'"!="" { + local ncols = max(`ncols',`:list sizeof newrow') + local newcells `"`newcells'"`newrow'" "' + local ++nrows + } + } + local newcells: list retok newcells + c_local cells `"`newcells'"' + c_local ncols `ncols' + c_local nrows `nrows' + local dot "." + c_local values: list values - dot +end + +program Star2Cells + args cells star + local newcells + foreach row of local cells { + local newrow + foreach col of local row { + if "`col'"=="`star'" { + local col "`col'star" + } + local newrow: list newrow | col + } + local newcells `"`newcells'"`newrow'" "' + } + local newcells: list retok newcells + c_local cells `"`newcells'"' +end + +prog ParseStarlevels + syntax [anything(equalok)] [ , Label(str) Delimiter(str) ] + c_local starlevels `"`macval(anything)'"' + c_local starlevelslabel `"`macval(label)'"' + c_local starlevelsdelimiter `"`macval(delimiter)'"' +end + +program CheckStarvals + args starlevels label del + if `"`macval(label)'"'=="" local label " p<" + if `"`macval(del)'"'=="" local del ", " + local nstar: word count `macval(starlevels)' + local nstar = `nstar'/2 + capture confirm integer number `nstar' + if _rc { + di as error "unmatched list of significance symbols and levels" + exit 198 + } + local istar 1 + forv i = 1/`nstar' { + local iistar: word `=`i'*2' of `macval(starlevels)' + confirm number `iistar' + if `iistar'>`istar' | `iistar'<=0 { + di as error "significance levels out of order or out of range (0,1]" + exit 198 + } + local istar `iistar' + local isym: word `=`i'*2-1' of `macval(starlevels)' + if `"`macval(legend)'"'!="" { + local legend `"`macval(legend)'`macval(del)'"' + } + local ilabel: subinstr local label "@" "`istar'", count(local hasat) + if `hasat'==0 { + local ilabel `"`macval(label)'`istar'"' + } + local 
legend `"`macval(legend)'`macval(isym)'`macval(ilabel)'"' + } + c_local starlegend `"`macval(legend)'"' +end + +program Starwidth + args starlevels + if c(stata_version)>=14 local length udstrlen + else local length length + local nstar: word count `macval(starlevels)' + forv i = 2(2)`nstar' { + local istar: word `=`i'-1' of `macval(starlevels)' + local width = max(length("`width'"),`length'(`"`macval(istar)'"')) + } + c_local value `width' +end + +// Loosely based on Mkemat from est_table.ado, but with heavy modifications +program _estout_getres, rclass + syntax, names(str) [ coefs(str asis) stats(str asis) equations(str) /// + rename(str asis) margin(str asis) meqs(str asis) /// + dropped(int 0) level(int 95) /// + transform(str asis) transformpattern(str asis) /// + omitted baselevels ] + // coefs: coef "coef O/1 #" `"coef O/1 "rowname""' etc... + + tempname bc bbc bs bbs st + + local nnames : word count `names' + local rename : subinstr local rename "," "", all + if `"`stats'"' != "" { + local stats : subinstr local stats "," "", all + confirm names `stats' + local stats : list uniq stats + local nstat : list sizeof stats + mat `bbs' = J(`nstat', `nnames', .z) + mat colnames `bbs' = `: subinstr local names "." "active", all word' + mat rownames `bbs' = `stats' + } + + if "`equations'" != "" { + MatchNames "`equations'" + local eqspec `r(eqspec)' + local eqnames `r(eqnames)' + } + + local ncoefs 0 + foreach coefn of local coefs { + local ++ncoefs + gettoken coef : coefn + local coefnms `"`coefnms' `coef'"' // use more informative label? 
(coefn => error in Stata 8 and 10) + } + local bVs "b se var t z p ci_l ci_u _star _sign _sigsign" + local hasbVs = `"`: list coefnms & bVs'"'!="" + local hastransform = (`"`transform'"'!="") & `hasbVs' + local getbV = cond(`hasbVs' | `dropped', "b var ", "") + + tempname hcurrent esample + local estcycle = ("`names'"!=".") + if `estcycle' { + _est hold `hcurrent', restore nullok estsystem + } + + local ni 0 + local hasbbc 0 + local ccols = `ncoefs' + ("`margin'"!="") + `dropped' + foreach name of local names { + local ++ni + local hasbc 0 + local hasmargin 0 + nobreak { + if "`name'" != "." { + local eqname `name' + *est_unhold `name' `esample' // (why preserve missings in esample?) + capt confirm new var _est_`name' // fix e(sample) if obs have been added + if _rc qui replace _est_`name' = 0 if _est_`name' >=. + _est unhold `name' + } + else { + local eqname active + if `estcycle' { + _est unhold `hcurrent' + } + } + + // get coefficients + capture noisily break { + CheckEqs `"`getbV'`coefs'"' // sets local seqmerge + GetCoefs `bc' `seqmerge' `"`getbV'`coefs'"' // sets local hasbc + if `hasbc' { + mat coln `bc' = `getbV'`coefnms' + SubstEmptyEqname `bc' // replace empty eqname "_" by "__" + } + } + local rc = _rc + + // set equation names and get marginal effects + if `hasbc' & `rc'==0 { + capture noisily break { + if `dropped' { + DroppedCoefs `bc' + } + if "`equations'"!="" { + AdjustRowEq `bc' `ni' `nnames' "`eqspec'" "`eqnames'" + } + if "`margin'"!="" & `hasbVs' { + GetMarginals `bc' "`margin'" `"`meqs'"' // resets local hasmargin + } + if `hasbVs' { + ComputeCoefs `bc' `hasmargin' `"`coefnms'"' `level' + } + if `hastransform' & `hasbVs' { + if `"`transformpattern'"'!="" { + local transformthis: word `ni' of `transformpattern' + } + else local transformthis 1 + if `"`transformthis'"'=="1" { + TransformCoefs `bc' `"`coefnms'"' `"`transform'"' + } + } + if "`getbV'"!="" { + mat `bc' = `bc'[1...,3...] 
// remove b and var + } + } + local rc = _rc + } + + // get stats + if `rc'==0 { + capture noisily break { + if "`stats'" != "" { + GetStats "`stats'" `bbs' `ni' + if `hasbc'>0 & inlist(`"`e(cmd)'"', "reg3", "sureg", "mvreg") { + GetEQStats "`stats'" `bbs' `ni' `bc' + } + return add + } + } + local rc = _rc + } + + local depname: word 1 of `e(depvar)' + return local m`ni'_depname "`depname'" + + local title `"`e(estimates_title)'"' + if `"`title'"'=="" local title `"`e(_estimates_title)'"' // prior to Stata 10 + return local m`ni'_estimates_title `"`title'"' + + if "`name'" != "." { + *est_hold `name' `esample' + _est hold `name', estimates varname(_est_`name') + } + else { + if `estcycle' { + _est hold `hcurrent', restore nullok estsystem + } + } + } + + if `rc' { + exit `rc' + } + + if (c(stata_version)>=11) & (`hasbc'>0) { + mata: estout_omitted_and_base() // sets local hasbc + } + + if `hasbc'>0 { + mat coleq `bc' = `eqname' + if `"`rename'"'!="" { + RenameCoefs `bc' `"`rename'"' + } + if `hasbbc' { + _estout_mat_capp `bbc' : `bbc' `bc', miss(.z) cons ts + } + else { + mat `bbc' = `bc' + if `ni'>1 { // add previous empty models + mat `bc' = (1, `bc'[1,1...]) \ ( `bc'[1...,1], J(rowsof(`bc'), colsof(`bc'), .z)) + mat `bc' = `bc'[2...,2...] + forv nj = 1/`ni' { + if `nj'==`ni' continue + local eqname: word `nj' of `names' + if `"`eqname'"'=="." { + local eqname active + } + mat coleq `bc' = `eqname' + mat `bbc' = `bc', `bbc' + } + } + } + local hasbbc 1 + } + else { + if `hasbbc' { // add empty model if bbc exists + mat `bc' = `bbc'[1...,1..`ccols'] + mat `bc' = (1, `bc'[1,1...]) \ ( `bc'[1...,1], J(rowsof(`bc'), colsof(`bc'), .z)) + mat `bc' = `bc'[2...,2...] 
+ mat coleq `bc' = `eqname' + mat `bbc' = `bbc', `bc' + } + } + } + + if `hasbbc' { + return matrix coefs = `bbc' + return scalar ccols = `ccols' + } + else { + return scalar ccols = 0 // indicates that r(coefs) is missing + } + if "`stats'" != "" { + return matrix stats = `bbs' + } + return local names `names' + return scalar nmodels = `ni' +end + +program _estout_mat_capp + // variant of mat_capp that is robust against blanks in coefficient names + if c(stata_version)<11 { // requires Stata 11 or newer + mat_capp `0' + exit + } + syntax anything [, * ] + gettoken m1 m3 : anything, parse(":") // mat1 + gettoken m2 m3 : m3, parse(":") // : + gettoken m2 m3 : m3 // mat2 + gettoken m3 : m3 // mat3 + local hasblanks 0 + mata: estout_rown_hasblanks("hasblanks", ("`m2'", "`m3'")) + if `hasblanks'==0 { + mat_capp `0' + exit + } + mata: estout_mat_capp("`m1'", ("`m2'", "`m3'")) +end + +program DroppedCoefs // identify dropped coeffficients + args bc + tempname tmp + mat `tmp' = `bc'[1..., 1] * 0 + mat coln `tmp' = "_dropped" + local r = rowsof(`bc') + forv i = 1/`r' { + if `bc'[`i',1]==0 & `bc'[`i',2]==0 { // b=0 and var=0 + mat `tmp'[`i',1] = 1 + } + } + mat `bc' = `bc', `tmp' +end + +program RenameCoefs + args bc rename + local Stata11 = cond(c(stata_version)>=11, "version 11:", "") + tempname tmp + local eqs: roweq `bc', q + local eqs: list clean eqs + local eqs: list uniq eqs + local newnames + foreach eq of local eqs { + mat `tmp' = `bc'[`"`eq':"',1] + QuotedRowNames `tmp' + local vars `"`value'"' + local rest `"`rename'"' + while (`"`rest'"'!="") { + gettoken from rest : rest + gettoken to rest : rest + if `"`from'`to'"'=="" continue + gettoken equ x : from, parse(:) + local equ: list clean equ + if `"`equ'"'==":" { // case 1: ":varname" + local equ + local x: list clean x + } + else if `"`x'"'=="" { // case 2: "varname" + local x `"`equ'"' + local equ + } + else { // case 3. 
"eqname:varname" + if `"`equ'"'=="_" local equ "__" + gettoken colon x : x, parse(:) + local x: list clean x + } + if `"`x'"'=="" { + di as err "invalid rename()" + exit 198 + } + if index(`"`to'"',":") | `"`to'"'=="" { + di as err "invalid rename()" + exit 198 + } + if `"`equ'"'!="" { + if `"`equ'"'!=`"`eq'"' continue // different equation + } + local x `"`"`x'"'"' + local x: list clean x + local vars: subinstr local vars `"`x'"' `"`"`to'"'"', word + } + local newnames `"`newnames'`vars' "' + } + `Stata11' mat rown `bc' = `newnames' +end + +// Source: est_table.ado version 1.1.4 09oct2008 (unmodified) +program MatchNames, rclass + args eqspec + + local eqspec : subinstr local eqspec ":" " ", all + local eqspec0 : subinstr local eqspec "#" "" , all + + local iterm 0 + gettoken term eqspec : eqspec0 , parse(",") + while "`term'" != "" { + local ++iterm + + // term = [name =] { # | #-list } + gettoken eqname oprest: term, parse("=") + gettoken op rest : oprest, parse("=") + if trim(`"`op'"') == "=" { + confirm name `eqname' + local term `rest' + } + else { + local eqname #`iterm' + } + local eqnames `eqnames' `eqname' + + if "`eqspec'" == "" { + continue, break + } + gettoken term eqspec: eqspec , parse(",") + assert "`term'" == "," + gettoken term eqspec: eqspec , parse(",") + } + + if `"`:list dups eqnames'"' != "" { + dis as err "duplicate matched equation names" + exit 198 + } + + return local eqspec `eqspec0' + return local eqnames `eqnames' +end + +// Source: est_table.ado version 1.1.4 09oct2008 +// 02oct2013: added -version 11: matrix roweq- to support new eqnames +program AdjustRowEq + args b ni nmodel eqspec eqnames + + local beqn : roweq `b', quote + local beqn : list clean beqn + local beq : list uniq beqn + + if `"`:list beq & eqnames'"' != "" { + dis as err "option equations() invalid" + dis as err "specified equation name already occurs in model `ni'" + exit 198 + } + + local iterm 0 + gettoken term eqspec : eqspec , parse(",") + while "`term'" != "" { 
+ // dis as txt "term:|`term'|" + local ++iterm + + // term = [name =] { # | #-list } + gettoken eqname oprest: term, parse("=") + gettoken op rest : oprest, parse("=") + if trim(`"`op'"') == "=" { + local term `rest' + } + else { + local eqname #`iterm' + } + + local nword : list sizeof term + if !inlist(`nword', 1, `nmodel') { + dis as err "option equations() invalid" + dis as err "a term should consist of either 1 or `nmodel' equation numbers" + exit 198 + } + if `nword' > 1 { + local term : word `ni' of `term' + } + + if trim("`term'") != "." { + capt confirm integer number `term' + if _rc { + dis as err "option equations() invalid" + dis as err "`term' was found, while an integer equation number was expected" + exit 198 + } + if !inrange(`term',1,`:list sizeof beq') { + dis as err "option equations() invalid" + dis as err "equation number `term' for model `ni' out of range" + exit 198 + } + if `:list posof "`eqname'" in beq' != 0 { + dis as err "impossible to name equation `eqname'" + dis as err "you should provide (another) equation name" + exit 198 + } + + local beqn : subinstr local beqn /// + `"`:word `term' of `beq''"' /// + "`eqname'" , word all + } + + if "`eqspec'" == "" { + continue, break + } + gettoken term eqspec: eqspec , parse(",") + assert "`term'" == "," + gettoken term eqspec: eqspec , parse(",") + } + if c(stata_version)>=11 { // similar to RenameCoefs + version 11: matrix roweq `b' = `beqn' + } + else { + matrix roweq `b' = `beqn' + } +end + +// Source: est_table.ado version 1.1.4 09oct2008 (modified) +// Modification: returns string scalars in r(m`ni'_name) (and sets `bbs' = .y) +program GetStats, rclass + args stats bbs ni + tempname rank st V + local escalars : e(scalars) + local emacros : e(macros) + local is 0 + foreach stat of local stats { + local ++is + if inlist("`stat'", "aic", "bic", "rank") { + if "`hasrank'" == "" { + capt mat `V' = syminv(e(V)) + local rc = _rc + if `rc' == 0 { + scalar `rank' = colsof(`V') - diag0cnt(`V') + } 
+ else if `rc' == 111 { + scalar `rank' = 0 + } + else { + // rc<>0; show error message + mat `V' = syminv(e(V)) + } + local hasrank 1 + } + if "`stat'" == "aic" { + scalar `st' = -2*e(ll) + 2*`rank' + } + else if "`stat'" == "bic" { + scalar `st' = -2*e(ll) + log(e(N)) * `rank' + } + else if "`stat'" == "rank" { + scalar `st' = `rank' + } + } + else { + if `:list stat in escalars' > 0 { + scalar `st' = e(`stat') + } + else if "`stat'"=="p" { + if `"`e(F)'"'!="" { + scalar `st' = Ftail(e(df_m), e(df_r), e(F)) + } + else if `"`e(chi2)'"'!="" { + scalar `st' = chi2tail(e(df_m), e(chi2)) + } + else { + scalar `st' = .z + } + } + else if `:list stat in emacros' > 0 { + scalar `st' = .y + capt return local m`ni'_`stat' `"`e(`stat')'"' // name might be too long + } + else { + scalar `st' = .z + } + } + mat `bbs'[`is',`ni'] = `st' + } +end + +program GetEQStats, rclass // eq-specific stats for reg3, sureg, and mvreg (sets `bbs' = .x) + args stats bbs ni bc + return add + tempname addrow + local ic "aic bic rank" + local eqs: roweq `bc', q + local eqs: list clean eqs + local eqs: list uniq eqs + local s 0 + foreach stat of local stats { + local ++s + if inlist(`"`stat'"', "aic", "bic", "rank") continue + if `bbs'[`s',`ni']<.y continue + local e 0 + local found 0 + foreach eq of local eqs { + local ++e + if e(cmd)=="mvreg" { + if "`stat'"=="p" local value: word `e' of `e(p_F)' + else local value: word `e' of `e(`stat')' + } + else if "`stat'"=="df_m" { + local value `"`e(`stat'`e')'"' + } + else { + local value `"`e(`stat'_`e')'"' + } + capture confirm number `value' + if _rc==0 { + local found 1 + local r = rownumb(`bbs', `"`eq':`stat'"') + if `r'>=. 
{ + mat `addrow' = J(1, colsof(`bbs'), .z) + mat rown `addrow' = `"`eq':`stat'"' + mat `bbs' = `bbs' \ `addrow' + local r = rownumb(`bbs', `"`eq':`stat'"') + } + mat `bbs'[`r',`ni'] = `value' + } + } + if `found' { + if `bbs'[`s',`ni']==.y { + capt return local m`ni'_`stat' "" + } + mat `bbs'[`s',`ni'] = .x + } + } +end + +program CheckEqs + args coefs + tempname tmp + local j 0 + local bVs "b _star _sign _sigsign" + local seqmerge 0 + local hasseqs 0 + foreach coefn in `coefs' { + local ++j + gettoken coef row : coefn + gettoken transpose row : row + gettoken row : row, q + if `"`coef'"'=="b" & `j'==1 { + capt confirm mat e(`coef') + if _rc continue + mat `tmp' = e(`coef') + local eqs: coleq `tmp', q + if `:list posof "_" in eqs'==0 { + local seqmerge 1 + } + else continue, break + } + if `:list coef in bVs' continue + capt confirm mat e(`coef') + if _rc continue + mat `tmp' = e(`coef') + if `transpose' { + mat `tmp' = `tmp'' + } + if `"`row'"'=="" local row 1 + capt confirm number `row' + if _rc { + local row = rownumb(`tmp',`row') + } + if `row'>rowsof(`tmp') continue + local eqs: coleq `tmp', q + if `:list posof "_" in eqs' { + local eqs: list uniq eqs + local eqs: list clean eqs + if `"`eqs'"'!="_" { // => contains "_" but also others + local local seqmerge 0 + continue, break + } + else local hasseqs 1 + } + else { + local seqmerge 1 + } + } + if `hasseqs'==0 local seqmerge 0 + c_local seqmerge `seqmerge' +end + +program SubstEmptyEqname // replace empty equation name "_" by "__" + args M + local eqs: roweq `M', q + if `: list posof "_" in eqs' { + local eqs: subinstr local eqs `""_""' `""__""', all + mat roweq `M' = `eqs' + } +end + +program RestoreEmptyEqnames // replace equation name "__" by "_" + args M + local eqs: roweq `M', q + if `: list posof "__" in eqs' { + local eqs: subinstr local eqs `""__""' `""_""', all + mat roweq `M' = `eqs' + } +end + +program GetCoefs + args bc seqmerge coefs + tempname tmp + local hasbc 0 + local j 0 + local bVs "b _star 
_sign _sigsign" + foreach coefn of local coefs { + local ++j + gettoken coef row : coefn + gettoken transpose row : row + gettoken row : row, q + local isinbVs: list coef in bVs + if `isinbVs' & `j'>2 { + if `hasbc'==0 continue + mat `bc' = `bc', J(rowsof(`bc'),1, .y) + continue + } + if `j'==2 & `"`coef'"'=="var" { + local isinbVs 1 + capt mat `tmp' = vecdiag(e(V)) + if _rc { + capt confirm mat e(se) + if _rc==0 { + mat `tmp' = e(se) + forv i = 1/`=colsof(`tmp')' { + mat `tmp'[1, `i'] = `tmp'[1, `i']^2 + } + } + } + } + else { + capt confirm mat e(`coef') + if _rc==0 { + mat `tmp' = e(`coef') + } + } + if _rc { + if `hasbc'==0 continue + mat `bc' = `bc', J(rowsof(`bc'),1, .y) + continue + } + if `isinbVs'==0 { // => not b or var + if `transpose' { + mat `tmp' = `tmp'' + } + if `"`row'"'=="" local row 1 + capt confirm number `row' + if _rc { + local row = rownumb(`tmp',`row') + } + if `row'>rowsof(`tmp') { + if `hasbc'==0 continue + mat `bc' = `bc', J(rowsof(`bc'),1, .y) + continue + } + mat `tmp' = `tmp'[`row', 1...] + } + local bcols = colsof(`tmp') + if `bcols'==0 { + if `hasbc'==0 continue + mat `bc' = `bc', J(rowsof(`bc'),1, .y) + continue + } + mat `tmp' = `tmp'' + if `seqmerge' & `isinbVs'==0 { + local eqs: roweq `tmp', q + local eqs: list uniq eqs + local eqs: list clean eqs + if `"`eqs'"'=="_" { + local seqmergejs `seqmergejs' `j' + local seqmergecoefs `"`seqmergecoefs'`"`coefn'"' "' + if `hasbc'==0 continue + mat `bc' = `bc', J(rowsof(`bc'),1, .y) + continue + } + } + if `hasbc'==0 { + mat `bc' = `tmp' + local hasbc 1 + if `j'>1 { + mat `bc' = `bc', J(`bcols',`j'-1, .y), `bc' + mat `bc' = `bc'[1...,2...] 
+ } + } + else { + _estout_mat_capp `bc' : `bc' `tmp', miss(.y) cons ts + } + } + foreach coefn of local seqmergecoefs { + gettoken j seqmergejs : seqmergejs + gettoken coef row : coefn + gettoken transpose row : row + gettoken row : row, q + mat `tmp' = e(`coef') + if `transpose' { + mat `tmp' = `tmp'' + } + if `"`row'"'=="" local row 1 + capt confirm number `row' + if _rc { + local row = rownumb(`tmp',`row') + } + mat `tmp' = `tmp'[`row', 1...] + SEQMerge `bc' `j' `tmp' + } + c_local hasbc `hasbc' +end + +program SEQMerge + args bc j x + tempname tmp + local r = rowsof(`bc') + forv i = 1/`r' { + mat `tmp' = `bc'[`i',1...] + local v: rown `tmp' + local c = colnumb(`x', `"`v'"') + if `c'<. { + mat `bc'[`i',`j'] = `x'[1,`c'] + } + } +end + +program ComputeCoefs + args bc hasmargin coefs level + local bVs1 "b _star _sign _sigsign" + local bVs2 "se var t z p ci_l ci_u" + local c = colsof(`bc') + forv j = 3/`c' { + gettoken v coefs : coefs + if `"`v'"'=="" continue, break + if `: list v in bVs1' { + ComputeCoefs_`v' `bc' `j' `level' + continue + } + if `: list v in bVs2' { + if `hasmargin' { + ComputeCoefs_`v' `bc' `j' `level' + continue + } + capt confirm matrix e(`v') + if _rc { + ComputeCoefs_`v' `bc' `j' `level' + } + } + } +end + +program CopyColFromTo + args m from to cname + tempname tmp + mat `tmp' = `m'[1...,`from'] + mat coln `tmp' = `cname' + local c = colsof(`m') + if `to'==`c' { + mat `m' = `m'[1...,1..`c'-1], `tmp' + exit + } + mat `m' = `m'[1...,1..`to'-1], `tmp', `m'[1...,`to'+1..`c'] +end + +program ComputeCoefs_b + args bc j + CopyColFromTo `bc' 1 `j' "b" +end + +program ComputeCoefs_se + args bc j + local r = rowsof(`bc') + forv i = 1/`r' { + local var `bc'[`i',2] + local res `bc'[`i',`j'] + if `var'>=. mat `res' = `var' + else if `var'==0 mat `res' = . 
+ else mat `res' = sqrt(`var') + } +end + +program ComputeCoefs_var + args bc j + CopyColFromTo `bc' 2 `j' "var" +end + +program ComputeCoefs_t + args bc j + local r = rowsof(`bc') + forv i = 1/`r' { + local b `bc'[`i',1] + local var `bc'[`i',2] + local res `bc'[`i',`j'] + if `b'>=. mat `res' = `b' + else if `var'>=. mat `res' = `var' + else mat `res' = `b'/sqrt(`var') + } +end + +program ComputeCoefs_z + ComputeCoefs_t `0' +end + +program ComputeCoefs_p + args bc j + local r = rowsof(`bc') + local df_r = e(df_r) + if `"`e(mi)'"'=="mi" { // get df_mi + capt confirm matrix e(df_mi) + if _rc==0 { + tempname dfmi + matrix `dfmi' = e(df_mi) + } + } + if c(stata_version)<10 local dfmax 1e12 + else local dfmax 2e17 + forv i = 1/`r' { + local b `bc'[`i',1] + local var `bc'[`i',2] + local res `bc'[`i',`j'] + if `b'>=. mat `res' = `b' + else if `var'>=. mat `res' = `var' + else if "`dfmi'"!="" { + if `dfmi'[1,`i']<=`dfmax' { + mat `res' = ttail(`dfmi'[1,`i'],abs(`b'/sqrt(`var'))) * 2 + } + else { + mat `res' = (1 - norm(abs(`b'/sqrt(`var')))) * 2 + } + } + else if `df_r'<=`dfmax' mat `res' = ttail(`df_r',abs(`b'/sqrt(`var'))) * 2 + else mat `res' = (1 - norm(abs(`b'/sqrt(`var')))) * 2 + } +end + +program ComputeCoefs_ci_l + args bc j + ComputeCoefs_ci - `0' +end + +program ComputeCoefs_ci_u + args bc j + ComputeCoefs_ci + `0' +end + +program ComputeCoefs_ci + args sign bc j level + local r = rowsof(`bc') + local df_r = e(df_r) + if `"`e(mi)'"'=="mi" { // get df_mi + capt confirm matrix e(df_mi) + if _rc==0 { + tempname dfmi + matrix `dfmi' = e(df_mi) + } + } + if c(stata_version)<10 local dfmax 1e12 + else local dfmax 2e17 + forv i = 1/`r' { + local b `bc'[`i',1] + local var `bc'[`i',2] + local res `bc'[`i',`j'] + if `b'>=. mat `res' = `b' + else if `var'>=. 
mat `res' = `var' + else if "`dfmi'"!="" { + if `dfmi'[1,`i']<=`dfmax' { + mat `res' = `b' `sign' /// + invttail(`dfmi'[1,`i'],(100-`level')/200) * sqrt(`var') + } + else { + mat `res' = `b' `sign' /// + invnorm(1-(100-`level')/200) * sqrt(`var') + } + } + else if `df_r'<=`dfmax' /// + mat `res' = `b' `sign' invttail(`df_r',(100-`level')/200) * sqrt(`var') + else mat `res' = `b' `sign' invnorm(1-(100-`level')/200) * sqrt(`var') + } +end + +program ComputeCoefs__star + args bc j + CopyColFromTo `bc' 1 `j' "_star" +end + +program ComputeCoefs__sign + args bc j + CopyColFromTo `bc' 1 `j' "_sign" +end + +program ComputeCoefs__sigsign + args bc j + CopyColFromTo `bc' 1 `j' "_sigsign" +end + +program GetMarginals + args bc margin meqs + tempname D dfdx + mat `D' = `bc'[1...,1]*0 + mat coln `D' = "_dummy" + local type `e(Xmfx_type)' + if "`type'"!="" { + mat `dfdx' = e(Xmfx_`type') + capture confirm matrix e(Xmfx_se_`type') + if _rc==0 { + mat `dfdx' = `dfdx' \ e(Xmfx_se_`type') + } + if "`e(Xmfx_discrete)'"=="discrete" local dummy `e(Xmfx_dummy)' + } + else if "`e(cmd)'"=="dprobit" { + mat `dfdx' = e(dfdx) \ e(se_dfdx) + local dummy `e(dummy)' + } + else if "`e(cmd)'"=="tobit" & inlist("`margin'","u","c","p") { + capture confirm matrix e(dfdx_`margin') + if _rc==0 { + mat `dfdx' = e(dfdx_`margin') \ e(se_`margin') + } + local dummy `e(dummy)' + } + else if "`e(cmd)'"=="truncreg" { + capture confirm matrix e(dfdx) + if _rc==0 { + tempname V se + mat `V' = e(V_dfdx) + forv k= 1/`=rowsof(`V')' { + mat `se' = nullmat(`se') , sqrt(`V'[`k',`k']) + } + mat `dfdx' = e(dfdx) \ `se' + } + } + capture confirm matrix `dfdx' + if _rc==0 { + QuotedRowNames `bc' + local rnames `"`value'"' + if `"`meqs'"'!="" local reqs: roweq `bc', q + local i 1 + foreach row of loc rnames { + if `"`meqs'"'!="" { + local eq: word `i' of `reqs' + } + local col = colnumb(`dfdx',"`row'") + if `col'>=. 
| !`:list eq in meqs' { + mat `bc'[`i',1] = .y + mat `bc'[`i',2] = .y + mat `D'[`i',1] = .y + } + else { + mat `bc'[`i',1] =`dfdx'[1,`col'] + mat `bc'[`i',2] = (`dfdx'[2,`col'])^2 + if "`:word `col' of `dummy''"=="1" mat `D'[`i',1] = 1 + } + local ++i + } + c_local hasmargin 1 + } + mat `bc' = `bc', `D' +end + +program TransformCoefs + args bc coefs transform + local c = colsof(`bc') + forv j = 3/`c' { + gettoken v coefs : coefs + if inlist("`v'", "b", "ci_l", "ci_u") { + _TransformCoefs `bc' `j' 0 "" "" `"`transform'"' + } + else if "`v'"=="se" { + _TransformCoefs `bc' `j' 1 "abs" "" `"`transform'"' + } + else if "`v'"=="var" { + _TransformCoefs `bc' `j' 1 "" "^2" `"`transform'"' + } + } +end + +program _TransformCoefs + args bc j usedf abs sq transform + local r = rowsof(`bc') + gettoken coef rest : transform + gettoken f rest : rest + gettoken df rest : rest + while `"`coef'`f'`df'"'!="" { + if `"`df'`rest'"'=="" { // last element of list may be without coef + local df `"`f'"' + local f `"`coef'"' + local coef "" + } + local trcoefs `"`trcoefs'`"`coef'"' "' + if `usedf' { + local trs `"`trs'`"`df'"' "' + } + else { + local trs `"`trs'`"`f'"' "' + } + gettoken coef rest : rest + gettoken f rest : rest + gettoken df rest : rest + } + local trs : subinstr local trs "@" "\`b'", all + forv i = 1/`r' { + gettoken coef coefrest : trcoefs + gettoken tr trrest : trs + while `"`coef'`tr'"'!="" { + MatchCoef `"`coef'"' `bc' `i' + if `match' { + if `usedf' { + local b `bc'[`i',1] + local res `bc'[`i',`j'] + if `res'<. { + mat `res' = `res' * `abs'(`tr')`sq' + } + } + else { + local b `bc'[`i',`j'] + if `b'<. { + mat `b' = (`tr') + } + } + continue, break + } + gettoken coef coefrest : coefrest + gettoken tr trrest : trrest + } + } +end + +program MatchCoef + args eqx b i + if inlist(trim(`"`eqx'"'),"","*") { + c_local match 1 + exit + } + tempname tmp + mat `tmp' = `b'[`i',1...] 
+ local eqi: roweq `tmp' + local xi: rown `tmp' + gettoken eq x : eqx, parse(:) + local eq: list clean eq + if `"`eq'"'==":" { // case 1: ":[varname]" + local x: list clean x + local eq + } + else if `"`x'"'=="" { // case 2: "varname" + local x `"`eq'"' + local eq + } + else { // case 3. "eqname:[varname]" + if `"`eq'"'=="_" local eq "__" + gettoken colon x : x, parse(:) + local x: list clean x + } + if `"`eq'"'=="" local eq "*" + if `"`x'"'=="" local x "*" + c_local match = match(`"`eqi'"', `"`eq'"') & match(`"`xi'"', `"`x'"') +end + +program NumberMlabels + args M mlabels + forv m = 1/`M' { + local num "(`m')" + local lab: word `m' of `macval(mlabels)' + if `"`macval(lab)'"'!="" { + local lab `"`num' `macval(lab)'"' + } + else local lab `num' + local labels `"`macval(labels)'`"`macval(lab)'"' "' + } + c_local mlabels `"`macval(labels)'"' +end + +program ModelEqCheck + args B eq m ccols + tempname Bsub + local a = (`m'-1)*`ccols'+1 + local b = `a' + `ccols'-1 + mat `Bsub' = `B'["`eq':",`a'..`b'] + local R = rowsof(`Bsub') + local value 0 + forv c = 1/`ccols' { + forv r = 1/`R' { + if `Bsub'[`r',`c']<. 
{ + local value 1 + continue, break + } + } + } + c_local value `value' +end + +program Add2Vblock + args block col + foreach v of local col { + gettoken row block: block + local row "`row' `v'" + local row: list retok row + local vblock `"`vblock'"`row'" "' + } + c_local vblock `"`vblock'"' +end + +program CountNofEqs + args ms es + local m0 0 + local e0 0 + local i 0 + local eqs 0 + foreach m of local ms { + local ++i + local e: word `i' of `es' + if `m'!=`m0' | `e'!=`e0' { + local ++eqs + } + local m0 `m' + local e0 `e' + } + c_local value `eqs' +end + +program InsertAtVariables + args value type span M E width hline rtf rtfrowdefbrdrt rtfrowdefbrdrb rtfrowdef rtfemptyrow /// + title note discrete starlegend + if `type'==1 local atvars span + else { + local atvars span M E width hline + if `rtf' local atvars `atvars' rtfrowdefbrdrt rtfrowdefbrdrb rtfrowdef rtfemptyrow + if `type'!=2 local atvars `atvars' title note discrete starlegend + } + foreach atvar of local atvars { + StableSubinstr value `"`macval(value)'"' "@`atvar'" `"`macval(`atvar')'"' all + } + c_local value `"`macval(value)'"' +end + +program Abbrev + args width value abbrev + if c(stata_version)>=14 { + local substr udsubstr + local length udstrlen + } + else { + local substr substr + local length length + } + if "`abbrev'"!="" { + if `width'>32 { + local value = `substr'(`"`macval(value)'"',1,`width') + } + else if `width'>0 { + if `length'(`"`macval(value)'"')>`width' { + local value = abbrev(`"`macval(value)'"',`width') + } + } + } + c_local value `"`macval(value)'"' +end + +program MgroupsPattern + args mrow pattern + local i 0 + local m0 0 + local j 0 + foreach m of local mrow { + if `m'>=. { + local newpattern `newpattern' . 
+ continue + } + if `m'!=`m0' { + local p: word `++i' of `pattern' + if `i'==1 local p 1 + if "`p'"=="1" local j = `j' + 1 + } + local newpattern `newpattern' `j' + local m0 `m' + } + c_local mgroupspattern `newpattern' +end + +program WriteCaption + args file delimiter stardetach row rowtwo labels starsrow span /// + abbrev colwidth delwidth starwidth repeat prefix suffix haslabcol2 + local c 0 + local nspan 0 + local c0 = 2 + `haslabcol2' + local spanwidth -`delwidth' + local spanfmt + local ncolwidth: list sizeof colwidth + foreach r of local row { + local rtwo: word `++c' of `rowtwo' + local colwidthj: word `=1+mod(`c'-1,`ncolwidth')' of `colwidth' + if `colwidthj'>0 local colfmt "%`colwidthj's" + else local colfmt + if "`r'"=="." { + local ++c0 + file write `file' `macval(delimiter)' `colfmt' (`""') + } + else if `"`span'"'=="" { + if ( "`r'"!="`lastr'" | "`rtwo'"!="`lastrtwo'" | `"`rowtwo'"'=="" ) { + local value: word `r' of `macval(labels)' + Abbrev `colwidthj' `"`macval(value)'"' "`abbrev'" + local value `"`macval(prefix)'`macval(value)'`macval(suffix)'"' + InsertAtVariables `"`macval(value)'"' 1 "1" + } + else local value + if `:length local value'<245 { + local value: di `colfmt' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + if `:word `c' of `starsrow''==1 { + file write `file' `macval(stardetach)' _skip(`starwidth') + } + local lastr "`r'" + local lastrtwo "`rtwo'" + } + else { + local ++nspan + local spanwidth=`spanwidth'+`colwidthj'+`delwidth' + if `:word `c' of `starsrow''==1 { + local spanwidth = `spanwidth' + `starwidth' + if `"`macval(stardetach)'"'!="" { + local ++nspan + local spanwidth = `spanwidth' + `delwidth' + } + } + local nextrtwo: word `=`c'+1' of `rowtwo' + local nextr: word `=`c'+1' of `row' + if "`r'"!="." 
& /// + ("`r'"!="`nextr'" | "`rtwo'"!="`nextrtwo'" | `"`rowtwo'"'=="") { + local value: word `r' of `macval(labels)' + Abbrev `spanwidth' `"`macval(value)'"' "`abbrev'" + local value `"`macval(prefix)'`macval(value)'`macval(suffix)'"' + InsertAtVariables `"`macval(value)'"' 1 "`nspan'" + if `spanwidth'>0 local spanfmt "%-`spanwidth's" + if `:length local value'<245 { + local value: di `spanfmt' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + InsertAtVariables `"`macval(repeat)'"' 1 "`c0'-`=`c0'+`nspan'-1'" + local repeatlist `"`macval(repeatlist)'`macval(value)'"' + local c0 = `c0' + `nspan' + local nspan 0 + local spanwidth -`delwidth' + } + } + } + c_local value `"`macval(repeatlist)'"' +end + +program WriteBegin + args file pre begin post + foreach line of local pre { + file write `file' `newline' `"`macval(line)'"' + local newline _n + } + file write `file' `macval(begin)' `macval(post)' +end + +program WriteEnd + args file end post post2 + file write `file' `macval(end)' + WriteStrLines `"`file'"' `"`macval(post)'"' + WriteStrLines `"`file'"' `"`macval(post2)'"' + file write `file' _n +end + +program WriteStrLines + args file lines + foreach line of local lines { + file write `file' `newline' `"`macval(line)'"' + local newline _n + } +end + +program WriteEqrow + args file delimiter stardetach value row span vwidth fmt_v /// + abbrev mwidth delwidth starwidth prefix suffix /// + haslabcol2 labcolwidth fmt_l2 + local nspan 1 + local spanwidth `vwidth' + local spanfmt + local c 0 + local nmwidth: list sizeof mwidth + if `"`span'"'=="" { + Abbrev `vwidth' `"`macval(value)'"' "`abbrev'" + local value `"`macval(prefix)'`macval(value)'`macval(suffix)'"' + InsertAtVariables `"`macval(value)'"' 1 "1" + if `:length local value'<245 { + local value: di `fmt_v' `"`macval(value)'"' + } + file write `file' `"`macval(value)'"' + if `haslabcol2' { + file write `file' `macval(delimiter)' `fmt_l2' ("") + } + foreach r of local row { + local 
mwidthj: word `=1+mod(`c++',`nmwidth')' of `mwidth' + if `mwidthj'>0 local fmt_m "%`mwidthj's" + else local fmt_m + file write `file' `macval(delimiter)' `fmt_m' ("") + if `r'==1 { + file write `file' `macval(stardetach)' _skip(`starwidth') + } + } + } + else { + if `haslabcol2' { + local ++nspan + local spanwidth = `spanwidth' + `delwidth' + `labcolwidth' + } + foreach r of local row { + local mwidthj: word `=1+mod(`c++',`nmwidth')' of `mwidth' + local ++nspan + local spanwidth = `spanwidth' + `delwidth' + `mwidthj' + if `r'==1 { + local spanwidth = `spanwidth' + `starwidth' + if `"`macval(stardetach)'"'!="" { + local ++nspan + local spanwidth = `spanwidth' + `delwidth' + } + } + } + Abbrev `spanwidth' `"`macval(value)'"' "`abbrev'" + local value `"`macval(prefix)'`macval(value)'`macval(suffix)'"' + InsertAtVariables `"`macval(value)'"' 1 "`nspan'" + if `spanwidth'>0 local spanfmt "%-`spanwidth's" + if `:length local value'<245 { + local value: di `spanfmt' `"`macval(value)'"' + } + file write `file' `"`macval(value)'"' + } +end + +prog WriteStrRow + args file mrow eqrow neq labels delimiter stardetach starsrow /// + abbrev colwidth delwidth starwidth + local c 0 + local ncolwidth: list sizeof colwidth + foreach mnum of local mrow { + local eqnum: word `++c' of `eqrow' + local colwidthj: word `=1+mod(`c'-1,`ncolwidth')' of `colwidth' + if `colwidthj'>0 local colfmt "%`colwidthj's" + else local colfmt + if "`mnum'"=="." 
{ + file write `file' `macval(delimiter)' `colfmt' (`""') + continue + } + if ( "`mnum'"!="`lastmnum'" | "`eqnum'"!="`lasteqnum'" ) { + local value: word `=(`mnum'-1)*`neq'+`eqnum'' of `macval(labels)' + Abbrev `colwidthj' `"`macval(value)'"' "`abbrev'" + } + else local value + if `:length local value'<245 { + local value: di `colfmt' `"`macval(value)'"' + } + file write `file' `macval(delimiter)' `"`macval(value)'"' + if `:word `c' of `starsrow''==1 { + file write `file' `macval(stardetach)' _skip(`starwidth') + } + local lastmnum "`mnum'" + local lasteqnum "`eqnum'" + } +end + +program VarInList + args var unstack eqvar eq list + local value + local L: word count `macval(list)' + forv l = 1(2)`L' { + local lvar: word `l' of `macval(list)' + local lab: word `=`l'+1' of `macval(list)' + if "`unstack'"!="" { + if `"`var'"'==`"`lvar'"' { + local value `"`macval(lab)'"' + continue, break + } + } + else { + if substr(`"`lvar'"', 1, 2)=="_:" local lvar `"_`lvar'"' + if inlist(`"`lvar'"',`"`var'"',`"`eqvar'"',`"`eq':"') { + local value `"`macval(lab)'"' + continue, break + } + } + } + c_local value `"`macval(value)'"' +end + +program vFormat + args value fmt lz dmarker msign par + if substr(`"`fmt'"',1,1)=="a" { + SignificantDigits `fmt' `value' + } + else { + capt confirm integer number `fmt' + if !_rc { + local fmt %`=`fmt'+9'.`fmt'f + } + } + else if `"`fmt'"'=="%g" | `"`fmt'"'=="g" local fmt "%9.0g" + else if substr(`"`fmt'"',1,1)!="%" { + di as err `"`fmt': invalid format"' + exit 198 + } + local value: di `fmt' `value' + local value: list retok value + if "`lz'"=="" { + if index("`value'","0.")==1 | index("`value'","-0.") { + local value: subinstr local value "0." "." + } + } + if `"`macval(dmarker)'"'!="" { + if "`: set dp'"=="comma" local dp , + else local dp . 
+ local val: subinstr local value "`dp'" `"`macval(dmarker)'"' + } + else local val `"`value'"' + if `"`msign'"'!="" { + if index("`value'","-")==1 { + local val: subinstr local val "-" `"`macval(msign)'"' + } + } + if `"`par'"'!="" { + tokenize `"`macval(par)'"' + local val `"`macval(1)'`macval(val)'`macval(2)'"' + } + c_local value `"`macval(val)'"' +end + +program SignificantDigits // idea stolen from outreg2.ado + args fmt value + local d = substr("`fmt'", 2, .) + if `"`d'"'=="" local d 3 + capt confirm integer number `d' + if _rc { + di as err `"`fmt': invalid format"' + exit 198 + } +// missing: format does not matter + if `value'>=. local fmt "%9.0g" +// integer: print no decimal places + else if (`value'-int(`value'))==0 { + local fmt "%12.0f" + } +// value in (-1,1): display up to 9 decimal places with d significant +// digits, then switch to e-format with d-1 decimal places + else if abs(`value')<1 { + local right = -int(log10(abs(`value'-int(`value')))) // zeros after dp + local dec = max(1,`d' + `right') + if `dec'<=9 { + local fmt "%12.`dec'f" + } + else { + local fmt "%12.`=min(9,`d'-1)'e" + } + } +// |values|>=1: display d+1 significant digits or more with at least one +// decimal place and up to nine digits before the decimal point, then +// switch to e-format + else { + local left = int(log10(abs(`value'))+1) // digits before dp + if `left'<=9 { + local fmt "%12.`=max(1,`d' - `left' + 1)'f" + } + else { + local fmt "%12.0e" // alternatively: "%12.`=min(9,`d'-1)'e" + } + } + c_local fmt "`fmt'" +end + +program Stars + args starlevels P + if inrange(`P',0,1) { + local nstar: word count `macval(starlevels)' + forv i=1(2)`nstar' { + local istarsym: word `i' of `macval(starlevels)' + local istar: word `=`i'+1' of `macval(starlevels)' + if `istar'<=`P' continue, break + local value "`macval(istarsym)'" + } + } + c_local value `"`macval(value)'"' +end + +program CellStars + args starlevels P par + Stars `"`macval(starlevels)'"' `P' + if `"`par'"'!="" { + 
tokenize `"`macval(par)'"' + local value `"`macval(1)'`macval(value)'`macval(2)'"' + } + c_local value `"`macval(value)'"' +end + +prog MakeSign + args value msign par starlevels P + if "`P'"!="" { + local factor = 0 + while 1 { + gettoken istar starlevels : starlevels + gettoken istar starlevels : starlevels + if `"`istar'"'=="" continue, break + if `P'<`istar' local factor = `factor' + 1 + else if `istar'==1 local factor = 1 + } + } + else local factor 1 + if `"`macval(msign)'"'=="" local msign "-" + if `value'<0 { + local val: di _dup(`factor') `"`macval(msign)'"' + } + else if `value'==0 local val: di _dup(`factor') "0" + else if `value'>0 & `value'<. local val: di _dup(`factor') "+" + else local val `value' + if `"`par'"'!="" { + tokenize `"`macval(par)'"' + local val `"`macval(1)'`macval(val)'`macval(2)'"' + } + c_local value `"`macval(val)'"' +end + +program DropOrKeep + args type b spec // type=0: drop; type=1: keep + capt confirm matrix `b' + if _rc { + exit + } + tempname res bt + local R = rowsof(`b') + forv i=1/`R' { + local hit 0 + mat `bt' = `b'[`i',1...] 
+ foreach sp of local spec { + if rownumb(`bt', `"`sp'"')==1 { + local hit 1 + continue, break + } + } + if `hit'==`type' mat `res' = nullmat(`res') \ `bt' + } + capt mat drop `b' + capt mat rename `res' `b' +end + +program Order + args b spec + capt confirm matrix `b' + if _rc { + exit + } + tempname bt res + local eqlist: roweq `b', q + local eqlist: list uniq eqlist + mat `bt' = `b' + gettoken spi rest : spec + while `"`spi'"'!="" { + gettoken spinext rest : rest + if !index(`"`spi'"',":") { + local vars `"`vars'`"`spi'"' "' + if `"`spinext'"'!="" & !index(`"`spinext'"',":") { + local spi `"`spinext'"' + continue + } + foreach eq of local eqlist { + foreach var of local vars { + local splist `"`splist'`"`eq':`var'"' "' + } + local splist `"`splist'`"`eq':"' "' // rest + } + local vars + } + else local splist `"`spi'"' + gettoken sp splist : splist + while `"`sp'"'!="" { + local isp = rownumb(`bt', "`sp'") + if `isp' >= . { + gettoken sp splist : splist + continue + } + while `isp' < . { + mat `res' = nullmat(`res') \ `bt'[`isp',1...] + local nb = rowsof(`bt') + if `nb' == 1 { // no rows left in `bt' + capt mat drop `b' + capt mat rename `res' `b' + exit + } + if `isp' == 1 { + mat `bt' = `bt'[2...,1...] + } + else if `isp' == `nb' { + mat `bt' = `bt'[1..`=`nb'-1',1...] + } + else { + mat `bt' = `bt'[1..`=`isp'-1',1...] \ `bt'[`=`isp'+1'...,1...] 
+ } + local isp = rownumb(`bt', "`sp'") + } + gettoken sp splist : splist + } + local spi `"`spinext'"' + } + capt mat `res' = nullmat(`res') \ `bt' + capt mat drop `b' + capt mat rename `res' `b' +end + +prog MakeQuotedFullnames + args names eqs + foreach name of local names { + gettoken eq eqs : eqs + local value `"`value'`"`eq':`name'"' "' + } + c_local value: list clean value +end + +program define QuotedRowNames + args matrix + capt confirm matrix `matrix' + if _rc { + c_local value "" + exit + } + tempname extract + if substr(`"`matrix'"',1,2)=="r(" { + local matrix0 `"`matrix'"' + tempname matrix + mat `matrix' = `matrix0' + } + local R = rowsof(`matrix') + forv r = 1/`R' { + mat `extract' = `matrix'[`r',1...] + local name: rownames `extract' + local value `"`value'`"`name'"' "' + } + c_local value: list clean value +end + +prog EqReplaceCons + args names eqlist eqlabels varlabels + local skip 0 + foreach v of local varlabels { + if `skip' { + local skip 0 + continue + } + local vlabv `"`vlabv'`"`v'"' "' + local skip 1 + } + local deqs: list dups eqlist + local deqs: list uniq deqs + local i 0 + foreach eq of local eqlist { + local ++i + if `"`eq'"'!=`"`last'"' { + gettoken eqlab eqlabels : eqlabels + } + local last `"`eq'"' + if `:list eq in deqs' | `"`eq'"'=="__" continue + local name: word `i' of `names' + local isinvlabv: list posof `"`eq':`name'"' in vlabv + if `"`name'"'=="_cons" & `isinvlabv'==0 { + local value `"`value'`space'`"`eq':`name'"' `"`eqlab'"'"' + local space " " + } + } + c_local value `"`value'"' +end + +prog UniqEqsAndDims + local n 0 + foreach el of local 1 { + if `"`macval(el)'"'!=`"`macval(last)'"' { + if `n'>0 local eqsdims "`eqsdims' `n'" + local eqs `"`macval(eqs)' `"`macval(el)'"'"' + local n 0 + } + local ++n + local last `"`macval(el)'"' + } + local eqsdims "`eqsdims' `n'" + c_local eqsdims: list clean eqsdims + c_local eqs: list clean eqs +end + +prog RerrangeEqs + args B eqlist eqs + local equ: list uniq eqlist + if `: list 
sizeof equ'==`: list sizeof eqs' exit // equations are in order + tempname C + foreach eq of local equ { + local i 0 + foreach eqi of local eqlist { + local ++i + if `"`eq'"'!="`eqi'" continue + mat `C' = nullmat(`C') \ `B'[`i',1...] + } + } + matrix drop `B' + matrix rename `C' `B' + local eqlist: roweq `B', q + local eqlist: list clean eqlist + UniqEqsAndDims `"`eqlist'"' + c_local eqlist `"`eqlist'"' + c_local eqs `"`eqs'"' + c_local eqsdims `"`eqsdims'"' +end + +prog InsertAtCols + args colnums row symb + if `"`symb'"'=="" local symb . + gettoken c rest : colnums + local i 0 + foreach r of local row { + local ++i + while `"`c'"'!="" { + if `c'<=`i' { + local value `"`value' `symb'"' + gettoken c rest : rest + } + else continue, break + } + local value `"`value' `"`r'"'"' + } + while `"`c'"'!="" { + local value `"`value' `symb'"' + gettoken c rest : rest + } + c_local value: list clean value +end + +prog GetVarnamesFromOrder + foreach sp of local 1 { + if index(`"`sp'"', ":") { + gettoken trash sp: sp, parse(:) + if `"`trash'"'!=":" { + gettoken trash sp: sp, parse(:) + } + } + local value `"`value'`space'`sp'"' + local space " " + } + c_local value `"`value'"' +end + +prog ParseIndicateOpts + syntax [anything(equalok)] [, Labels(str asis) ] + gettoken tok rest : anything, parse(" =") + while `"`macval(tok)'"'!="" { + if `"`macval(tok)'"'=="=" { + local anything `"`"`macval(anything)'"'"' + continue, break + } + gettoken tok rest : rest, parse(" =") + } + c_local indicate `"`macval(anything)'"' + c_local indicatelabels `"`macval(labels)'"' +end + +prog ProcessIndicateGrp + args i B nmodels ccols unstack yesno indicate + gettoken yes no : yesno + gettoken no : no + gettoken tok rest : indicate, parse(=) + while `"`macval(tok)'"'!="" { + if `"`macval(rest)'"'=="" { + local vars `"`indicate'"' + continue, break + } + if `"`macval(tok)'"'=="=" { + local vars `"`rest'"' + continue, break + } + local name `"`macval(name)'`space'`macval(tok)'"' + local space " " + 
gettoken tok rest : rest, parse(=) + } + if `"`macval(name)'"'=="" { + local name: word 1 of `"`vars'"' + } + ExpandEqVarlist `"`vars'"' `B' + local evars `"`value'"' + IsInModels `B' `nmodels' `ccols' "`unstack'" `"`macval(yes)'"' `"`macval(no)'"' `"`evars'"' + local lbls `"`macval(value)'"' + DropOrKeep 0 `B' `"`evars'"' + c_local indicate`i'name `"`macval(name)'"' + c_local indicate`i'lbls `"`macval(lbls)'"' + c_local indicate`i'eqs `"`eqs'"' +end + +prog IsInModels + args B nmodels ccols unstack yes no vars + capt confirm matrix `B' + if _rc { + forv i = 1/`nmodels' { + local lbls `"`macval(lbls)' `"`macval(no)'"'"' + } + c_local value `"`macval(lbls)'"' + if `"`unstack'"'!="" { + c_local eqs "__" + } + exit + } + local eqs: roweq `B', q + local eqs: list uniq eqs + tempname Bt Btt Bttt + forv j = 1/`nmodels' { + local stop 0 + mat `Bt' = `B'[1..., (`j'-1)*`ccols' + 1] + foreach eq of local eqs { + mat `Btt' = `Bt'[`"`eq':"',1] + if `"`unstack'"'!="" local stop 0 + foreach var of local vars { + if !index(`"`var'"',":") { + local var `"`eq':`var'"' + } + capt mat `Bttt' = `Btt'["`var'",1] + if _rc continue + forv i = 1/`= rowsof(`Bttt')' { + if `Bttt'[`i',1]<.z { + local lbls `"`macval(lbls)' `"`macval(yes)'"'"' + local stop 1 + continue, break + } + } + if `stop' continue, break + } + if `"`unstack'"'!="" { + if `stop'==0 { + local lbls `"`macval(lbls)' `"`macval(no)'"'"' + } + } + else if `stop' continue, break + } + if `"`unstack'"'=="" & `stop'==0 { + local lbls `"`macval(lbls)' `"`macval(no)'"'"' + } + } + c_local value `"`macval(lbls)'"' + if `"`unstack'"'!="" { + c_local eqs `"`eqs'"' + } +end + +prog ReorderEqsInIndicate + args nmodels eqs ieqs lbls + local neq: list sizeof ieqs + foreach eq of local eqs { + local i: list posof `"`eq'"' in ieqs + if `i' { + local pos `pos' `i' + } + } + forv m=1/`nmodels' { + foreach i of local pos { + local mi = (`m'-1)*`neq' + `i' + local lbl: word `mi' of `macval(lbls)' + local value `"`macval(value)'`"`macval(lbl)'"' 
"' + } + } + c_local value `"`macval(value)'"' +end + +prog ParseRefcatOpts + syntax [anything(equalok)] [, NOLabel Label(str) Below ] + c_local refcatbelow "`below'" + c_local norefcatlabel "`nolabel'" + c_local refcatlabel `"`macval(label)'"' + c_local refcat `"`macval(anything)'"' +end + +prog PrepareRefcat + gettoken coef rest : 1 + gettoken name rest : rest + while `"`macval(coef)'"'!="" { + local coefs `"`coefs'`"`coef'"' "' + local names `"`macval(names)'`"`macval(name)'"' "' + gettoken coef rest : rest + gettoken name rest : rest + } + c_local refcatcoefs `"`coefs'"' + c_local refcatnames `"`macval(names)'"' +end + +prog GenerateRefcatRow + args B ccols var eqs label + local models: coleq `B', q + local models: list uniq models + local col 1 + foreach model of local models { + foreach eq of local eqs { + local eqvar `"`eq':`var'"' + local row = rownumb(`B',"`eqvar'") + if `B'[`row', `col']<.z { + local value `"`macval(value)'`"`macval(label)'"' "' + } + else { + local value `"`macval(value)'`""' "' + } + } + local col = `col' + `ccols' + } + c_local value `"`macval(value)'"' +end + +prog ParseTransformSubopts + syntax anything(equalok) [, Pattern(string) ] + c_local transform `"`anything'"' + c_local transformpattern "`pattern'" +end + +prog MakeTransformList + args B transform + local R = rowsof(`B') + if `:list sizeof transform'<=2 { + gettoken f rest : transform + gettoken df : rest + forv i = 1/`R' { + local valuef `"`valuef'`f' "' + local valuedf `"`valuedf'`df' "' + } + c_local valuef: list retok valuef + c_local valuedf: list retok valuedf + exit + } + gettoken coef rest : transform + gettoken f rest : rest + gettoken df rest : rest + while (`"`coef'"'!="") { + if (`"`df'`rest'"'!="") { // last element of list may be without coef + ExpandEqVarlist `"`coef'"' `B' + local coef `"`value'"' + } + local coefs `"`coefs'`"`coef'"' "' + local fs `"`fs'`"`f'"' "' + local dfs `"`dfs'`"`df'"' "' + gettoken coef rest : rest + gettoken f rest : rest + gettoken df 
rest : rest + } + tempname b + local value + forv i = 1/`R' { + mat `b' = `B'[`i',1...] + local i 0 + local hit 0 + foreach coef of local coefs { + local f: word `++i' of `fs' + local df: word `i' of `dfs' + if (`"`df'`rest'"'=="") { + local valuef `"`valuef'`"`coef'"' "' // sic! (see above) + local valuedf `"`valuedf'`"`f'"' "' + local hit 1 + continue, break + } + foreach c of local coef { + if rownumb(`b', `"`c'"')==1 { + local valuef `"`valuef'`"`f'"' "' + local valuedf `"`valuedf'`"`df'"' "' + local hit 1 + continue, break + } + } + if `hit' continue, break + } + if `hit'==0 { + local valuef `"`valuef'"" "' + local valuedf `"`valuedf'"" "' + } + } + c_local valuef: list retok valuef + c_local valuedf: list retok valuedf +end + +prog TableIsAMess + local ccols = r(ccols) + local eq: roweq r(coefs), q + local eq: list uniq eq + if `: list sizeof eq'<=1 { + c_local value 0 + exit + } + tempname b bt + mat `b' = r(coefs) + gettoken eq : eq + mat `b' = `b'[`"`eq':"', 1...] + local R = rowsof(`b') + local models: coleq `b', q + local models: list uniq models + local value 0 + local i = 1 - `ccols' + foreach model of local models { + local i = `i' + `ccols' + if `i'==1 continue // skip first model + mat `bt' = `b'[1...,`i'] + local allz 1 + forv r = 1/`R' { + if `bt'[`r',1]<.z { + local allz 0 + continue, break + } + } + if `allz' { + local value 1 + continue, break + } + } + c_local value `value' +end + +prog ExpandEqVarlist + args list B append + ParseEqVarlistRelax `list' + QuotedRowNames `B' + local coefs `"`value'"' + local value + local ucoefs: list uniq coefs + capt confirm matrix `B' + if _rc==0 { + local eqs: roweq `B', q + } + else local eqs "__" + local ueqs: list uniq eqs + while `"`list'"'!="" { +// get next element + local eq0 + gettoken eqx list : list +// separate eq and x + gettoken eq x : eqx, parse(:) + local eq: list clean eq + if `"`eq'"'==":" { // case 1: ":[varname]" + local x: list clean x + local eq + } + else if `"`x'"'=="" { // case 2: 
"varname" + local x `"`eq'"' + local eq + } + else { // case 3. "eqname:[varname]" + local eq0 `"`eq'"' // eq specified by user + if `"`eq'"'=="_" local eq "__" + gettoken colon x : x, parse(:) + local x: list clean x + } +// match equations + local eqmatch + if `:list eq in ueqs' { // (note: evaluates to 1 if eq empty) + local eqmatch `"`eq'"' + } + else { + foreach e of local ueqs { + if match(`"`e'"', `"`eq'"') { + local eqmatch `"`eqmatch' `"`e'"'"' + } + } + if `"`eqmatch'"'=="" & "`relax'"=="" { + if !("`append'"!="" & `"`x'"'!="") { + di as err `"equation `eq0' not found"' + exit 111 + } + } + local eqmatch: list clean eqmatch + } + if `"`x'"'=="" { + foreach e of local eqmatch { + local value `"`value' `"`e':"'"' + } + continue + } +// match coefficients + local vlist +// - without equation + if `"`eq'"'=="" { + if `:list x in ucoefs' { + local value `"`value' `"`x'"'"' + continue + } + foreach coef of local ucoefs { + if match(`"`coef'"', `"`x'"') { + local vlist `"`vlist' `"`coef'"'"' + } + } + if `"`vlist'"'=="" { + if "`append'"!="" { + local appendlist `"`appendlist' `"__:`x'"'"' + local value `"`value' `"`x'"'"' + } + else if "`relax'"=="" { + di as err `"coefficient `x' not found"' + exit 111 + } + } + else { + local value `"`value' `vlist'"' + } + continue + } +// - within equations + local rest `"`eqs'"' + foreach coef of local coefs { + gettoken e rest : rest + if !`:list e in eqmatch' { + continue + } + if match(`"`coef'"', `"`x'"') { + local vlist `"`vlist' `"`e':`coef'"'"' + } + } + if `"`vlist'"'=="" { + if "`append'"!="" { + local appendlist `"`appendlist' `"`eq':`x'"'"' + local value `"`value' `"`eq':`x'"'"' + } + else if "`relax'"=="" { + di as err `"coefficient `eq0':`x' not found"' + exit 111 + } + } + else { + local value `"`value' `vlist'"' + } + } + if "`append'"!="" { + local nappend : list sizeof appendlist + if `nappend'>0 { + capt confirm matrix `B' + if _rc==0 { + tempname tmp + mat `tmp' = J(`nappend', colsof(`B'), .z) + mat rown 
`tmp' = `appendlist' + matrix `B' = `B' \ `tmp' + } + } + } + c_local value: list clean value +end + +program ParseEqVarlistRelax + syntax [anything] [, Relax ] + c_local list `"`anything'"' + c_local relax `relax' +end + +program IsInString //, rclass + args needle haystack + local trash: subinstr local haystack `"`needle'"' "", count(local count) + c_local strcount = `count' +end + +program MakeRtfRowdefs + args str srow sdetach vwidth mwidth haslc2 lc2width + local factor 120 + ParseRtfcmdNum `"`str'"' "trgaph" 0 + ParseRtfcmdNum `"`str'"' "trleft" 0 + if `vwidth'<=0 local vwidth 12 + if real(`"`trgaph'"')>=. local trgaph 0 + if real(`"`trleft'"')>=. local trleft 0 + local swidth = 3 + local vtwips = `vwidth'*`factor' + local stwips = `swidth'*`factor' + local ipos = `vtwips' + 2*`trgaph' + (`trleft') + local brdrt "\clbrdrt\brdrw10\brdrs" + local brdrb "\clbrdrb\brdrw10\brdrs" + local emptycell "\pard\intbl\ql\cell" + local rtfdef "\cellx`ipos'" + local rtfdefbrdrt "`brdrt'\cellx`ipos'" + local rtfdefbrdrb "`brdrb'\cellx`ipos'" + local rtfrow "`emptycell'" + if `haslc2' { + if `lc2width'<=0 local lc2width 12 + local lc2twips = `lc2width'*`factor' + local ipos = `ipos' + `lc2twips' + 2*`trgaph' + local rtfdef "`rtfdef'\cellx`ipos'" + local rtfdefbrdrt "`rtfdefbrdrt'`brdrt'\cellx`ipos'" + local rtfdefbrdrb "`rtfdefbrdrb'`brdrb'\cellx`ipos'" + local rtfrow "`rtfrow'`emptycell'" + } + local j 0 + local nmwidth: list sizeof mwidth + foreach i of local srow { + local mwidthj: word `=1 + mod(`j++',`nmwidth')' of `mwidth' + if `mwidthj'<=0 local mwidthj 12 + local mtwips = `mwidthj'*`factor' + local ipos = `ipos' + `mtwips' + 2*`trgaph' + if `i' & "`sdetach'"=="" local ipos = `ipos' + `stwips' + local rtfdef "`rtfdef'\cellx`ipos'" + local rtfdefbrdrt "`rtfdefbrdrt'`brdrt'\cellx`ipos'" + local rtfdefbrdrb "`rtfdefbrdrb'`brdrb'\cellx`ipos'" + local rtfrow "`rtfrow'`emptycell'" + if `i' & "`sdetach'"!="" { + local ipos = `ipos' + `stwips' + 2*`trgaph' + local rtfdef 
"`rtfdef'\cellx`ipos'" + local rtfdefbrdrt "`rtfdefbrdrt'`brdrt'\cellx`ipos'" + local rtfdefbrdrb "`rtfdefbrdrb'`brdrb'\cellx`ipos'" + local rtfrow "`rtfrow'`emptycell'" + } + } + c_local rtfrowdef "`rtfdef'" + c_local rtfrowdefbrdrt "`rtfdefbrdrt'" + c_local rtfrowdefbrdrb "`rtfdefbrdrb'" + c_local rtfemptyrow "`rtfdef'`rtfrow'" +end + +prog ParseRtfcmdNum + args str cmd default + local pos = index(`"`str'"', `"\\`cmd'"') + if `pos' { + local pos = `pos' + strlen(`"`cmd'"') + 1 + local digit = substr(`"`str'"',`pos',1) + if `"`digit'"'=="-" { + local value "`digit'" + local digit = substr(`"`str'"',`++pos',1) + } + while real(`"`digit'"')<. { + local value "`value'`digit'" + local digit = substr(`"`str'"',`++pos',1) + } + } + local value = real(`"`value'"') + if `value'>=. local value = `default' + c_local `cmd' `"`value'"' +end + +prog ParseLabCol2 + syntax [anything(equalok)] [ , Title(str asis) Width(numlist max=1 int >=0) ] + c_local labcol2 `"`macval(anything)'"' + c_local labcol2title `"`macval(title)'"' + c_local labcol2width `"`width'"' +end + +prog StableSubinstr + // use mata in stata>=9 because -:subinstr- breaks if length of + // is more than 502 characters + args new old from to all word + if c(stata_version)>=9 { + if "`all'"=="all" local cnt . 
+ else if "`all'"=="" local cnt 1 + else error 198 + if "`word'"=="" local word str + else if "`word'"!="word" error 198 + mata: st_local("tmp", subin`word'(st_local("old"), /// + st_local("from"), st_local("to"), `cnt')) + c_local `new' `"`macval(tmp)'"' + } + else { + capt local tmp: subinstr local old `"`macval(from)'"' /// + `"`macval(to)'"', `all' `word' + if _rc==0 { + c_local `new' `"`macval(tmp)'"' + } + } +end + +prog MakeMMDdef + args varw labcol2 labcol2w modelw starsrow stardetachon starw + if "`varw'"=="0" | "`varw'"=="" local varw 1 + if "`labcol2w'"=="0" | "`labcol2w'"=="" local labcol2w 1 + if "`modelw'"=="0" | "`modelw'"=="" local modelw 1 + if "`starw'"=="0" | "`starw'"=="" local starw 1 + local varw = max(1,`varw') + local labcol2w = max(1,`labcol2w'-2) + if "`stardetachon'"=="1" local starw = max(1,`starw'-2) + else local starw = max(1,`starw') + + local mmddef `"| `:di _dup(`varw') "-"'"' + if "`labcol2'"=="1" { + local mmddef `"`mmddef' | :`:di _dup(`labcol2w') "-"':"' + } + local nmodelw: list sizeof modelw + local c 0 + foreach col of local starsrow { + local modelwj: word `=1+mod(`c++',`nmodelw')' of `modelw' + local modelwj = max(1,`modelwj'-2) + local mmddef `"`mmddef' | :`:di _dup(`modelwj') "-"'"' + if "`col'"=="1" { + if "`stardetachon'"=="1" { + local mmddef `"`mmddef': | :"' + } + local mmddef `"`mmddef'`:di _dup(`starw') "-"'"' + } + local mmddef `"`mmddef':"' + } + c_local value `"`mmddef' |"' +end + +program MatrixMode, rclass + capt syntax [, Matrix(str asis) e(str asis) r(str asis) rename(str asis) ] + if _rc | `"`matrix'`e'`r'"'=="" { + c_local matrixmode 0 + exit + } + if ((`"`matrix'"'!="") + (`"`e'"'!="") + (`"`r'"'!=""))>1 { + di as err "only one of matrix(), e(), or r() allowed" + exit 198 + } + ParseMatrixOpt `matrix'`e'`r' + if `"`e'"'!="" { + local name "e(`name')" + } + else if `"`r'"'!="" { + local name "r(`name')" + } + confirm matrix `name' + tempname bc + if "`transpose'"=="" { + mat `bc' = `name'' + } + else { + 
mat `bc' = `name' + } + QuotedRowNames `bc' + local rnames `"`value'"' + local eqs: roweq `bc', q + mat `bc' = `bc'' + local cols = colsof(`bc') + local cells + local space + gettoken fmti fmtrest : fmt, match(par) + gettoken rname rnames : rnames + gettoken eq eqs : eqs + forv i = 1/`cols' { + if `"`fmti'"'!="" { + local fmtopt `"f(`fmti') "' + gettoken fmti fmtrest : fmtrest, match(par) + if `"`fmti'"'=="" & `"`fmtrest'"'=="" { // recycle + gettoken fmti fmtrest : fmt, match(par) + } + } + else local fmtopt + if `"`eq'"'=="_" { + local lbl `"l(`"`rname'"')"' + } + else { + local lbl `"l(`"`eq':`rname'"')"' + } + local cells `"`cells'`space'c`i'(`fmtopt'`lbl')"' + local space " " + gettoken rname rnames : rnames + gettoken eq eqs : eqs + } + SubstEmptyEqname `bc' // replace empty eqname "_" by "__" + if `"`rename'"'!="" { + local rename : subinstr local rename "," "", all + RenameCoefs `bc' `"`rename'"' + } + return local names "`name'" + return scalar nmodels = 1 + return scalar ccols = `cols' + return matrix coefs = `bc' + c_local matrixmode 1 + c_local cells (`cells') +end + +program ParseMatrixOpt + syntax name [, Fmt(str asis) Transpose ] + c_local name `"`namelist'"' + c_local fmt `"`fmt'"' + c_local transpose `"`transpose'"' +end + +program CompileVarl + syntax [, vname(str asis) interaction(str) ] + gettoken vi vname: vname, parse("#") + while (`"`vi'"') !="" { + local xlabi + if `"`vi'"'=="#" { + local xlabi `"`macval(interaction)'"' + } + else if strpos(`"`vi'"',".")==0 { + capt confirm variable `vi', exact + if _rc==0 { + local xlabi: var lab `vi' + } + } + else { + gettoken li vii : vi, parse(".") + gettoken dot vii : vii, parse(".") + capt confirm variable `vii', exact + if _rc==0 { + capt confirm number `li' + if _rc { + local xlabi: var lab `vii' + if (`"`macval(xlabi)'"'=="") local xlabi `"`vii'"' + if substr(`"`li'"',1,1)=="c" /// + local li = substr(`"`li'"',2,.) 
+ if (`"`li'"'!="") local xlabi `"`li'.`macval(xlabi)'"' + } + else { + local viilab : value label `vii' + if `"`viilab'"'!="" { + local xlabi: label `viilab' `li' + } + else { + local viilab: var lab `vii' + if (`"`macval(viilab)'"'=="") local viilab `"`vii'"' + local xlabi `"`macval(viilab)'=`li'"' + } + } + } + } + if `"`macval(xlabi)'"'=="" { + local xlabi `"`vi'"' + } + local xlab `"`macval(xlab)'`macval(xlabi)'"' + gettoken vi vname: vname, parse("#") + } + c_local varl `"`macval(xlab)'"' +end + +if c(stata_version)<11 exit +version 11 +mata: +mata set matastrict on + +void estout_omitted_and_base() +{ + real colvector p + real matrix bc + string matrix rstripe, cstripe + string colvector coefnm + + bc = st_matrix(st_local("bc")) + rstripe = st_matrixrowstripe(st_local("bc")) + cstripe = st_matrixcolstripe(st_local("bc")) + coefnm = rstripe[,2] + //coefnm = subinstr(coefnm,"bn.", ".") // *bn. + //coefnm = subinstr(coefnm,"bno.", "o.") // *bno. + p = J(rows(bc), 1, 1) + if (st_local("omitted")=="") { + p = p :* (!strmatch(coefnm, "*o.*")) + } + else { + coefnm = substr(coefnm, 1:+2*(substr(coefnm, 1, 2):=="o."), .) // o. + coefnm = subinstr(coefnm, "o.", ".") // *o. + } + if (st_local("baselevels")=="") { + p = p :* (!strmatch(coefnm, "*b.*")) + } + else { + coefnm = subinstr(coefnm, "b.", ".") // *b. 
+ } + if (any(p)) { + st_matrix(st_local("bc"), select(bc, p)) + st_matrixrowstripe(st_local("bc"), select((rstripe[,1], coefnm), p)) + st_matrixcolstripe(st_local("bc"), cstripe) + st_local("hasbc", "1") + } + else { + st_local("hasbc", "0") + } +} + +void estout_rown_hasblanks(string scalar lnm, string rowvector m) +{ + real scalar j + + for (j=1;j<=2;j++) { + if (m[j]=="") return + if (any(strpos(st_matrixrowstripe(m[j])[,2], " "))) { + st_local(lnm, "1") + return + } + } +} + +void estout_mat_capp(string scalar m1, string rowvector m) +{ + real scalar i, j + string scalar key, val + string colvector rown + transmorphic A + + // replace rownames that contain blanks + A = asarray_create() + asarray_notfound(A, "") + for (j=1;j<=2;j++) { + rown = st_matrixrowstripe(m[j])[,2] + for (i=rows(rown);i;i--) { + val = rown[i] + if (strpos(val, " ")) { + key = subinstr(val, " ", "_") + asarray(A, key, val) + rown[i] = key + } + st_matrixrowstripe(m[j], (st_matrixrowstripe(m[j])[,1],rown)) + } + } + // apply mat_capp + stata("mat_capp " + st_local("0")) + // restore original names + rown = st_matrixrowstripe(m1)[,2] + for (i=rows(rown);i;i--) { + val = asarray(A, rown[i]) + if (val!="") rown[i] = val + } + st_matrixrowstripe(m1, (st_matrixrowstripe(m1)[,1], rown)) +} +end + +if c(stata_version)<14 exit +version 14 +mata: +mata set matastrict on + +void estout_rtfencode(string scalar lname) +{ // non-ASCII characters are translated to "\u#?" 
where # is the base 10 code + // (up to code 65535; replacement character is used for larger codes) + real scalar n, l, i, ci + real rowvector c + string scalar s, snew + string rowvector S + + s = st_local(lname) + if (isascii(s)) return + l = ustrlen(s) + snew = "" + for (n=1;n<=l;n=n+200) { + c = frombase(16, /// possible hex formats: \uhhhh or \Uhhhhhhhh + substr(tokens(subinstr(ustrtohex(s, n), "\", " ")), 2, .)) + i = length(c) + S = J(1,i,"") + for (;i;i--) { + ci = c[i] + if (ci<=127) S[i] = char(ci) + else if (ci<=32767) S[i] = "\u" + strofreal(ci) + "?" + else if (ci<=65535) S[i] = "\u" + strofreal(ci-65536) + "?" + else S[i] = "\u65533?" // unicode replacement character \ufffd + } + snew = snew + invtokens(S, "") + } + st_local(lname, snew) +} +end + diff --git a/110/replication_package/replication/ado/plus/e/estout.hlp b/110/replication_package/replication/ado/plus/e/estout.hlp new file mode 100644 index 0000000000000000000000000000000000000000..a0b7d6e34dcffd473dff89eba9df208006dcf580 --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/estout.hlp @@ -0,0 +1,2452 @@ +{smcl} +{* 01feb2017}{...} +{cmd:help estout}{right:also see: {helpb esttab}, {helpb eststo}, {helpb estadd}, {helpb estpost}} +{right: {browse "http://repec.sowi.unibe.ch/stata/estout/"}} +{hline} + +{title:Title} + +{p 4 4 2}{hi:estout} {hline 2} Making regression tables from stored estimates + + +{title:Table of contents} + + {help estout##syn:Syntax} + {help estout##des:Description} + {help estout##opt:Options} + {help estout##exa:Examples} + {help estout##rem:Remarks} + {help estout##ret:Saved results} + {help estout##ref:Backmatter} + +{marker syn} +{title:Syntax} + +{p 8 15 2} +{cmd:estout} [ {help estout##what:{it:what}} ] + [ {cmd:using} {it:filename} ] + [ {cmd:,} {help estout##opt0:{it:options}} ] + +{marker what} + {it:what}{col 30}description + {hline 70} + {it:namelist}{col 30}{...} +tabulate stored estimation sets; {it:namelist} is +{col 32}a name, a list of 
names, or {cmd:_all}; the {cmd:*} and +{col 32}{cmd:?} wildcards are allowed; a name may also be +{col 32}{cmd:.}, meaning the current (active) estimates + + {cmdab:m:atrix:(}{it:name}[{cmd:,} {it:subopts}]{cmd:)}{col 30}{...} +tabulate matrix {it:name} + {cmd:e(}{it:name}[{cmd:,} {it:subopts}]{cmd:)}{col 30}{...} +tabulate matrix {cmd:e(}{it:name}{cmd:)} + {cmd:r(}{it:name}[{cmd:,} {it:subopts}]{cmd:)}{col 30}{...} +tabulate matrix {cmd:r(}{it:name}{cmd:)} + {it:subopts}: + {helpb estout##mfmt:{ul:f}mt}{cmd:(}{it:fmtlist}{cmd:)}{col 30}{...} +set the display format(s) + {helpb estout##mtranspose:{ul:t}ranspose}{col 30}{...} +tabulate transposed matrix + {hline 70} + +{marker opt0} + {it:options}{col 38}description + {hline 70} + Parameter statistics + {helpb estout##cells:{ul:c}ells}{cmd:(}{it:elements and subopts}{cmd:)}{col 38}{...} +contents of the table cells, where +{col 40}an {it:element}'s {it:subopts} are in paren- +{col 40}theses, i.e. {it:element}[{cmd:(}{it:subopts}{cmd:)}] + {it:elements}: + {cmd:b}{col 38}raw coefficient (point estimate) + {cmd:se}{col 38}standard error + {cmd:var}{col 38}variance + {cmd:t}{col 38}t or z statistic + {cmd:z}{col 38}t or z statistic (synonym for {cmd:t}) + {cmd:p}{col 38}p-value + {cmd:ci}{col 38}confidence interval + {cmd:ci_l}{col 38}lower bound of confidence interval + {cmd:ci_u}{col 38}upper bound of confidence interval + {cmd:_star}{col 38}"significance stars" + {cmd:_sign}{col 38}sign of point estimate + {cmd:_sigsign}{col 38}sign and significance of estimate + {cmd:.}{col 38}null element (empty cell) + {cmd:&}{col 38}combine elements in single cell + {it:myel}{col 38}results from {cmd:e(}{it:myel}{cmd:)} + {it:myel}{cmd:[}{it:#}{cmd:]}{col 38}results from row {it:#} in {cmd:e(}{it:myel}{cmd:)} + {it:myel}{cmd:[}{it:rowname}{cmd:]}{col 38}results from row {it:rowname} in {cmd:e(}{it:myel}{cmd:)} + + + {it:subopts} (for each {it:element}, + except for {cmd:.} and {cmd:&}): + [{cmdab:no:}]{helpb 
estout##cstar:{ul:s}tar}{col 38}{...} +attach "significance stars" + {helpb estout##cfmt:{ul:f}mt}{cmd:(}{it:{help estout##fmt:fmt}} [{it:{help estout##fmt:fmt}} ...]{cmd:)}{col 38}{...} +set the display format(s) + {helpb estout##clabel:{ul:l}abel}{cmd:(}{it:string}{cmd:)}{col 38}{...} +define a label for {it:element} + {helpb estout##cpar:par}[{cmd:(}{it:l} {it:r}{cmd:)}] | {cmd:nopar}{col 38}{...} +place results in parentheses + {helpb estout##cvacant:{ul:v}acant}{cmd:(}{it:string}{cmd:)}{col 38}{...} +print {it:string} if coefficient is absent + {helpb estout##cdrop:{ul:d}rop}{cmd:(}{it:droplist}{cmd:)}{col 38}{...} +drop certain individual results + {helpb estout##ckeep:{ul:k}eep}{cmd:(}{it:keeplist}{cmd:)}{col 38}{...} +keep certain individual results + {helpb estout##cpattern:{ul:pat}tern}{cmd:(}{it:pattern}{cmd:)}{col 38}{...} +model selection + {helpb estout##cpvalue:{ul:pval}ue}{cmd:(}{it:name}{cmd:)}{col 38}{...} +set p-values for {cmd:star} (default: {cmd:p}) + [{cmd:no}]{helpb estout##cabs:abs}{col 38}{...} +use absolute t-statistics + [{cmdab:no:}]{helpb estout##ctranspose:{ul:t}ranspose}{col 38}{...} +transpose {cmd:e(}{it:myel}{cmd:)} for tabulation + + {helpb estout##drop:{ul:d}rop}{cmd:(}{it:droplist}{cmd:)}{col 38}{...} +drop individual coefficients + [{cmdab:no:}]{helpb estout##omitted:{ul:omit}ted}{col 38}{...} +include omitted coefficients + [{cmdab:no:}]{helpb estout##baselevels:{ul:base}levels}{col 38}{...} +include base levels + {helpb estout##keep:{ul:k}eep}{cmd:(}{it:keeplist}{cmd:)}{col 38}{...} +keep individual coefficients + {helpb estout##order:{ul:o}rder}{cmd:(}{it:orderlist}{cmd:)}{col 38}{...} +change order of coefficients + {helpb estout##indicate:{ul:i}ndicate}{cmd:(}{it:groups} [{cmd:,} {it:subopt}]{cmd:)}{col 38}{...} +indicate presence of parameters + {it:subopt}: {cmdab:l:abels(}{it:yes} {it:no}{cmd:)}{col 38}{...} +redefine "Yes" and "No" labels + {helpb estout##rename:{ul:ren}ame}{cmd:(}{it:old} {it:new} [{it:old} {it:new} 
...]{cmd:)}{col 38}{...} +rename individual coefficients + {helpb estout##equations:{ul:eq}uations}{cmd:(}{it:eqmatchlist}{cmd:)}{col 38}{...} +match the models' equations + {helpb estout##eform:eform}[{cmd:(}{it:pattern}{cmd:)}] | {cmd:noeform}{col 38}{...} +report exponentiated coefficients + {helpb estout##transform:{ul:tr}ansform}{cmd:(}{it:list} [{cmd:,} {it:subopt}]{cmd:)}{col 38}{...} +apply transformations to coefficients + {it:subopt}: {cmdab:p:attern:(}{it:pattern}{cmd:)}]{cmd:)}{col 38}{...} +select models + {helpb estout##margin:{ul:m}argin}[{cmd:(}{cmd:u}|{cmd:c}|{cmd:p}{cmd:)}] | {cmdab:nom:argin}{col 38}{...} +report marginal effects after {helpb mfx} + {helpb estout##discrete:{ul:di}screte}{cmd:(}{it:string}{cmd:)} | {cmdab:nodi:screte}{col 38}{...} +identify 0/1 variables (if {cmd:margin}) + {helpb estout##meqs:{ul:meq}s}{cmd:(}{it:eq_list}{cmd:)}{col 38}{...} +select equations for marginal effects + {helpb estout##dropped:dropped}[{cmd:(}{it:string}{cmd:)}] | {cmd:nodropped}{col 38}{...} +indicate null coefficients as dropped + {helpb estout##level:level}{cmd:(}{it:#}{cmd:)}{col 38}{...} +set level for confidence intervals + + Summary statistics + {helpb estout##stats:{ul:s}tats}{cmd:(}{it:scalarlist}[{cmd:,} {it:subopts}]{cmd:)}{col 38}{...} +display summary statistics at the +{col 38}bottom of the table + {it:subopts}: + {helpb estout##statsfmt:{ul:f}mt}{cmd:(}{it:{help estout##fmt:fmt}} [{it:{help estout##fmt:fmt}} ...]{cmd:)}{col 38}{...} +set the display formats + {helpb estout##statslabels:{ul:l}abels}{cmd:(}{it:strlist}[{cmd:,} {col 38}{...} +label the summary statistics + {it:{help estout##lsub0:label_subopts}}]{cmd:)} + {helpb estout##statsstar:{ul:s}tar}[{cmd:(}{it:sca'list}{cmd:)}] | {cmdab:nos:tar}{col 38}{...} +denote the model significance + {helpb estout##statslayout:{ul:lay}out}{cmd:(}{it:array}{cmd:)}{col 38}{...} +arrange the summary statistics + {helpb estout##statspchar:{ul:pc}har}{cmd:(}{it:symbol}{cmd:)}{col 38}{...} 
+placeholder in {cmdab:layout()}; default is {cmd:@} + + Significance stars + {helpb estout##starlevels:{ul:starl}evels}{cmd:(}{it:levelslist}{cmd:)}{col 38}{...} +define thresholds and symbols, +{col 40}where '{it:levelslist}' is '{it:symbol} {it:#} +{col 40}[{it:symbol} {it:#} ...]' with {it:#} in (0,1] and +{col 40}listed in descending order + {helpb estout##stardrop:{ul:stard}rop}{cmd:(}{it:droplist}{cmd:)}{col 38}{...} +drop stars for individual coefs + {helpb estout##starkeep:{ul:stark}eep}{cmd:(}{it:keeplist}{cmd:)}{col 38}{...} +keep stars for individual coefs + [{cmdab:no:}]{helpb estout##stardetach:{ul:stard}etach}{col 38}{...} +display the stars in their own column + + Layout + {helpb estout##varwidth:{ul:var}width}{cmd:(}{it:#}{cmd:)}{col 38}{...} +set width of the table's left stub + {helpb estout##modelwidth:{ul:model}width}{cmd:(}{it:#} [{it:#} ...]{cmd:)}{col 38}{...} +set width of the results columns + [{cmdab:no:}]{helpb estout##unstack:{ul:uns}tack}{col 38}{...} +place equations from multiple- +{col 40}equation models in separate columns + {helpb estout##begin:{ul:beg}in}{cmd:(}{it:string}{cmd:)}{col 38}{...} +specify the beginning of the rows + {helpb estout##delimiter:{ul:del}imiter}{cmd:(}{it:string}{cmd:)}{col 38}{...} +specify the column delimiter + {helpb estout##end:end}{cmd:(}{it:string}{cmd:)}{col 38}{...} +specify the ending of the table rows + {helpb estout##incelldel:{ul:incell}delimiter}{cmd:(}{it:string}{cmd:)}{col 38}{...} +specify delimiter within cell + {helpb estout##dmarker:{ul:dm}arker}{cmd:(}{it:string}{cmd:)}{col 38}{...} +define the decimal marker + {helpb estout##msign:{ul:ms}ign}{cmd:(}{it:string}{cmd:)}{col 38}{...} +define the minus sign + [{cmd:no}]{helpb estout##lz:lz}{col 38}{...} +print the leading zero of fixed +{col 40}format numbers in (-1,1) + {helpb estout##extracols:{ul:extra}cols}{cmd:(}{it:numlist}{cmd:)}{col 38}{...} +add empty column to the table + {helpb 
estout##substitute:{ul:sub}stitute}{cmd:(}{it:subst}{cmd:)}{col 38}{...} +apply end-of-pipe substitutions, where +{col 40}'{it:subst}' is '{it:from} {it:to} [{it:from} {it:to} ... ]' + + Labeling + [{cmdab:no:}]{helpb estout##label:{ul:l}abel}{col 38}{...} +make use of variable labels + [{cmdab:no:}]{helpb estout##abbrev:{ul:ab}brev}{col 38}{...} +abbreviate long names and labels + [{cmdab:no:}]{helpb estout##wrap:wrap}{col 38}{...} +wrap long labels (if space permits) + {helpb estout##interaction:{ul:interact}ion}{cmd:(}{it:string}{cmd:)}{col 38}{...} +specify interaction operator + {helpb estout##title:{ul:ti}tle}{cmd:(}{it:string}{cmd:)}{col 38}{...} +specify a title for the table + {helpb estout##note:note}{cmd:(}{it:string}{cmd:)}{col 38}{...} +specify a note for the table + [{cmdab:no:}]{helpb estout##legend:{ul:le}gend}{col 38}{...} +add a significance symbols legend + {helpb estout##prehead:{ul:preh}ead}{cmd:(}{it:strlist}{cmd:)}{col 38}{...} +add text before the table heading + {helpb estout##prehead:{ul:posth}ead}{cmd:(}{it:strlist}{cmd:)}{col 38}{...} +add text after the table heading + {helpb estout##prehead:{ul:pref}oot}{cmd:(}{it:strlist}{cmd:)}{col 38}{...} +add text before the table footer + {helpb estout##prehead:{ul:postf}oot}{cmd:(}{it:strlist}{cmd:)}{col 38}{...} +add text after the table footer + {helpb estout##hlinechar:{ul:hl}inechar}{cmd:(}{it:string}{cmd:)}{col 38}{...} +specify look of {cmd:@hline} + {helpb estout##varlabels:{ul:varl}abels}{cmd:(}{it:matchlist}[{cmd:,} {it:sub.}]{cmd:)} {col 38}{...} +relabel the parameters + {it:subopts}: + {cmdab:bl:ist:(}{it:matchlist}{cmd:)}{col 38}{...} +assign prefixes to certain rows + {cmdab:el:ist:(}{it:matchlist}{cmd:)}{col 38}{...} +assign suffixes to certain rows + {it:{help estout##lsub0:label_subopts}} + {helpb estout##labcol2:{ul:labcol}2}{cmd:(}{it:strlist}[{cmd:,} {it:subopts}]{cmd:)} {col 38}{...} +add a second labeling column + {it:subopts}: + {cmdab:t:itle:(}{it:strlist}{cmd:)}{col 
38}{...} +add column title in table header + {cmdab:w:idth:(}{it:#}{cmd:)}{col 38}{...} +set width of column + {helpb estout##refcat:{ul:ref}cat}{cmd:(}{it:matchlist}[{cmd:,} {it:subopts}]{cmd:)} {col 38}{...} +add reference category information + {it:subopts}: + {cmdab:l:abel:(}{it:string}{cmd:)} | {cmdab:nol:abel}{col 38}{...} +redefine the "ref." label + {cmdab:b:elow}{col 38}{...} +change positioning of refcat + {helpb estout##mlabels:{ul:ml}abels}{cmd:(}{it:strlist}[{cmd:,} {it:subopts}]{cmd:)}{col 38}{...} +label the models + {it:subopts}: + [{cmdab:no:}]{cmdab:dep:vars}{col 38}{...} +use the name/label of the dependent +{col 42}variable as model label + [{cmdab:no:}]{cmdab:ti:tles}{col 38}{...} +use estimates title as model label + [{cmdab:no:}]{cmdab:num:bers}{col 38}{...} +number models labels consecutively + {it:{help estout##lsub0:label_subopts}} + {helpb estout##collabels:{ul:coll}abels}{cmd:(}{it:strlist}[{cmd:,} {col 38}{...} +label the columns within models + {it:{help estout##lsub0:label_subopts}}]{cmd:)} + {helpb estout##eqlabels:{ul:eql}abels}{cmd:(}{it:strlist}[{cmd:,} {it:subopts}]{cmd:)}{col 38}{...} +label the equations + {it:subopts}: + [{cmdab:no:}]{cmdab:m:erge}{col 38}{...} +merge equation and parameter labels + {it:{help estout##lsub0:label_subopts}} + {helpb estout##mgroups:{ul:mgr}oups}{cmd:(}{it:strlist}[{cmd:,} {it:subopts}]{cmd:)}{col 38}{...} +define and label groups of models + {it:subopts}: + {cmdab:pat:tern:(}{it:pattern}{cmd:)}{col 38}{...} +define the grouping of the models + {it:{help estout##lsub0:label_subopts}} + {helpb estout##numbers:{ul:num}bers}[{cmd:(}{it:l} {it:r}{cmd:)}] | {cmdab:nonum:bers}{col 38}{...} +add a row containing model numbers + + Output + [{cmdab:no:}]{helpb estout##replace:{ul:r}eplace}{col 38}{...} +overwrite an existing file + [{cmdab:no:}]{helpb estout##append:{ul:a}ppend}{col 38}{...} +append the output to an existing file + [{cmdab:no:}]{helpb estout##type:{ul:ty}pe}{col 38}{...} +print the table 
in the results window + [{cmd:no}]{helpb estout##showtabs:showtabs}{col 38}{...} +display tabs as {cmd:}s + {helpb estout##topfile:{ul:top}file}{cmd:(}{it:filename}{cmd:)}{col 38}{...} +insert file contents above table + {helpb estout##topfile:{ul:bot}tomfile}{cmd:(}{it:filename}{cmd:)}{col 38}{...} +insert file contents below table + + Defaults + {helpb estout##style:{ul:sty}le}{cmd:(}{it:style}{cmd:)}{col 38}{...} +specify a style for the output table + + {it:styles}: + {cmd:smcl}{col 38}SMCL formatted table (screen default) + {cmd:tab}{col 38}tab delimited table (export default) + {cmd:fixed}{col 38}fixed format table + {cmd:tex}{col 38}table for use with LaTeX + {cmd:html}{col 38}table for use with HTML + {it:mystyle}{col 38}user defined addition + {hline 70} + +{marker lsub0} + {it:{help estout##lsub:label_subopts}}{col 38}Description + {hline 70} + [{cmd:no}]{cmd:none}{col 38}{...} +suppress the labels + {cmdab:p:refix:(}{it:string}{cmd:)}{col 38}{...} +add a common prefix + {cmdab:s:uffix:(}{it:string}{cmd:)}{col 38}{...} +add a common suffix + {cmdab:b:egin:(}{it:strlist}{cmd:)}{col 38}{...} +add an overall prefix + [{cmdab:no:}]{cmdab:f:irst}{col 38}{...} +print the first occurrence of {cmd:begin()} + {cmdab:e:nd:(}{it:strlist}{cmd:)}{col 38}{...} +add an overall suffix + [{cmdab:no:}]{cmdab:l:ast}{col 38}{...} +print the last occurrence of {cmd:end()} + {cmdab:r:eplace}{col 38}{...} +replace global {cmd:begin()}/{cmd:end()} + [{cmd:no}]{cmd:span}{col 38}{...} +span columns if appropriate + {cmdab:er:epeat:(}{it:string}{cmd:)}{col 38}{...} +add a "span" suffix + {cmd:lhs(}{it:string}{cmd:)}{col 38}{...} +label the table's left stub + {hline 70} + +{marker des} +{title:Description} + +{p 4 4 2} + {cmd:estout} assembles a table of coefficients, "significance + stars", summary statistics, standard errors, t- or z-statistics, p-values, + confidence intervals, and other statistics for one or more models + previously fitted and stored by {helpb estimates store} 
or {helpb eststo}. + It then displays the table in Stata's results window or writes it to a text + file specified by {cmd:using}. The default is to use {help smcl:SMCL} + formatting tags and horizontal lines to structure the table. However, + if {cmd:using} is specified, a tab-delimited table without lines + is produced. + +{p 4 4 2} + {it:namelist} provides the names of the stored estimation + sets to be tabulated. You may use the {cmd:*} and {cmd:?} wildcards in + {it:namelist}. The results estimated last may be indicated by a period + ({cmd:.}), even if they have not yet been stored. If no model is + specified, {cmd:estout} tabulates the estimation sets stored by + {cmd:eststo} (see help {helpb eststo}) or, if no such estimates are + present, the currently active estimates (i.e. the model fit last). + {cmd:estout} may be used after any estimation command that + returns its results in {cmd:e()}. + +{p 4 4 2} + See the {help estout##intro:Introduction} in the + {help estout##exa:Examples} section for an introduction on using + {cmd:estout}. See help {helpb estimates} for general information + about managing estimation results. Furthermore, see help {helpb eststo} + for an alternative to the {cmd:estimates store} command. + +{p 4 4 2} + The default for {cmd:estout} is to produce a plain + table containing point estimates. Producing a fully formatted + end-product may involve specifying many options. However, note that a + simple-to-use {cmd:estout} wrapper producing pre-formatted + publication style tables is available as {helpb esttab}. Furthermore, + use {helpb estadd} to make additional results available for + tabulation (such as the standardized coefficients or the means and + standard deviations of the regressors) and {helpb estpost} to + tabulate results from non-estimation commands such as {helpb summarize} + or {helpb tabulate}. + +{p 4 4 2} + {cmd:estout} can also be used to tabulate the contents of a Stata + matrix (see help {helpb matrix}). 
Type {cmd:estout matrix(}{it:name}{cmd:)}, + where {it:name} is the name of the matrix, instead of providing a + {it:namelist} of stored estimation sets. See the + {help estout##ex7:examples} below. Alternatively, you may also specify + {cmd:e(}{it:name}{cmd:)} or {cmd:r(}{it:name}{cmd:)} to tabulate an + {cmd:e()}-matrix or an {cmd:r()}-matrix. The {cmd:cells()} option is + disabled if tabulating a matrix. + +{p 4 4 2} + Programs similar to {cmd:estout} include {cmd:outreg} by John Luke + Gallup, {cmd:outreg2} by Roy Wada, {cmd:modltbl} by John H. Tyler, + {cmd:mktab} by Nicholas Winter, {cmd:outtex} by Antoine Terracol, or + {cmd:est2tex} by Marc Muendler. Also see Newson (2003) for a very + appealing approach. + +{marker opt} +{title:Options} + + Contents + + {help estout##par:Parameter statistics} + {help estout##sum:Summary statistics} + {help estout##sig:Significance stars} + {help estout##lay:Layout} + {help estout##lab:Labeling} + {help estout##out:Output} + {help estout##def:Defaults} + {it:{help estout##lsub:label_subopts}} + {it:{help estout##msub:matrix_subopts}} +{marker par} +{dlgtab:Parameter statistics} +{marker cells} +{p 4 8 2} + {cmd:cells(}{it:array}{cmd:)} specifies the parameter statistics to be + reported and how they are to be arranged. The default is for cells to + report point estimates only, i.e. {cmd:cells(b)}. {cmd:cells(none)} may + be used to completely suppress the printing of parameter statistics. + Alternatively, {cmd:cells(b se)} would result in the reporting of point + estimates and standard errors. Multiple statistics are placed in + separate rows beneath one another by default. However, elements of + {it:array} that are listed in quotes or in parentheses, e.g. + {bind:{cmd:"b se"}} or {bind:{cmd:`"b se"'}} or {bind:{cmd:(b se)}}, + are placed beside one another. 
For example, {bind:{cmd:cells("b p" se)}} + or, equivalently, {bind:{cmd:cells((b p) se)}} would produce a + table with point estimates and p-values beside one another in first row + and standard errors in the second row beneath the point estimates. + +{p 8 8 2} + The parameter statistics available are {cmd:b} (point estimates), + {cmd:se} (standard errors), {cmd:var} (variance), {cmd:t} + (t/z-statistics), {cmd:z} (synonym for {cmd:t}), {cmd:p} (p-values), and + {cmd:ci} (confidence + intervals; to display the lower and upper bounds in separate cells use + {cmd:ci_l} and {cmd:ci_u}). Any additional parameter statistics + included in the {cmd:e()}-returns for the models can be tabulated as + well. If, for example, {cmd:e(beta)} contains the standardized + coefficients, type {cmd:cells(beta)} to tabulate them (use + {helpb estadd} to add statistics such as the standardized coefficients to the + {cmd:e()}-returns of a model). The syntax {it:name}{cmd:[}{it:#}{cmd:]} + or {it:name}{cmd:[}{it:rowname}{cmd:]} can be used to refer to specific + rows in {cmd:e(}{it:name}{cmd:)}. For example, type {cmd:cell(ci_bc[1] ci_bc[2])} + or {cmd:cell(ci_bc[ll] ci_bc[ul])} to tabulate the lower and upper + bounds of the bias-corrected confidence intervals after {helpb bootstrap}. + The default is to report the results from the first row. + Also see the {cmd:eform} and {cmd:transform()} options for more information + on the kinds of statistics that can be displayed. + +{p 8 8 2} + Further available elements in {it:array} are {cmd:_star}, + {cmd:_sign}, and {cmd:_sigsign}. {cmd:_star} causes stars denoting the + significance of the coefficients to be printed (* for p<.05, ** for p<.01, + and *** for p<.001; customizable via the {cmd:starlevels()} option below). + {cmd:_star} places the significance stars in their own cells. See the + {cmd:star} suboption below if you want to attach the stars to another + element. {cmd:_sign} prints the signs of the coefficients ("+", "-", or + "0"). 
{cmd:_sigsign}, a combination of {cmd:_star} and {cmd:_sign}, repeats + the signs of the coefficients where the number of repetitions reflects the + level of significance (non-significant coefficients are left empty; + however, you may set the first level to 1 in the {cmd:starlevels()} + option). + +{p 8 8 2} + Finally, {cmd:.} and {cmd:&} may be used in {it:array}. {cmd:.} inserts a + "null" element. Use this to add empty cells. For example, + {cmd:cells("b p" ". se")} would produce a table with point estimates in the + first column and p-values and standard errors beneath one another in the + second column. {cmd:&} is used to combine elements in the same cell. Use + the {helpb estout##incelldel:incelldelimiter()} option to specify the text to + be printed between the combined elements (the default is to print a + single blank). For example, in HTML, use {cmd:cell(b & se)} and + {cmd:incelldelimiter(
)} to include point estimates and standard + errors in a single cell and insert a line break between them. + +{p 8 8 2} + A set of suboptions may be specified in parentheses for each + element named in {it:array} (except for {cmd:.} and {cmd:&}). For example, + to add significance stars to the coefficients and place the standard errors + in parentheses, specify {bind:{cmd:cells(b(star) se(par))}}. The following + suboptions are available. Use: + {p_end} +{marker cstar} +{p 12 16 2} + {cmd:star} to specify that stars denoting the significance of the + coefficients be attached to the statistic: {cmd:*} for p<.05, + {cmd:**} for p<.01, and {cmd:***} for p<.001. The symbols and the + values for the thresholds and the number of levels are fully customizable + (see the {help estout##sig:Significance stars} options). + {p_end} +{marker cfmt} +{p 12 16 2} + {cmd:fmt(}{it:{help estout##fmt:fmt}} [{it:{help estout##fmt:fmt}} ...]{cmd:)} + to specify the display format(s) of a statistic. It + defaults to {cmd:%9.0g} or the format for the first statistic in + {cmd:cells()}. If only one format is specified, it is used for all + occurrences of the statistic. For example, type + +{p 20 20 2} + {inp:. estout} {it:...}{inp:, cells("b(fmt(3)) t(fmt(2))")} + +{p 16 16 2} + to print coefficients and t-values beside one another using three + decimal places for coefficients and two decimal places for + t-values. If multiple formats are specified, the first format is + used for the first regressor in the estimates table, the second + format for the second regressor, and so on. The last format is used + for the remaining regressors if the number of regressors in the + table is greater than the number of specified formats. For + instance, type + +{p 20 20 2} + {inp:. estout} {it:...}{inp:, cells(b(fmt(3 4 2)))} + +{p 16 16 2} + to use three decimal places for the first coefficient, four decimal + places for the second, and two decimal places for all remaining + coefficients. 
Note that, regardless of the display format chosen, + leading and trailing blanks are removed from the numbers. White + space can be added by specifying a {cmd:modelwidth()} (see the + {help estout##lay:Layout} options). {it:{help estout##fmt:fmt}} may + be any of Stata's numerical display formats, e.g., {cmd:%9.0g} or + {cmd:%8.2f}, an integer {it:#} such as {cmd:1} or {cmd:3} to use a + fixed format with {it:#} decimal places, or {cmd:a}{it:#} such as + {cmd:a1} or {cmd:a3} to use {cmd:estout}'s adaptive display format + (see {help estout##fmt:Numerical formats} in the + {help estout##rem:Remarks} section for details). + {p_end} +{marker clabel} +{p 12 16 2} + {cmd:label(}{it:string}{cmd:)} to specify a label to appear in the + column heading. The default is the name of the statistic. + {p_end} +{marker cpar} +{p 12 16 2} + {cmd:par}[{cmd:(}{it:l} {it:r}{cmd:)}] to specify that the + statistic in question be placed in parentheses. It is also possible + to specify custom "parentheses". For example, {cmd:se(par({ }))} + would display the standard errors in curly brackets. Or, + {cmd:se(par(`"="("' `")""'))} will write parentheses in a way that + Excel can recognize. For {cmd:ci} the syntax is: + +{p 20 20 2} + {cmd:ci(par}[{cmd:(}{it:l} {it:m} {it:r}{cmd:)}]{cmd:)} + {p_end} +{marker cvacant} +{p 12 16 2} + {cmd:vacant(}{it:string}{cmd:)} to print {it:string} if a + coefficient is not in the model. The default is to leave such cells + empty. + {p_end} +{marker cdrop} +{p 12 16 2} + {cmd:drop(}{it:droplist} [{cmd:, relax}]{cmd:)} to cause certain + individual statistics to be dropped. For example, specifying + {cmd:t(drop(_cons))} suppresses the t-statistics for the constants. + {it:droplist} is specified as in the global + {helpb estout##drop:drop()} option (see below). + {p_end} +{marker ckeep} +{p 12 16 2} + {cmd:keep(}{it:keeplist} [{cmd:, relax}]{cmd:)} to cause certain + individual statistics to be kept. 
For example, the specification
+ {cmd:t(keep(mpg))} would display the t-statistics exclusively for
+ the variable {cmd:mpg}. {it:keeplist} is specified analogous to
+ {it:droplist} in {helpb estout##drop:drop()} (see below).
+ {p_end}
+{marker cpattern}
+{p 12 16 2}
+ {cmd:pattern(}{it:pattern}{cmd:)} to designate a pattern of models
+ for which the statistics are to be reported, where the {it:pattern}
+ consists of zeros and ones. A {cmd:1} indicates that the statistic
+ be printed; {cmd:0} indicates that it be suppressed. For example
+ {cmd:beta(pattern(1 0 1))} would result in {cmd:beta} being
+ reported for the first and third models, but not for the second.
+ {p_end}
+{marker cpvalue}
+{p 12 16 2}
+ {cmd:pvalue(}{it:name}{cmd:)} to specify the p-values used to
+ determine the significance stars (see {cmd:star} above). The
+ default is {cmd:pvalue(p)}, indicating that the standard p-values
+ are to be used (i.e. the p-values computed from the coefficients
+ vector and the variance matrix). Alternatively, specify
+ {cmd:pvalue(}{it:mypvalue}{cmd:)}, in which case the significance
+ stars will be determined from the values in
+ {cmd:e(}{it:mypvalue}{cmd:)}. Values outside [0,1] will be ignored.
+ {p_end}
+{marker cabs}
+{p 12 16 2}
+ {cmd:abs} to specify that absolute t-statistics be used instead of
+ regular t-statistics (relevant only if used with {cmd:t()}).
+ {p_end}
+{marker ctranspose}
+{p 12 16 2}
+ {cmd:transpose} to specify that {cmd:e(}{it:myel}{cmd:)} be transposed
+ for tabulation.
+ {p_end}
+{marker drop}
+{p 4 8 2}
+ {cmd:drop(}{it:droplist} [{cmd:, relax}]{cmd:)} identifies the coefficients
+ to be dropped from the table. A {it:droplist} comprises one or more
+ specifications, separated by white space. A specification can be either a
+ parameter name (e.g. {cmd:price}), an equation name followed by a colon
+ (e.g. {cmd:mean:}), or a full name (e.g. {cmd:mean:price}).
You may use the
+ {cmd:*} and {cmd:?} wildcards in equation names and parameter names. Be sure
+ to refer to the matched equation names, and not to the original equation names
+ in the models, when using the {cmd:equations()} option to match equations.
+ Specify the {cmd:relax} suboption to allow {it:droplist} to contain elements
+ for which no match can be found.
+ {p_end}
+{marker omitted}
+{p 4 8 2}
+ {cmd:omitted} includes omitted coefficients (only relevant in Stata 11 or
+ newer). This is the default. Type {cmd:noomitted} to drop omitted
+ coefficients.
+ {p_end}
+{marker baselevels}
+{p 4 8 2}
+ {cmd:baselevels} includes base levels of factor variables (only relevant
+ in Stata 11 or newer). This is the default. Type {cmd:nobaselevels} to drop
+ base levels of factor variables.
+ {p_end}
+{marker keep}
+{p 4 8 2}
+ {cmd:keep(}{it:keeplist} [{cmd:, relax}]{cmd:)} selects the coefficients to
+ be included in the table. {it:keeplist} is specified analogous to {it:droplist} in
+ {helpb estout##drop:drop()} (see above). Note that {cmd:keep()} does {it:not} change
+ the order of the coefficients. Use {cmd:order()} to change the order
+ of coefficients.
+ {p_end}
+{marker order}
+{p 4 8 2}
+ {cmd:order(}{it:orderlist}{cmd:)} changes the order of the
+ coefficients and equations within the table. {it:orderlist} is specified
+ analogous to {it:droplist} in {cmd:drop()} (see above). Reordering of
+ coefficients is performed equation by equation, unless equations are
+ explicitly specified. Coefficients and equations that do not appear in
+ {it:orderlist} are placed last (in their original order). Extra table rows
+ are inserted for elements in {it:orderlist} that are not found in the
+ table.
+ {p_end} +{marker indicate} +{p 4 8 2} + {cmd:indicate(}{it:groups} [{cmd:,} {cmdab:l:abels(}{it:yes} + {it:no}{cmd:)}]{cmd:)} indicates for each model (or, if {cmd:unstack} + is specified, for each equation) the presence of certain groups of + coefficients at the end of the table body. The syntax for {it:groups} + is + + "{it:group}" [ "{it:group}" {it:...} ] + +{p 8 8 2} + where a {it:group} is + + [{it:name} = ] {it:list} + +{p 8 8 2} + and {it:list} is a list of coefficient specifications as defined in + {cmd:drop()} above. The single groups should be enclosed in quotes + unless there is only one group and {it:name} is specified. Note that + {it:name} may contain spaces. + +{p 8 8 2} + For example, if some of the models contain a set of year + dummies, say {cmd:y1 y2 y3}, specify + + {com}estout{txt} {it:...}{com}, indicate(year effects = y1 y2 y3){txt} + +{p 8 8 2} + to drop the dummies from the table and add a "year effects" row + containing "Yes" for models in which {it:at least one} of the dummies + is present, and "No" for the other models. Furthermore, if some models + also contain a set of region dummies, say {cmd:reg_1} through + {cmd:reg_17}, you could type + + {com}estout{txt} {it:...}{com}, indicate("years = y1 y2 y3" "regions = reg_*"){txt} + +{p 8 8 2} + Use the {cmd:labels()} suboption to redefine the indication labels to + be printed in the table. The default is {cmd:labels(Yes No)}. Use + quotes if the labels include spaces, + e.g. {bind:{cmd:labels("in model" "not in model")}}. + {p_end} +{marker rename} +{p 4 8 2} + {cmd:rename(}{it:matchlist}{cmd:)} changes the names of individual + coefficients, where {it:matchlist} is + +{p 12 12 2} + {it:oldname} {it:newname} [{it:oldname} {it:newname} ...] + +{p 8 8 2} + {it:oldname} can be a parameter name (e.g. {cmd:price}) or a full + name including an equation specification (e.g. 
{cmd:mean:price}) + (abbreviation and wildcards not allowed); {it:newname} is a name without + equation specification and must not already occur in a model's equation. + {cmd:rename()} is applied before matching the models and equations and can + therefore be used to merge different coefficients across models (or + equations if {cmd:unstack} is specified) into a single table row. See the + {cmd:varlabels()} option if you are interested in relabeling coefficients + after matching models and equations. + {p_end} +{marker equations} +{p 4 8 2} + {cmd:equations(}{it:matchlist}{cmd:)} specifies how the models' equations are + to be matched. The default is to match all first equations into one equation + (named {cmd:main}, if the equations have different names) and match the remaining + equations by name. Specify {cmd:equations("")} to match all equations by + name. Alternatively, specify {it:matchlist}, which has the syntax + + {it:term} [{cmd:,} {it:term} ... ] + +{p 8 8 2} + where {it:term} is + + [{it:eqname} {cmd:=}] {it:#}{cmd::}{it:#}...{cmd::}{it:#}{col 50}(syntax 1) + + [{it:eqname} {cmd:=}] {it:#}{col 50}(syntax 2) + +{p 8 8 2} + In syntax 1, each {it:#} is a number or a period ({cmd:.}). If a number, it + specifies the position of the equation in the corresponding model; + {cmd:1:3:1} would indicate that equation 1 in the first model matches equation + 3 in the second, which matches equation 1 in the third. A period indicates + that there is no corresponding equation in the model; {cmd:1:.:1} indicates + that equation 1 in the first matches equation 1 in the third. + +{p 8 8 2} + In syntax 2, you specify just one number, say, {cmd:1} or {cmd:2}, and that + is shorthand for {cmd:1:1}...{cmd::1} or {cmd:2:2}...{cmd::2}, meaning that + equation 1 matches across all models specified or that equation 2 matches + across all models specified. + +{p 8 8 2} + {it:eqname} is used to name the matched equations. 
If it is suppressed, + a name such as {cmd:#1} or {cmd:#2} etc. is used, depending on the position + of the {it:term}. For example, {cmd:equations(1)} + indicates that all first equations are to be matched into one equation + named {cmd:#1}. All equations not matched by position are + matched by name. + {p_end} +{marker eform} +{p 4 8 2} + {cmd:eform}[{cmd:(}{it:pattern}{cmd:)}] displays the coefficient table in + exponentiated form. The exponent of {cmd:b} is displayed in lieu of the + untransformed coefficient; standard errors and confidence intervals are + transformed as well. Specify a {it:pattern} if the exponentiation is to be + applied only for certain models. For instance, {cmd:eform(1 0 1)} would + transform the statistics for Models 1 and 3, but not for Model 2. Note that, + unlike {cmd:regress} and {cmd:estimates table}, {cmd:estout} in + eform-mode does not suppress the display of the intercept. To drop the + intercept in eform-mode, specify {cmd:drop(_cons)}. Note: {cmd:eform} is + implemented via the {cmd:transform()} option. If both options are specified, + {cmd:transform()} takes precedence over {cmd:eform}. + {p_end} +{marker transform} +{p 4 8 2} + {cmd:transform(}{it:list} [, {cmd:pattern(}{it:pattern}{cmd:)}]{cmd:)} + displays transformed coefficients, standard errors and + confidence intervals. {it:list} may be + + {it:fx} {it:dfx} + +{p 8 8 2} + where {it:fx} is the transformation function and {it:dfx} is its first + derivative. {it:fx} is applied to coefficients and confidence + intervals, that is, {it:fx}({cmd:b}) and {it:fx}({cmd:ci}) is displayed + instead of {cmd:b} and {cmd:ci}. {it:dfx} is used to delta transform + standard errors, i.e. {cmd:se}*{it:dfx}({cmd:b}) is displayed instead + of {cmd:se}. Use {cmd:@} as a placeholder for the function's argument + in {it:fx} and {it:dfx}. 
For example, type + + {com}estout{txt} {it:...}{com}, transform(exp(@) exp(@)){txt} + +{p 8 8 2} + to report exponentiated results (this is equivalent to specifying + the {cmd:eform} option). + +{p 8 8 2} + Alternatively, {it:list} may be specified as + +{p 12 12 2} + {it:coefs} {it:fx} {it:dfx} [ {it:...} [{it:coefs}] {it:fx} {it:dfx} ] + +{p 8 8 2} + where {it:coefs} identifies the coefficients + to be transformed. Syntax for {it:coefs} is as explained above in the + description of the {cmd:drop()} option (however, include {it:coefs} + in quotes if it contains multiple elements). Say, a model has + two equations, {cmd:price} and {cmd:select}, and you want to + exponentiate the {cmd:price} equation but not the {cmd:select} + equation. You could then type + + {com}estout{txt} {it:...}{com}, transform(price: exp(@) exp(@)){txt} + +{p 8 8 2} + Note that omitting {it:coef} in the last transformation + specification causes the last transformation to be applied to + all remaining coefficients. + +{p 8 8 2} + Specify the {cmd:pattern()} suboption if the transformations are to be + applied only for certain models. For instance, {cmd:pattern(1 0 1)} would + apply the transformation to Models 1 and 3, but not Model 2. + {p_end} +{marker margin} +{p 4 8 2} + {cmd:margin}[{cmd:(}{cmd:u}|{cmd:c}|{cmd:p}{cmd:)}] indicates that the + marginal effects or elasticities be reported instead of the raw + coefficients. This option has an effect only if {cmd:mfx} has been + applied to a model before its results were stored (see help {helpb mfx}) or if a + {cmd:dprobit} (see help {helpb probit}), {cmd:truncreg,marginal} + (help {helpb truncreg}), or {cmd:dtobit} (Cong 2000) model is estimated. One + of the parameters {cmd:u}, {cmd:c}, or {cmd:p}, corresponding to the + unconditional, conditional, and probability marginal effects, respectively, + is required for {cmd:dtobit}. Note that the standard errors, confidence + intervals, t-statistics, and p-values are transformed as well. 
+ +{p 8 8 2} + Using the {cmd:margin} option with multiple-equation models can be tricky. + The marginal effects of variables that are used in several equations are + printed repeatedly for each equation because the equations per se are + meaningless for {cmd:mfx}. To display the effects for certain equations only, + specify the {cmd:meqs()} option. Alternatively, use the {cmd:keep()} and + {cmd:drop()} options to eliminate redundant rows. The {cmd:equations()} + option might also be of help here. + +{p 8 8 2} + As of Stata 11, the use of {helpb mfx} is no longer suggested, since + {helpb mfx} has been superseded by {helpb margins}. Results from + {helpb margins} can directly be tabulated by {cmd:estout} as long as + the {cmd:post} option is specified with {helpb margins}. Alternatively, + you may add results from {helpb margins} to an existing + model using {helpb estadd:estadd margins} or + {helpb estpost:estpost margins}. See + {browse "http://repec.sowi.unibe.ch/stata/estout/coefficients.html#002"} for + an example on tabulating results from {helpb margins}. + {p_end} +{marker discrete} +{p 4 8 2} + {cmd:discrete(}{it:string}{cmd:)} may be used to override the default symbol and + explanatory text used to identify dummy variables when applying the + {helpb estout##margin:margin} option. The first token in {it:string} is + used as the symbol. The default is: + +{p 12 12 2} + {inp:discrete(" (d)" for discrete change of dummy variable from 0 to 1)} + +{p 8 8 2} + To display explanatory text, specify either the {cmd:legend} option or use + the {cmd:@discrete} variable (see the + {help estout##atvar:Remarks on using @-variables}). + +{p 8 8 2} + Use {cmd:nodiscrete} to disable the identification of dummy variables as + such. 
The default is to indicate the dummy variables unless they have been + interpreted as continuous variables in all of the models for which results are + reported (for {cmd:dprobit} and {cmd:dtobit}, however, dummy variables will + always be listed as discrete variables unless {cmd:nodiscrete} is specified). + {p_end} +{marker meqs} +{p 4 8 2} + {cmd:meqs(}{it:eq_list}{cmd:)} specifies that marginal effects requested + by the {helpb estout##margin:margin} option be printed only for the + equations in {it:eq_list}. Specifying this option does not affect how the + marginal effects are calculated. An {it:eq_list} comprises one or more equation + names (without colons) separated by white space. If you use the + {cmd:equations()} option to match equations, be sure to refer to the matched + equation names and not to the original equation names in the models. + {p_end} +{marker dropped} +{p 4 8 2} + {cmd:dropped}[{cmd:(}{it:string}{cmd:)}] causes null coefficients + (coefficients for which {cmd:e(b)} and {cmd:e(V)} is zero) to be indicated + as dropped. {it:string} specifies the text to be printed in place of + the estimates. The default text is "(dropped)". + {p_end} +{marker level} +{p 4 8 2} + {cmd:level(}{it:#}{cmd:)} assigns the confidence level, in percent, for + the confidence intervals of the coefficients (see help {help level}). + +{marker sum} +{dlgtab:Summary statistics} +{marker stats} +{p 4 8 2} + {cmd:stats(}{it:scalarlist}[{cmd:,} {it:stats_subopts}]{cmd:)} specifies + one or more scalar statistics - separated by white space - to be displayed + at the bottom of the table. The {it:scalarlist} may contain numeric + {cmd:e()}-scalars such as, e.g., {cmd:N}, {cmd:r2}, or {cmd:chi2}, but also + string {cmd:e()}-macros such as {cmd:cmd} or {cmd:depvar}. 
In + addition, the following statistics are available: + +{p 12 24 2} + {cmd:aic}{space 5}Akaike's information criterion{p_end} +{p 12 24 2} + {cmd:bic}{space 5}Schwarz's information criterion{p_end} +{p 12 24 2} + {cmd:rank}{space 4}rank of {cmd:e(V)}, i.e. the number of free + parameters in model{p_end} +{p 12 24 2} + {cmd:p}{space 7}the p-value of the model (overall model significance) + +{p 8 8 2} + See {bf:[R] estimates table} for details on the {cmd:aic} and {cmd:bic} statistics. + The rules for the determination of {cmd:p} are as follows (note that although + the procedure outlined below is appropriate for most models, there might be + some models for which it is not): + +{p 12 15 2} + 1) p-value provided: If the {cmd:e(p)} scalar is provided by the + estimation command, it will be interpreted as indicating the p-value + of the model. + +{p 12 15 2} + 2) F test: If {cmd:e(p)} is not provided, {cmd:estout} checks for the + presence of the {cmd:e(df_m)}, {cmd:e(df_r)}, and {cmd:e(F)} + scalars and, if they are present, the p-value of the model will be + calculated as {cmd:Ftail(df_m,df_r,F)}. This p-value corresponds to + the standard overall F test of linear regression. + +{p 12 15 2} + 3) chi2 test: Otherwise, if neither {cmd:e(p)} nor {cmd:e(F)} is + provided, {cmd:estout} checks for the presence of {cmd:e(df_m)} and + {cmd:e(chi2)} and, if they are present, calculates the p-value as + {cmd:chi2tail(df_m,chi2)}. This p-value corresponds to the + Likelihood-Ratio or Wald chi2 test. + +{p 12 15 2} + 4) If neither {cmd:e(p)}, {cmd:e(F)}, nor {cmd:e(chi2)} + is available, no p-value will be reported. + +{p 8 8 2} + Type {cmd:ereturn list} after estimating a model to see a list of + the returned {cmd:e()}-scalars and macros (see help {helpb ereturn}). Use + the {helpb estadd} command to add extra statistics and + other information to the {cmd:e()}-returns. + +{p 8 8 2} + The following {it:stats_subopts} are available. 
Use:
+ {p_end}
+{marker statsfmt}
+{p 12 16 2}
+ {cmd:fmt(}{it:{help estout##fmt:fmt}} [{it:{help estout##fmt:fmt}} {it:...}]{cmd:)}
+ to set the display formats for the scalar statistics in {it:scalarlist}.
+ {it:{help estout##fmt:fmt}} may be any of Stata's numerical display
+ formats, e.g., {cmd:%9.0g} or {cmd:%8.2f}, an integer {it:#} such as
+ {cmd:1} or {cmd:3} to use a fixed format with {it:#} decimal places, or
+ {cmd:a}{it:#} such as {cmd:a1} or {cmd:a3} to use {cmd:estout}'s adaptive
+ display format (see {help estout##fmt:Numerical formats} in the {help
+ estout##rem:Remarks} section for details). For example, {cmd:fmt(3 0)}
+ would be suitable for {cmd:stats(r2_a N)}. Note that the last specified
+ format is used for the remaining scalars if the list of scalars is longer
+ than the list of formats. Thus, only one format needs to be specified if
+ all scalars are to be displayed in the same format. If no format is
+ specified, the default format is the display format of the coefficients.
+ {p_end}
+{marker statslabels}
+{p 12 16 2}
+ {cmd:labels(}{it:strlist}[{cmd:,} {it:{help estout##lsub:label_subopts}}]{cmd:)}
+ to specify labels for rows containing the scalar statistics. If
+ specified, the labels are used instead of the scalar names. For example:
+
+{p 20 20 2}
+ {inp:. estout} {it:...}{inp:, stats(r2_a N, labels("Adj. R-Square" "Number of Cases"))}
+
+{p 16 16 2}
+ Note that names like {cmd:r2_a} produce an error in LaTeX because the
+ underscore character has a special meaning in LaTeX (to print the
+ underscore in LaTeX, type {cmd:\_}). Use the {cmd:labels()} suboption to
+ rename such statistics, e.g. {cmd:stats(r2_a, labels(r2\_a))}. An alternative
+ approach is to use {cmd:estout}'s {cmd:substitute()} option (see the
+ {help estout##lay:Layout} options).
+ {p_end}
+{marker statsstar}
+{p 12 16 2}
+ {cmd:star}[{cmd:(}{it:scalarlist}{cmd:)}] to specify that the overall
+ significance of the model be denoted by stars.
The stars are attached to
+ the scalar statistics specified in {it:scalarlist}. If
+ {it:scalarlist} is omitted, the stars are attached to the first
+ reported scalar statistic. The printing of the stars is suppressed in
+ empty results cells (i.e. if the scalar statistic in question is missing
+ for a certain model). The determination of the model significance is
+ based on the p-value of the model (see above).
+
+{p 16 16 2}
+ Hint: It is possible to attach the stars to different scalar statistics
+ within the same table. For example, specify
+ {cmd:stats(,star(r2_a r2_p))}
+ when tabulating OLS estimates and, say, probit estimates. For
+ the OLS models, the F test will be carried out and the significance
+ stars will be attached to the {cmd:r2_a}; for the probit models, the
+ chi2 test will be used and the stars will appear next to the
+ {cmd:r2_p}.
+ {p_end}
+{marker statslayout}
+{p 12 16 2}
+ {cmd:layout(}{it:array}{cmd:)} to rearrange the summary statistics. The default
+ is to print the statistics in separate rows beneath one another (in
+ each model's first column). The syntax for {it:array} is
+
+ <{it:row}> [ <{it:row}> ... ]
+
+{p 16 16 2}
+ where {it:row} is
+
+ <{it:cell}> [ <{it:cell}> ... ]
+
+{p 16 16 2}
+ and {cmd:@} is used as a placeholder for the statistics, one
+ after another. Rows and cells that contain blanks
+ have to be embraced in quotes. For example,
+
+ {com} ... stats(chi2 df_m N, layout("@ @" @)){txt}
+
+{p 16 16 2}
+ prints for each model in row 1/column 1 the chi-squared, in
+ row 1/column 2 the degrees of freedom, and in row 2/column 1 the number of
+ observations. Cells may contain multiple statistics and text other than
+ the placeholder symbol is printed as is (provided the cells' statistics are part
+ of the model). For example,
+
+ {com} ... stats(chi2 df_m N, layout(`""@ (@)""' @)){txt}
+
+{p 16 16 2}
+ prints a cell containing "chi2 (df_m)" in the first row and the
+ number of observations in the second row.
Note that the number of columns + in the table only depends on the {cmd:cells()} option (see above) and not + on the {cmd:layout()} suboption. If, for example, the table has two columns + per model and you specify three columns of summary statistics, the summary statistics + in the third column are not printed. + {p_end} +{marker statspchar} +{p 12 16 2} + {cmd:pchar(}{it:symbol}{cmd:)} to specify the placeholder symbol + used in {cmdab:layout()}. The default placeholder is {cmd:@}. + +{marker sig} +{dlgtab:Significance stars} +{marker starlevels} +{p 4 8 2} + {cmd:starlevels(}{it:levelslist}{cmd:)} overrides the default thresholds and + symbols for "significance stars". For instance, + {bind:{cmd:starlevels(+ 0.10 * 0.05)}} + sets the following thresholds: {cmd:+} for p<.10 and {cmd:*} for + p<.05. Note that the thresholds must lie in the (0,1] interval and must be + specified in descending order. To, for example, denote insignificant + results, type {bind:{cmd:starlevels(* 1 "" 0.05)}}. + {p_end} +{marker stardrop} +{p 4 8 2} + {cmd:stardrop(}{it:droplist} [{cmd:, relax}]{cmd:)} + identifies the coefficients for which the significance stars be + suppressed. {it:droplist} is specified as in + {helpb estout##drop:drop()} (see above). + {p_end} +{marker starkeep} +{p 4 8 2} + {cmd:starkeep(}{it:keeplist} [{cmd:, relax}]{cmd:)} selects the coefficients + for which the significance stars, if requested, be printed. {it:keeplist} + is specified analogous to {it:droplist} in + {helpb estout##drop:drop()} (see above). + {p_end} +{marker stardetach} +{p 4 8 2} + {cmd:stardetach} specifies that a delimiter be placed between the statistics + and the significance stars (i.e. that the stars are to be displayed in their + own column). + +{marker lay} +{dlgtab:Layout} +{marker varwidth} +{p 4 8 2} + {cmd:varwidth(}{it:#}{cmd:)} specifies the number of characters used to display + the names (labels) of regressors and statistics (i.e. 
{cmd:varwidth} + specifies the width of the table's left stub). Long names (labels) are + abbreviated (depending on the {cmd:abbrev} option) and short or empty + cells are padded out with blanks to fit the width specified by the user. + {cmd:varwidth} set to 0 means that the names are not + abbreviated and no white space is added. Specifying low values may cause + misalignment. + {p_end} +{marker modelwidth} +{p 4 8 2} + {cmd:modelwidth(}{it:#} [{it:#} ...]{cmd:)} designates the number of characters + used to display the results columns. If a non-zero {cmd:modelwidth} is + specified, model names are abbreviated if necessary (depending on the + {cmd:abbrev} option) and short or empty results cells are padded out + with blanks. In contrast, {cmd:modelwidth} does not shorten or truncate + the display of the results themselves (coefficients, t-statistics, + summary statistics, etc.) although it may add blanks if needed. + {cmd:modelwidth} set to 0 means that the model names are not + abbreviated and no white space is added. Specifying low values may + cause misalignment. Specify a list of numbers in {cmd:modelwidth()} to + assign individual widths to the different results columns (the list is + recycled if there are more columns than numbers). + +{p 8 8 2} + The purpose of {cmd:modelwidth} is to be able to construct a fixed-format + table and thus make the raw table more readable. Be aware, however, that the + added blanks may cause problems with the conversion to a table in word + processors or spreadsheets. + {p_end} +{marker unstack} +{p 4 8 2} + {cmd:unstack} specifies that the individual equations from multiple-equation + models (e.g. {cmd:mlogit}, {cmd:reg3}, {cmd:heckman}) be placed in + separate columns. The default is to place the equations below one another in a + single column. 
Summary statistics will be reported for each equation if + {cmd:unstack} is specified and the estimation command is either {cmd:reg3}, + {cmd:sureg}, or {cmd:mvreg} (see help {helpb reg3}, help {helpb sureg}, + help {helpb mvreg}). + {p_end} +{marker begin} +{p 4 8 2} + {cmd:begin(}{it:string}{cmd:)} specifies a string to be printed at the + beginning of every table row. It is possible to + use special functions such as {cmd:_tab} or {cmd:_skip} in + {cmd:begin()}. For more information on using such functions, see the + description of the functions in help {helpb file}. + {p_end} +{marker delimiter} +{p 4 8 2} + {cmd:delimiter(}{it:string}{cmd:)} designates the delimiter used between the + table columns. See the {cmd:begin} option above for further details. + {p_end} +{marker end} +{p 4 8 2} + {cmd:end(}{it:string}{cmd:)} specifies a string to be printed at the end of + every table row. See the {cmd:begin} option above for further details. + {p_end} +{marker incelldel} +{p 4 8 2} + {cmd:incelldelimiter(}{it:string}{cmd:)} specifies text to be printed + between parameter statistics that have been combined in a single cell + by the {cmd:&} operator. See the {helpb estout##par:cells()} option + for details. The default string is a single blank. + {p_end} +{marker dmarker} +{p 4 8 2} + {cmd:dmarker(}{it:string}{cmd:)} specifies the form of the decimal marker. The + standard decimal symbol (a period or a comma, depending on the input provided + to {cmd:set dp}; see help {help format}) is replaced by {it:string}. + {p_end} +{marker msign} +{p 4 8 2} + {cmd:msign(}{it:string}{cmd:)} determines the form of the minus sign. The + standard minus sign ({cmd:-}) is replaced by {it:string}. + {p_end} +{marker lz} +{p 4 8 2} + {cmd:lz} specifies that the leading zero of fixed format numbers in the + interval (-1,1) be printed. This is the default. 
Use {cmd:nolz} to advise
+ {cmd:estout} to omit the leading zeros (that is, to print numbers like
+ {cmd:0.021} or {cmd:-0.33} as {cmd:.021} and {cmd:-.33}).
+ {p_end}
+{marker extracols}
+{p 4 8 2}
+ {cmd:extracols(}{it:{help numlist}}{cmd:)} inserts empty table columns
+ at the indicated positions. For example, {cmd:extracols(1)} adds
+ an extra column between the left stub of the table and the first
+ column.
+ {p_end}
+{marker substitute}
+{p 4 8 2}
+ {cmd:substitute(}{it:subst_list}{cmd:)} specifies that the substitutions
+ specified in {it:subst_list} be applied to the estimates table after it has
+ been created. Specify {it:subst_list} as a list of substitution pairs, that
+ is:
+
+{p 12 12 2}
+ {it:from} {it:to} [{it:from} {it:to} ...]
+
+{p 8 8 2}
+ For example, specify {cmd:substitute(_ \_)} to replace the underscore
+ character (as in {cmd:_cons} or {cmd:F_p}) with its LaTeX equivalent {cmd:\_}.
+
+{marker lab}
+{dlgtab:Labeling}
+{marker label}
+{p 4 8 2}
+ {cmd:label} specifies that variable labels be displayed instead of variable
+ names in the left stub of the table.
+ {p_end}
+{marker abbrev}
+{p 4 8 2}
+ {cmd:abbrev} specifies that long names and labels be abbreviated if
+ a {cmd:modelwidth()} and/or a {cmd:varwidth()} is specified.
+ {p_end}
+{marker wrap}
+{p 4 8 2}
+ {cmd:wrap} causes long variable labels to be wrapped if space permits and
+ a {cmd:varwidth()} is specified. The {cmd:wrap} option is only useful if
+ several parameter statistics are printed beneath one another and, therefore,
+ white space is available beneath the labels.
+ {p_end}
+{marker interaction}
+{p 4 8 2}
+ {cmd:interaction(}{it:string}{cmd:)} specifies the string to be used
+ as delimiter for interaction terms (only relevant in Stata 11 or newer). The
+ default is {cmd:interaction(" # ")}. For {cmd:style(tex)} the default is
+ {cmd:interaction(" $\times$ ")}.
+ {p_end}
+{marker title}
+{p 4 8 2}
+ {cmd:title(}{it:string}{cmd:)} may be used to specify a title for the table.
+ The {it:string} is printed at the top of the table unless {cmd:prehead()},
+ {cmd:posthead()}, {cmd:prefoot()}, or {cmd:postfoot()} is specified. In
+ the latter case, the variable {cmd:@title} can be used to insert the title.
+ {p_end}
+{marker note}
+{p 4 8 2}
+ {cmd:note(}{it:string}{cmd:)} may be used to specify a note for the table.
+ The {it:string} is printed at the bottom of the table unless {cmd:prehead()},
+ {cmd:posthead()}, {cmd:prefoot()}, or {cmd:postfoot()} is specified. In
+ the latter case, the variable {cmd:@note} can be used to insert the note.
+ {p_end}
+{marker legend}
+{p 4 8 2}
+ {cmd:legend} adds a legend explaining the significance symbols and
+ thresholds.
+ {p_end}
+{marker prehead}
+{p 4 8 2}
+ {cmd:prehead(}{it:strlist}{cmd:)}, {cmd:posthead(}{it:strlist}{cmd:)},
+ {cmd:prefoot(}{it:strlist}{cmd:)}, and {cmd:postfoot(}{it:strlist}{cmd:)} may
+ be used to define lists of text lines to appear before and after the table
+ heading or the table footer. For example, the specification
+
+{p 12 12 2}
+ {inp:. estout} {it:...}{inp:, prehead("\S_DATE \S_TIME" "")}
+
+{p 8 8 2}
+ would add a line containing the current date and time followed by
+ an empty line before the table. Various substitution functions can be used
+ as part of the text lines specified in {it:strlist} (see the
+ {help estout##atvar:Remarks on using @-variables}). For example,
+ {cmd:@hline} plots a horizontal "line" (series of dashes, by default; see
+ the {cmd:hlinechar()} option) or {cmd:@M} inserts the number of models
+ in the table. {cmd:@M} could be used in a LaTeX table heading
+ as follows:
+
+{p 12 12 2}
+ {inp:. estout} {it:...}{inp:, prehead(\begin{tabular}{l*{@M}{r}})}
+ {p_end}
+{marker hlinechar}
+{p 4 8 2}
+ {cmd:hlinechar(}{it:string}{cmd:)} specifies the character(s) to be
+ used in {cmd:@hline}.
The default is {cmd:hlinechar(-)}, resulting in a + dashed line. To produce a solid line, specify {cmd:hlinechar(`=char(151)')} + (Windows only; other systems may use other codes). + {p_end} +{marker varlabels} +{p 4 8 2} + {cmd:varlabels(}{it:matchlist}[{cmd:,} {it:suboptions}]{cmd:)} may be used to + relabel the regressors from the models, where {it:matchlist} is + +{p 12 12 2} + {it:name} {it:label} [{it:name} {it:label} ...] + +{p 8 8 2} + A {it:name} is a parameter name (e.g. {cmd:price}) or a full name + (e.g. {cmd:mean:price}) (abbreviation and wildcards + not allowed). For example, specify {cmd:varlabels(_cons Constant)} to replace + each occurrence of {cmd:_cons} with {cmd:Constant}. (Note that, in LaTeX, + the underscore character produces an error unless it is specified as + {cmd:\_}. Thus, names such as {cmd:_cons} should always be changed if + the estimates table is to be used with LaTeX. The {cmd:substitute()} option may also be + helpful; see the {help estout##lay:Layout} options.) The {it:suboptions} are: + +{p 12 16 2} + {cmd:blist(}{it:matchlist}{cmd:)} to assign specific prefixes to + certain rows in the table body. Specify the {it:matchlist} as pairs of + regressors and prefixes, that is: + +{p 20 20 2} + {it:name} {it:prefix} [{it:name} {it:prefix} ...] + +{p 16 16 2} + A {it:name} is a parameter name (e.g. {cmd:price}), an equation name + followed by a colon (e.g. {cmd:mean:}), or a full name + (e.g. {cmd:mean:price}) (abbreviation and wildcards + not allowed). Note that equation names cannot be used if the + {cmd:unstack} option is specified. + +{p 12 16 2} + {cmd:elist(}{it:matchlist}{cmd:)} to assign specific suffixes to + certain rows in the table body (see the analogous {cmd:blist()} option + above). This option may, for example, be useful for separating + thematic blocks of variables by + adding vertical space at the end of each block. A LaTeX example: + +{p 20 20 2} + {inp:. 
estout} {it:...}{inp:, varlabels(,elist(price \addlinespace mpg \addlinespace))} + +{p 16 16 2} + (the macro {cmd:\addlinespace} is provided by the + {cmd:booktabs} package in LaTeX) + +{p 12 16 2} + {it:{help estout##lsub:label_subopts}}, which are + explained in their own section. + {p_end} +{marker labcol2} +{p 4 8 2} + {cmd:labcol2(}{it:strlist}[{cmd:,} {it:suboptions}]{cmd:)} adds a second column + containing additional labels for the coefficients and summary statistics. Labels + containing spaces should be embraced in double quotes ({bind:{cmd:"}{it:label 1}{cmd:"}} + {bind:{cmd:"}{it:label 2}{cmd:"}} etc.). An example would be to add a column + indicating the hypothesized directions of effects, e.g., + + {com}. estout {txt}{it:...}{com}, labcol2(+ - +/- + 0){txt} + +{p 8 8 2} + The {it:suboptions} are: + +{p 12 16 2} + {cmd:title(}{it:strlist}{cmd:)} to add text in the table header above + the column. Use double quotes to break the title into several + rows (given there are multiple header rows), i.e. specify {it:strlist} + as {bind:{cmd:"}{it:line 1}{cmd:"}} {bind:{cmd:"}{it:line 2}{cmd:"}} etc. + +{p 12 16 2} + {cmd:width(}{it:#}{cmd:)} to set the width, in number of characters, of the + column. The default is the value of {cmd:modelwidth()}. + {p_end} +{marker refcat} +{p 4 8 2} + {cmd:refcat(}{it:matchlist}[{cmd:,} {it:suboptions}]{cmd:)} may be used to + insert a row containing information on the reference category + of a categorical variable in the model. {it:matchlist} is + +{p 12 12 2} + {it:name} {it:refcat} [{it:name} {it:refcat} ...] + +{p 8 8 2} + A {it:name} is a parameter name (e.g. {cmd:_Irep78_2}) + (abbreviation and wildcards not allowed). For + example, assume that you include the categorical variable {cmd:rep78} + ("Repair Record 1978" from the auto dataset) in some of your models + using {cmd:xi} (see help {helpb xi}). 
Since {cmd:rep78} has five + levels, 1 through 5, {cmd:xi} will create 4 dummy variables, + {cmd:_Irep78_2} through {cmd:_Irep78_5}. You can now type + +{p 12 12 2} + {inp:. estout} {it:...}{inp:, refcat(_Irep78_2 _Irep78_1)} + +{p 8 8 2} + to add a table row containing "_Irep78_1" in the left stub and + "ref." in each column in which the {cmd:_Irep78_2} + dummy appears. The {it:suboptions} are: + +{p 12 16 2} + {cmd:label(}{it:string}{cmd:)} to specify the label that is printed + in the table columns. The default is {cmd:label(ref.)}. Type {cmd:nolabel} + to suppress the default label. + +{p 12 16 2} + {cmd:below} to position the reference category row below the specified + coefficient's row. The default is above. For example, if the 5th + category of {cmd:rep78} is used as reference category, i.e. if + {cmd:_Irep78_1} through {cmd:_Irep78_4} are included in the models, + you might want to type {cmd:refcat(_Irep78_4 _Irep78_5, below)}. + {p_end} +{marker mlabels} +{p 4 8 2} + {cmd:mlabels(}{it:strlist}[{cmd:,} {it:suboptions}]{cmd:)} determines the + model captions printed in the table heading. The default is to use the names of + the stored estimation sets (or their titles, if the {cmd:label} option is + specified and titles are available). The {it:suboptions} for use with + {cmd:mlabels} are: + +{p 12 16 2} + {cmd:depvars} to specify that the name (or label) of the (first) dependent + variable of the model be used as model label. + +{p 12 16 2} + {cmd:titles} to specify that, if available, the title of the stored + estimation set be used as the model label. Note that the {cmd:label} option + implies {cmd:titles} (unless {cmd:notitles} is specified). {cmd:depvars} + takes precedence over {cmd:titles}. + +{p 12 16 2} + {cmd:numbers} to cause the model labels to be numbered consecutively. + +{p 12 16 2} + {it:{help estout##lsub:label_subopts}}, which are explained in their own section. 
+ {p_end} +{marker collabels} +{p 4 8 2} + {cmd:collabels(}{it:strlist}[{cmd:,} {it:{help estout##lsub:label_subopts}}]{cmd:)} + specifies labels for the columns within models or equations. The + default is to compose a label from the names or labels of the + statistics printed in the cells of that column. The {it:label_subopts} + are explained in their own section below. + {p_end} +{marker eqlabels} +{p 4 8 2} + {cmd:eqlabels(}{it:strlist}[{cmd:,} {it:suboptions}]{cmd:)} + labels the equations. The default is to use the equation names as + stored by the estimation command, or to use the variable labels if the + equation names correspond to individual variables and the {cmd:label} + option is specified. The {it:suboptions} for use with {cmd:eqlabels} + are: + +{p 12 16 2} + {cmd:merge} to merge equation labels and parameter labels instead of + printing equation labels in separate rows. Equation and parameter labels + will be separated by ":" unless another delimiter is specified via the + {cmd:suffix()} suboption (see {it:{help estout##lsub:label_subopts}}). + {cmd:merge} has no effect if {cmd:unstack} is specified. + +{p 12 16 2} + {it:{help estout##lsub:label_subopts}}, which are explained in their own + section. Note that {bind:{cmd:eqlabels(none)}} causes {cmd:_cons} to be + replaced with the equation name or label, if {cmd:_cons} is the only + parameter in an equation. This is useful, e.g., for tabulating + {cmd:ologit} or {cmd:oprobit} results in Stata 9. Specify + {bind:{cmd:eqlabels("", none)}} to not replace {cmd:_cons}. + {p_end} +{marker mgroups} +{p 4 8 2} + {cmd:mgroups(}{it:strlist}[{cmd:,} {it:suboptions}]{cmd:)} may be used to + label groups of (consecutive) models at the top of the table heading. The + labels are placed in the first physical column of the output for the group of + models to which they apply. 
The {it:suboptions} for use with {cmd:mgroups} + are: + +{p 12 16 2} + {cmd:pattern(}{it:pattern}{cmd:)} to establish how the models are to be grouped. + {it:pattern} should be a list of zeros and ones, with ones indicating the + start of a new group of models. For example, + +{p 20 20 2} + {inp:. estout} {it:...}{inp:, mgroups("Group 1" "Group 2", pattern(1 0 0 1 0))} + +{p 16 16 2} + would group Models 1, 2, and 3 together and then group Models 4 and 5 + together as well. Note that the first group will always start with the first + model regardless of whether the first token of {it:pattern} is a one or a + zero. + +{p 12 16 2} + {it:{help estout##lsub:label_subopts}}, which are explained + in their own section. In + particular, the {cmd:span} suboption might be of interest here. + {p_end} +{marker numbers} +{p 4 8 2} + {cmd:numbers}[{cmd:(}{it:l} {it:r}{cmd:)}] adds a row to the table header + displaying consecutive model numbers. The default is to + enclose the numbers in parentheses, i.e. {cmd:(1)}, {cmd:(2)}, etc. + Alternatively, specify {it:l} and {it:r} to change the tokens on the + left and right of each number. For example, {cmd:numbers("" ")")} + would result in {cmd:1)}, {cmd:2)}, etc. + +{marker out} +{dlgtab:Output} +{marker replace} +{p 4 8 2} + {cmd:replace} permits {cmd:estout} to overwrite an existing file. + {p_end} +{marker append} +{p 4 8 2} + {cmd:append} specifies that the output be appended to an existing file. It + may be used even if the file does not yet exist. + {p_end} +{marker type} +{p 4 8 2} + {cmd:type} specifies that the assembled estimates table be printed in the + results window and the log file. This is the default unless {cmd:using} is + specified. Use {cmd:notype} to suppress the display of the table. + {p_end} +{marker showtabs} +{p 4 8 2} + {cmd:showtabs} requests that tabs be displayed as {cmd:<T>}s in both the + results window and the log file instead of in expanded form. 
This option does + not affect how tabs are written to the text file specified by {cmd:using}. + {p_end} +{marker topfile} +{p 4 8 2} + {cmd:topfile(}{it:filename}{cmd:)} and + {cmd:bottomfile(}{it:filename}{cmd:)} may be used to insert text before + and after the table, where the text is imported from a file on disk. Note that + {cmd:substitute()} does not apply to text inserted by {cmd:topfile()} or + {cmdab:bottomfile()}. + +{marker def} +{dlgtab:Defaults} +{marker style} +{p 4 8 2} + {cmd:style(}{it:style}{cmd:)} specifies a "style" for the output + table. {cmdab:def:aults:(}{it:style}{cmd:)} is a synonym for + {cmd:style(}{it:style}{cmd:)}. A "style" is a named combination of options + that is saved in an auxiliary file called {cmd:estout_}{it:style}{cmd:.def}. + In addition, there are five internal styles called {cmd:smcl} + (default for screen display), {cmd:tab} (export default), {cmd:fixed}, + {cmd:tex}, and {cmd:html}. The {cmd:smcl} style is suitable for displaying + the table in Stata's results window and is the default unless + {cmd:using} is specified. It includes {help smcl:SMCL} formatting tags and + horizontal lines to structure the table. The particulars of the other styles are: + + settings {col 38}styles + {col 26}{cmd:tab}{col 34}{cmd:fixed}{col 42}{cmd:tex}{col 50}{cmd:html} + {hline 47} + {cmd:begin} {col 50}{cmd:} + {cmd:delimiter} {col 26}{cmd:_tab}{col 34}{cmd:" "}{col 42}{cmd:&}{col 50}{cmd:} + {cmd:end} {col 42}{cmd:\\}{col 50}{cmd:} + {cmd:varwidth} {col 26}{cmd:0}{col 34}{cmd:12/20}*{col 42}{cmd:12/20}*{col 50}{cmd:12/20}* + {cmd:modelwidth}{col 26}{cmd:0}{col 34}{cmd:12}{col 42}{cmd:12}{col 50}{cmd:12} + {cmd:abbrev} {col 26}off{col 34}on{col 42}off{col 50}off + (* if {cmd:label} is on) + +{p 8 8 2} + {cmd:tab} is the default export style (i.e. if {cmd:using} is specified). + +{p 8 8 2} + Note that explicitly specified options take precedence + over settings provided by a style. For example, if you type + + {com}. 
estout, delimiter("") style(tab){txt} + +{p 8 8 2} + then the column delimiter will be set to empty string since the + {cmd:delimiter()} option overwrites the default from the {cmd:tab} + style. Similarly, specifying + {cmd:noabbrev} will turn abbreviation off if using the {cmd:fixed} + style. + +{p 8 8 2} + See {help estout##defaults:Defaults files} in the + {help estout##rem:Remarks} section to make available your own style. + +{marker lsub} +{it:{dlgtab:label_subopts}} + +{p 4 4 2} +The following suboptions may be used within the {cmd:mgroups()}, +{cmd:mlabels()}, {cmd:collabels()}, {cmd:eqlabels()}, +{cmd:varlabels()}, and {cmd:stats(, labels())} options: + +{p 4 8 2} + {cmd:none} suppresses the printing of the labels or drops the + part of the table heading to which it applies. Note that instead of + typing {bind:{it:option}{cmd:(, none)}} you may simply specify + {it:option}{cmd:(none)}. + +{p 4 8 2} + {cmd:prefix(}{it:string}{cmd:)} specifies a common prefix to be added to each + label. + +{p 4 8 2} + {cmd:suffix(}{it:string}{cmd:)} specifies a common suffix to be added to each + label. + +{p 4 8 2} + {cmd:begin(}{it:strlist}{cmd:)} specifies a prefix to be printed at the + beginning of the part of the table to which it applies. If {cmd:begin} is + specified in {cmd:varlabels()} or {cmd:stats(,labels())}, the prefix will + be repeated for each regressor or summary statistic. + +{p 4 8 2} + {cmd:first} specifies that the first occurrence of the {cmd:begin()}-prefix in + {cmd:varlabels()} or {cmd:stats(,labels())} be printed. This + is the default. Use {cmd:nofirst} to suppress the first occurrence of the + prefix. In {cmd:varlabels()}, {cmd:nofirst} applies equation-wise, i.e., the first + {cmd:begin()}-prefix in each equation is suppressed (unless {cmd:unstack} is + specified). + +{p 4 8 2} + {cmd:end(}{it:strlist}{cmd:)} specifies a suffix to be printed at the end of the + part of the table to which it applies. 
If {cmd:end} is specified in + {cmd:varlabels()} or {cmd:stats(,labels())}, the suffix will be repeated + for each regressor or summary statistic. + +{p 4 8 2} + {cmd:last} specifies that the last occurrence of the {cmd:end()}-suffix in + {cmd:varlabels()} or {cmd:stats(,labels())} be printed. This + is the default. Use {cmd:nolast} to suppress the last occurrence of the + suffix. In {cmd:varlabels()}, {cmd:nolast} applies equation-wise, i.e., the last + {cmd:end()}-suffix in each equation is suppressed (unless {cmd:unstack} is + specified). + +{p 4 8 2} + {cmd:replace} causes the label suboption {cmd:begin()}-prefix and {cmd:end()}-suffix + to be used instead of the global {cmd:begin()} and {cmd:end()} strings. The default + is to print both. {cmd:replace} also applies to {cmd:blist()} and {cmd:elist()} + if specified in {cmd:varlabels()}. + +{p 4 8 2} + {cmd:span} causes labels to span columns, i.e. extends the labels across + several columns, if appropriate. This suboption is relevant only for the + {cmd:mgroups()}, {cmd:mlabels()}, {cmd:eqlabels()}, and + {cmd:collabels()} options. The {cmd:@span} string returns the number of + spanned columns if it is included in the label, prefix, or suffix. A LaTeX example: + +{p 8 8 2} + {inp:. estout} {it:...}{inp:, mlabels(, span prefix(\multicolumn{@span}{c}{) suffix(}))} + +{p 4 8 2} + {cmd:erepeat(}{it:string}{cmd:)} specifies a string that is repeated for each + group of spanned columns at the very end of the row if the {cmd:span} + suboption is specified. This suboption is relevant only for the + {cmd:mgroups()}, {cmd:mlabels()}, {cmd:eqlabels()}, and + {cmd:collabels()} options. If the {cmd:@span} string is included in + {it:string} it will be replaced by the range of columns spanned. A LaTeX example: + +{p 8 8 2} + {inp:. 
estout} {it:...}{inp:, mlabels(, span erepeat(\cline{@span}))} + +{p 4 8 2} + {cmd:lhs(}{it:string}{cmd:)} inserts {it:string} into the otherwise empty cell + in the left stub of the row of the table heading to which it applies. This + suboption is relevant only for the {cmd:mgroups()}, {cmd:mlabels()}, + {cmd:eqlabels()}, and {cmd:collabels()} options. + +{marker msub} +{it:{dlgtab:matrix_subopts}} + +{p 4 4 2} +The following suboptions may be applied within the {cmd:matrix()}, +{cmd:e()}, or {cmd:r()} argument used to tabulate a matrix: + {p_end} +{marker mfmt} +{p 4 8 2} + {cmd:fmt(}{it:fmtlist}{cmd:)} sets the display formats for the matrix. + {it:fmtlist} contains a list of format specifications, one for each + column of the matrix. {it:fmtlist} is recycled if it supplies fewer + specifications than there are columns in the matrix. A format + specification may be a single {it:{help estout##fmt:fmt}} such as, + e.g., {cmd:%9.0g} or {cmd:a3} (see {help estout##fmt:Numerical formats} + in the {help estout##rem:Remarks} section for details) to be applied to + all cells in the column. Alternatively, a format specification may be + a list of {it:{help estout##fmt:fmt}}s, enclosed in double quotes, to be + used for the cells in the column one by one. The last format in the + list is used for the remaining cells if the number of cells in the + column is greater than the number of formats in the list. Also see the + {help estout##ex7:examples} below. + {p_end} +{marker mtranspose} +{p 4 8 2} + {cmd:transpose} causes the matrix to be transposed for tabulation. 
+ +{marker exa} +{title:Examples} + + Contents + {help estout##intro:Introduction} + {help estout##ex1:Publication style table} + {help estout##ex2:t-statistics for selected variables only} + {help estout##ex3:Summary statistics only} + {help estout##ex4:Table of descriptives} + {help estout##ex5:Unstack multiple equations} + {help estout##ex7:Tabulating a matrix} + +{p 4 4 2} Please first read the {help estout##intro:Introduction}. The +other examples are more advanced and intended for users +already familiar with the basic features of +{cmd:estout}. Additional examples can be found in Jann (2005) and at +{browse "http://repec.sowi.unibe.ch/stata/estout/"}. + +{marker intro} +{dlgtab:Introduction} + +{p 4 4 2} +The full syntax of {cmd:estout} is rather complex and is to be found +above. However, consider the following basic syntax, which +includes only the most important options: + +{p 8 15 2} +{cmd:estout} [ {it:namelist} ] [ {cmd:using} {it:filename} ] [ {cmd:,} + {cmdab:c:ells:(}{it:array}{cmd:)} + {cmdab:s:tats:(}{it:scalarlist}{cmd:)} + {cmdab:sty:le:(}{it:style}{cmd:)} + {it:more_options} + ] + +{p 4 4 2} +where {it:namelist} is a list of the names of stored estimation sets (the name +list can be entered as {cmd:*} to refer to all stored estimates). The +{cmd:cells()} and {cmd:stats()} options determine the primary contents of +the table. The {cmd:style()} option determines the basic formatting of the +table. + +{p 4 4 2}{ul:Basic usage} + +{p 4 4 2} +The general procedure for using {cmd:estout} is to first store several +models using the {cmd:estimates store} or the {helpb eststo} command and then apply +{cmd:estout} to display or save a table of the estimates. By default, +{cmd:estout} displays a plain table of the coefficients of the models and +uses {help SMCL} tags and horizontal lines to structure the table: + + {com}. sysuse auto + {txt}(1978 Automobile Data) + + {com}. 
replace price = price / 1000 + {txt}price was {res}int{txt} now {res}float + {txt}(74 real changes made) + + {com}. replace weight = weight / 1000 + {txt}weight was {res}int{txt} now {res}float + {txt}(74 real changes made) + + {com}. quietly regress price weight mpg + {txt} + {com}. estimates store m1, title(Model 1) + {txt} + {com}. generate forXmpg = foreign * mpg + {txt} + {com}. quietly regress price weight mpg forXmpg foreign + {txt} + {com}. estimates store m2, title(Model 2) + {txt} + {com}. estout m1 m2 + {res} + {txt}{hline 38} + {txt} m1 m2 + {txt} b b + {txt}{hline 38} + {txt}weight {res} 1.746559 4.613589{txt} + {txt}mpg {res} -.0495122 .2631875{txt} + {txt}forXmpg {res} -.3072165{txt} + {txt}foreign {res} 11.24033{txt} + {txt}_cons {res} 1.946068 -14.44958{txt} + {txt}{hline 38} + +{p 4 4 2}Alternatively, if {cmd:using} is specified, {cmd:estout} writes a +raw tab-delimited table (without SMCL tags and without lines) to the +indicated file ({cmd:*} is used in the following example to indicate that +all stored models be tabulated): + + {com}. estout * using example.txt + {txt}(output written to {browse `"example.txt"'}) + + {com}. type example.txt + {res} m1 m2 + b b + weight 1.746559 4.613589 + mpg -.0495122 .2631875 + forXmpg -.3072165 + foreign 11.24033 + _cons 1.946068 -14.44958 + {txt} +{p 4 4 2} The table looks messy in the Stata results window or the Stata +log because the columns are tab-separated (note that tab characters are not +preserved in the results window or the log). However, the table would look +tidy if "example.txt" were opened, for example, in a spreadsheet program. + +{p 4 4 2}{ul:Choosing a style} + +{p 4 4 2}{cmd:estout} has a {cmd:style()} option to set the basic format of +the table. The default style for screen display is the {cmd:smcl} style. +The default export style (i.e. if {cmd:using} is specified) is the +{cmd:tab} style. (See the examples above.) 
Other predefined styles are +{cmd:fixed}, {cmd:tex}, and {cmd:html}, but it is also possible to define +one's own styles (see {help estout##defaults:Defaults files} in the +{help estout##rem:Remarks} section). The {cmd:tex} style, for example, modifies +the output table for use with LaTeX's tabular environment: + + {com}. estout *, style(tex) varlabels(_cons \_cons) + {res} + & m1& m2\\ + & b& b\\ + weight & 1.746559& 4.613589\\ + mpg & -.0495122& .2631875\\ + forXmpg & & -.3072165\\ + foreign & & 11.24033\\ + \_cons & 1.946068& -14.44958\\ + {txt} +{p 4 4 2} +Note that {cmd:_cons} has been replaced by its LaTeX equivalent in the example above +using the {cmd:varlabels()} option (the underscore character produces an +error in LaTeX unless it is preceded by a backslash). For more +information on the {cmd:varlabels()} option, see {cmd:estout}'s +{help estout##lab:Labeling} options. + +{p 4 4 2}{ul:The cells option} + +{p 4 4 2} +Use the {cmd:cells()} option to specify the parameter statistics to be +tabulated and how they are to be arranged. The parameter statistics +available are {cmd:b} (point estimates; the default), {cmd:se} (standard +errors), {cmd:t} (t-/z-statistics), {cmd:p} (p-values), {cmd:ci} +(confidence intervals; to display the lower and upper bounds in separate +cells use {cmd:ci_l} and {cmd:ci_u}), as well as any additional +parameter statistics included in the {cmd:e()}-returns for the models +(see {cmd:estout}'s {help estout##par:Parameter Statistics} options). For +example, {cmd:cells(b se)} results +in the reporting of point estimates and standard errors: + + {com}. 
estout *, cells(b se) + {res} + {txt}{hline 38} + {txt} m1 m2 + {txt} b/se b/se + {txt}{hline 38} + {txt}weight {res} 1.746559 4.613589{txt} + {res} .6413538 .7254961{txt} + {txt}mpg {res} -.0495122 .2631875{txt} + {res} .086156 .1107961{txt} + {txt}forXmpg {res} -.3072165{txt} + {res} .1085307{txt} + {txt}foreign {res} 11.24033{txt} + {res} 2.751681{txt} + {txt}_cons {res} 1.946068 -14.44958{txt} + {res} 3.59705 4.42572{txt} + {txt}{hline 38} + +{p 4 4 2} +Multiple statistics are placed in separate rows beneath one another by +default as in the example above. However, elements that are listed in +quotes or in parentheses are placed beside one another. For +example, specifying {bind:{cmd:cells("b se t p")}} or, equivalently, +{bind:{cmd:cells((b se t p))}} produces the following table: + + {com}. estout m2, cells("b se t p") + {res} + {txt}{hline 64} + {txt} m2 + {txt} b se t p + {txt}{hline 64} + {txt}weight {res} 4.613589 .7254961 6.359219 1.89e-08{txt} + {txt}mpg {res} .2631875 .1107961 2.375421 .0203122{txt} + {txt}forXmpg {res} -.3072165 .1085307 -2.830687 .0060799{txt} + {txt}foreign {res} 11.24033 2.751681 4.084896 .0001171{txt} + {txt}_cons {res} -14.44958 4.42572 -3.26491 .0017061{txt} + {txt}{hline 64} + +{p 4 4 2} +The two approaches can be combined. For example, {cmd:cells("b p" se)} +would produce a table with point estimates and standard errors beneath one +another in the first column and p-values in the top row of the second +column for each model. + +{p 4 4 2} +Note that for each statistic named in the {cmd:cells()} option a set of +suboptions may be specified in parentheses. For example, in social sciences +it is common to report standard errors or t-statistics in parentheses beneath +the coefficients and to indicate the significance of individual +coefficients with stars. Furthermore, the results are rounded. Just such a +table can be created using the following procedure: + + {com}. 
estout *, cells(b(star fmt(3)) t(par fmt(2))) + {res} + {txt}{hline 44} + {txt} m1 m2 + {txt} b/t b/t + {txt}{hline 44} + {txt}weight {res} 1.747** 4.614***{txt} + {res} (2.72) (6.36) {txt} + {txt}mpg {res} -0.050 0.263* {txt} + {res} (-0.57) (2.38) {txt} + {txt}forXmpg {res} -0.307** {txt} + {res} (-2.83) {txt} + {txt}foreign {res} 11.240***{txt} + {res} (4.08) {txt} + {txt}_cons {res} 1.946 -14.450** {txt} + {res} (0.54) (-3.26) {txt} + {txt}{hline 44} + +{p 4 4 2} +The {cmd:estout} default is to display {cmd:*} for p<.05, +{cmd:**} for p<.01, and {cmd:***} for p<.001. However, note that +the significance thresholds and symbols are fully customizable (see {cmd:estout}'s +{help estout##sig:Significance stars} options). + +{p 4 4 2}{ul:The stats option} + +{p 4 4 2} +Finally, use the {cmd:stats()} option to specify scalar +statistics to be displayed for each model in the table footer. The +available scalar statistics are {cmd:aic} (Akaike's information criterion), +{cmd:bic} (Schwarz's information criterion), {cmd:rank} (the rank of +{cmd:e(V)}, i.e. the number of free parameters in model), {cmd:p} (the +p-value of the model), as well as any numeric or string scalars contained in the +{cmd:e()}-returns for the models (see +{cmd:estout}'s +{help estout##sum:Summary statistics} options). For example, specify +{cmd:stats(r2 bic N)} to add the R-squared, BIC, and the number of cases: + + {com}. estout *, stats(r2 bic N) + {res} + {txt}{hline 38} + {txt} m1 m2 + {txt} b b + {txt}{hline 38} + {txt}weight {res} 1.746559 4.613589{txt} + {txt}mpg {res} -.0495122 .2631875{txt} + {txt}forXmpg {res} -.3072165{txt} + {txt}foreign {res} 11.24033{txt} + {txt}_cons {res} 1.946068 -14.44958{txt} + {txt}{hline 38} + {txt}r2 {res} .2933891 .5516277{txt} + {txt}bic {res} 356.2918 331.2406{txt} + {txt}N {res} 74 74{txt} + {txt}{hline 38} +{marker ex1} +{dlgtab:Publication style table} + + {com}. label variable foreign "Foreign car type" + {txt} + {com}. 
label variable forXmpg "Foreign*Mileage" + {txt} + {com}. estout *, cells(b(star fmt(%9.3f)) se(par)) /// + > stats(r2_a N, fmt(%9.3f %9.0g) labels(R-squared)) /// + > legend label collabels(none) varlabels(_cons Constant) + {res} + {txt}{hline 52} + {txt} Model 1 Model 2 + {txt}{hline 52} + {txt}Weight (lbs.) {res} 1.747** 4.614***{txt} + {res} (0.641) (0.725) {txt} + {txt}Mileage (mpg) {res} -0.050 0.263* {txt} + {res} (0.086) (0.111) {txt} + {txt}Foreign*Mileage {res} -0.307** {txt} + {res} (0.109) {txt} + {txt}Foreign car type {res} 11.240***{txt} + {res} (2.752) {txt} + {txt}Constant {res} 1.946 -14.450** {txt} + {res} (3.597) (4.426) {txt} + {txt}{hline 52} + {txt}R-squared {res} 0.273 0.526 {txt} + {txt}N {res} 74 74 {txt} + {txt}{hline 52} + {txt}* p<0.05, ** p<0.01, *** p<0.001 +{marker ex2} +{dlgtab:t-statistics for selected variables only} + + {com}. estout *, cells(b(star) t(par keep(mpg))) + {res} + {txt}{hline 44} + {txt} m1 m2 + {txt} b/t b/t + {txt}{hline 44} + {txt}weight {res} 1.746559** 4.613589***{txt} + {txt}mpg {res} -.0495122 .2631875* {txt} + {res} (-.5746806) (2.375421) {txt} + {txt}forXmpg {res} -.3072165** {txt} + {txt}foreign {res} 11.24033***{txt} + {txt}_cons {res} 1.946068 -14.44958** {txt} + {txt}{hline 44} +{marker ex3} +{dlgtab:Summary statistics only} + + {com}. estout *, cells(none) stats(r2_a bic N, star) + {res} + {txt}{hline 44} + {txt} m1 m2 + {txt}{hline 44} + {txt}r2_a {res} .2734846*** .5256351***{txt} + {txt}bic {res} 356.2918 331.2406 {txt} + {txt}N {res} 74 74 {txt} + {txt}{hline 44} +{marker ex4} +{dlgtab:Table of descriptives} + + {com}. quietly generate x = uniform() + {txt} + {com}. quietly regress x price weight mpg foreign + {txt} + {com}. estadd mean + + {txt}added matrix: + e(mean) : {res}1 x 5 + {txt} + {com}. estadd sd, nobinary + + {txt}added matrix: + e(sd) : {res}1 x 5 + {txt} + {com}. 
estout, cells("mean sd") stats(N) mlabels(,none) drop(_cons) + {res} + {txt}{hline 38} + {txt} mean sd + {txt}{hline 38} + {txt}price {res} 6.165257 2.949496{txt} + {txt}weight {res} 3.019459 .7771936{txt} + {txt}mpg {res} 21.2973 5.785503{txt} + {txt}foreign {res} .2972973 {txt} + {txt}{hline 38} + {txt}N {res} 74 {txt} + {txt}{hline 38} +{marker ex5} +{dlgtab:Unstack multiple equations} + + {com}. quietly sureg (price foreign weight length) /// + > (mpg displ = foreign weight) + {txt} + {com}. estout, cells(b t(par)) stats(r2 chi2 p) unstack + {res} + {txt}{hline 51} + {txt} price mpg displacement + {txt} b/t b/t b/t + {txt}{hline 51} + {txt}foreign {res} 3.57526 -1.650029 -25.6127{txt} + {res} (5.749891) (-1.565555) (-2.047999){txt} + {txt}weight {res} 5.691462 -6.587886 96.75485{txt} + {res} (6.182983) (-10.55641) (13.06594){txt} + {txt}length {res} -.0882711 {txt} + {res} (-2.809689) {txt} + {txt}_cons {res} 4.506212 41.6797 -87.23547{txt} + {res} (1.255897) (19.64914) (-3.46585){txt} + {txt}{hline 51} + {txt}r2 {res} .548808 .6627029 .8115213{txt} + {txt}chi2 {res} 89.73586 145.3912 318.6174{txt} + {txt}p {res} 2.50e-19 2.68e-32 6.50e-70{txt} + {txt}{hline 51} +{marker ex7} +{dlgtab:Tabulating a matrix} + +{p 4 4 2} + Use {cmd:estout matrix(}{it:matname}{cmd:)} to tabulate Stata matrix + {it:matname}. Example: + + {com}. set seed 123 + {txt} + {com}. matrix A = matuniform(3,2) + {txt} + {com}. matrix list A + + {txt}A[3,2] + c1 c2 + r1 {res}.91204397 .0075452 + {txt}r2 {res}.28085881 .46027868 + {txt}r3 {res}.56010592 .67319061 + {txt} + {com}. estout matrix(A) + {res} + {txt}{hline 38} + {txt} A + {txt} c1 c2 + {txt}{hline 38} + {txt}r1 {res} .912044 .0075452{txt} + {txt}r2 {res} .2808588 .4602787{txt} + {txt}r3 {res} .5601059 .6731906{txt} + {txt}{hline 38} + +{p 4 4 2} + Numeric formats for the columns can be set using the {cmd:fmt()} + suboption: + + {com}. 
estout matrix(A, fmt(2 3)) + {res} + {txt}{hline 38} + {txt} A + {txt} c1 c2 + {txt}{hline 38} + {txt}r1 {res} 0.91 0.008{txt} + {txt}r2 {res} 0.28 0.460{txt} + {txt}r3 {res} 0.56 0.673{txt} + {txt}{hline 38} + +{p 4 4 2} + A list of formats can be specified for each column: + + {com}. estout matrix(A, fmt("2 3 4" "4 3 2")) + {res} + {txt}{hline 38} + {txt} A + {txt} c1 c2 + {txt}{hline 38} + {txt}r1 {res} 0.91 0.0075{txt} + {txt}r2 {res} 0.281 0.460{txt} + {txt}r3 {res} 0.5601 0.67{txt} + {txt}{hline 38} +{marker rem} +{title:Remarks} + + Contents + + {help estout##fmt:Numerical formats} + {help estout##spchar:Special characters} + {help estout##atvar:Using @-variables} + {help estout##defaults:Defaults files} +{marker fmt} +{dlgtab:Numerical formats} + +{p 4 4 2} +Numerical display formats may be specified in {cmd:estout} +as follows: + +{p 5 8 2} +1. Official Stata's display formats: You may specify formats, such as +{cmd:%9.0g} or {cmd:%8.2f}. See help {help format} for a list +of available formats. {cmd:%g} or {cmd:g} may be used as a +synonym for {cmd:%9.0g}. + +{p 5 8 2} +2. Fixed format: You may specify an integer value such as {cmd:0}, +{cmd:1}, {cmd:2}, etc. to request a display format with a fixed number +of decimal places. For example, {cmd:cells(t(fmt(3)))} would display +t-statistics with three decimal places. + +{p 5 8 2} +3. Automatic format: You may specify {cmd:a1}, {cmd:a2}, ..., or +{cmd:a9} to cause {cmd:esttab} to choose a reasonable display format for +each number depending on the number's value. {cmd:a} may be used as a +synonym for {cmd:a3}. The {it:#} in +{cmd:a}{it:#} determines the minimum precision according to the +following rules: + +{p 10 12 2} +o Absolute numbers smaller than 1 are displayed with {it:#} +significant decimal places (i.e. with {it:#} decimal places ignoring +any leading zeros after the decimal point). For example, +{cmd:0.00123456} is displayed as {cmd:0.00123} if the format is +{cmd:a3}. 
+ +{p 10 12 2} +o Absolute numbers greater than 1 are displayed with as many digits +required to retain at least one decimal place and are displayed with +a minimum of ({it:#} + 1) digits. For example, if the format is +{cmd:a3}, {cmd:1.23456} is displayed as {cmd:1.235}, {cmd:12.3456} is +displayed as {cmd:12.35}, and {cmd:1234.56} is displayed as +{cmd:1234.6}. + +{p 10 12 2} +o In any case, integers are displayed with zero decimal places, and +very large or very small absolute numbers are displayed in +exponential format. + +{marker spchar} +{dlgtab:Special characters} + +{p 4 4 2} +The {cmd:\} and {cmd:$} characters and quotation marks have +special meanings in Stata. You should therefore consider the following +instructions if you, for example, intend to specify awkward delimiters or +specify special characters in labels: + +{p 6 8 2}- Strings containing unmatched quotes should be enclosed in compound double +quotes (thus, {cmd:delimiter(`"""')} results in columns +delimited by {cmd:"}, while {cmd:delimiter(")} produces an error). + +{p 6 8 2}- The backslash character is used to delay macro expansion in +Stata. Specifying {cmd:\\} in Stata 8 just results in the printing of {cmd:\}. To get +a double backslash in Stata 8 (the {cmd:\newline} command in TeX), type {cmd:\\\}. + +{p 6 8 2}- The dollar sign is used for global macro expansion in Stata. Thus, +{cmd:$x} would result in the display of the contents of global macro +{cmd:x} (or nothing, if the macro is empty). Therefore, use +{cmd:\$} to produce {cmd:$} in the output. For math mode in LaTeX I +recommend using {cmd:\(}...{cmd:\)} instead of {cmd:$}...{cmd:$}. + +{p 4 4 2} +Stata's {cmd:char()} function may also be used to specify odd characters +(see help {help strfun}). In particular, {cmd:"`=char(9)'"} +results in a tab character and {cmd:"`=char(13)'"} results +in a carriage return. 
For example, {bind:{cmd:delimiter(" `=char(9)' ")}} +specifies that a tab character with a leading and +a trailing blank be used as delimiter. + +{p 4 4 2} {it:Tip:} It is sometimes very useful to set the format of all cells in a +spreadsheet to "Text" before pasting the estimates table. This prevents the +spreadsheet program from trying to interpret the cells and ensures that the contents +of the table remain unchanged. + +{marker atvar} +{dlgtab:Using @-variables} + +{p 4 4 2} +{cmd:estout} features several variables that can be used within string +specifications. The following list provides an overview of these variables. + +{p 5 8 2}o{space 2}In {cmd:prehead()}, {cmd:posthead()}, {cmd:prefoot()}, + and {cmd:postfoot()}, in the {cmd:begin()} and {cmd:end()} label + suboptions, and in the {cmd:blist()} and {cmd:elist()} suboptions + in {cmd:varlabels()}: + +{p 12 16 2}{cmd:@span} to return the value of a count variable for the total number of physical + columns of the table. + +{p 12 16 2}{cmd:@M} to return the number of models in the table. + +{p 12 16 2}{cmd:@E} to return the total number columns containing separate equations. + +{p 12 16 2}{cmd:@width} to return the total width of the table (number of characters). + +{p 12 16 2}{cmd:@hline} to return a horizontal line (series of dashes, by default; + see the {cmd:hlinechar()} option). + +{p 5 8 2}o{space 2}In {cmd:prehead()}, {cmd:posthead()}, {cmd:prefoot()}, + and {cmd:postfoot()}: + +{p 12 16 2}{cmd:@title} to return the title specified with the {cmd:title()} option. + +{p 12 16 2}{cmd:@note} to return the note specified with the {cmd:note()} option. + +{p 12 16 2}{cmd:@discrete} to return the explanations provided by the + {cmd:discrete()} option (provided that the {cmd:margin} option is activated). + +{p 12 16 2}{cmd:@starlegend} to return a legend explaining the significance symbols. 
+ +{p 5 8 2}o{space 2}In the {cmd:prefix()} and {cmd:suffix()} suboptions of {cmd:mgroups()}, + {cmd:mlabels()}, {cmd:eqlabels()}, and + {cmd:collabels()}, and in the labels specified in these options: + +{p 12 16 2}{cmd:@span} to return the number of spanned columns. + +{p 5 8 2}o{space 2}In the {cmd:erepeat()} suboption of + {cmd:mgroups()}, {cmd:mlabels()}, {cmd:eqlabels()}, and + {cmd:collabels()}: + +{p 12 16 2}{cmd:@span} to return the range of spanned columns (e.g. {cmd:2-4} if columns 2, 3 and 4 + are spanned). + +{marker defaults} +{dlgtab:Defaults files} + +{p 4 4 2}{ul:Creating new defaults files:} + +{p 4 4 2} +To make available an own set +of default options, proceed as follows: + +{p 8 11 2} +1. Download "estout_mystyle.def" from the SSC +Archive (click +{stata "copy http://fmwww.bc.edu/repec/bocode/e/estout_mystyle.def estout_mystyle.def, text":here} +to copy the file from SSC and store it in the working directory). + +{p 8 11 2} +2. Open "estout_mystyle.def" in a text editor and make the desired modifications +(click {stata "doedit estout_mystyle.def":here} to open "estout_mystyle.def" in Stata's Do-File +Editor). + +{p 8 11 2} +3. Save the file in the current directory or elsewhere +in the ado-file path as {cmd:estout_}{it:newstyle}{cmd:.def} (see help {help sysdir}). + +{p 4 4 2}To use the new options set in {cmd:estout}, then type: + + {inp:. estout} {it:...} {inp:, style(}{it:newstyle}{inp:)} + + +{p 4 4 2}{ul:Defaults files syntax:} + +{p 4 4 2} +{cmd:estout} has two main types of options, which are treated differentially +in defaults files. On the one hand, there are simple on/off options without +arguments, like {cmd:legend} or {cmd:showtabs}. To turn such an option on, +enter the option followed by the options name as an argument, i.e. add the line + + {it:option} {it:option} + +{p 4 4 2} +to the defaults file. For example, + + {inp:legend legend} + +{p 4 4 2} +specifies that a legend be printed in the table footer. 
Otherwise, if you want +to turn the option off, just delete or comment out the line that contains it (or +specify {it:option} without an argument). + +{p 4 4 2} +To temporarily turn off an option that has been activated in a defaults file, +specify {cmd:no}{it:option} in the command line (do not, however, use +{cmd:no}{it:option} in defaults files). For example, if the legend has been +turned on in the defaults file, but you want to suppress it in a specific call of +{cmd:estout}, type + + {inp:. estout} {it:...}{inp:, nolegend} + +{p 4 4 2} +On the other hand, there are options that take arguments, such as +{cmd:prehead(}{it:args}{cmd:)}, {cmd:delimiter(}{it:args}{cmd:)}, or +{cmd:stats(}{it:args}{cmd:,} {it:...}{cmd:)}. Such options are specified as + + {it:option} {it:args} + +{p 4 4 2} +in the defaults file (where {it:args} must not include suboptions; see +below). Specifying an option in the command line overwrites the settings from +the defaults file. However, note that a {cmd:no} form, which exists for the +first options type, is not available here. + +{p 4 4 2} +Last but not least, there are two options that reflect a combination of the first +and second types: {cmd:eform}[{cmd:(}{it:args}{cmd:)}] and +{cmd:margin}[{cmd:(}{it:args}{cmd:)}]. These options can be specified +as either + + {it:option} {it:option} + +{p 4 4 2} +or + + {it:option} {it:args} + +{p 4 4 2} +in the defaults file; the {cmd:no} form is allowed. + +{p 4 4 2} +Many {cmd:estout} options have suboptions, i.e., an option might take the +form {it:option}{cmd:(}{it:...}{cmd:,} {it:suboption}{cmd:)} or +{it:option}{cmd:(}{it:...}{cmd:,} {it:suboption}{cmd:(}{it:args}{cmd:))}. In +the defaults file, the suboptions cannot be included in the +definition of a higher-level option. 
Instead, they must be +specified in their own lines, as either + + {it:optionsuboption} {it:suboption} + +{p 4 4 2} +or + + {it:optionsuboption} {it:args} + +{p 4 4 2} +In the case of a two-level nesting of options, the name +used to refer to the suboption is a concatenation of the option's name and the +suboption's name, +i.e. {cmd:"}{it:optionsuboption}{cmd:"="}{it:option}{cmd:"+"}{it:suboption}{cmd:"}. For +example, the {cmd:labels()} suboption of the {cmd:stats()} option would be +set by the term {cmd:statslabels}. Analogously, the three level nesting in +the {cmd:stats()} option yields suboption names composed of three names. For +instance, the suboption called by the command + + {inp:. estout} {it:...}{inp:, stats(}{it:...}{inp:, labels(}{it:...}{inp:, prefix(}{it:args}{inp:)))} + +{p 4 4 2} +would be referred to as + + {inp:statslabelsprefix} {it:args} + +{p 4 4 2} +in the defaults file. The {cmd:cells()} option represents an exception to +this rule. It may be defined in the defaults file using +only a plain array of cells elements without suboptions, e.g. + + {inp:cells "b se" p} + +{p 4 4 2} +However, the suboptions of the cells elements may be referred to as +{it:el_suboption}, for example + + {inp:b_star star} + +{p 4 4 2} +or + + {inp:se_par [ ]} + + +{p 4 4 2}{ul:Comments in defaults files:} + +{p 4 4 2} +Be aware that the support for comments in defaults files is limited. In +particular, the {cmd:/*} and {cmd:*/} comment indicators cannot be used. +The other comment indicators work (more or less) as usual, that is: + +{p 5 8 2} + o{space 2}Empty lines and lines beginning with {cmd:*} (with or without preceding +blanks) will be ignored. + +{p 5 8 2} + o{space 2}{cmd://} preceded by one or more blanks indicates that the rest of the +line should be ignored. Lines beginning with {cmd://} (with or without preceding +blanks) will be ignored. 
+ +{p 5 8 2} + o{space 2}{cmd:///} preceded by one or more blanks indicates that the rest of the +line should be ignored and the part of the line preceding it should be added to +the next line. In other words, {cmd:///} can be used to split commands into +two or more lines of code. + +{marker ret} +{title:Saved results} + +{p 4 4 2} +{cmd:estout} saves the following in {cmd:r()}: + +{p 4 4 2}Scalars + {p_end} +{p 6 20 2}{cmd:r(nmodels)}{space 4}number of models + {p_end} +{p 6 20 2}{cmd:r(ccols)}{space 6}number of columns per model in {cmd:r(coefs)} + {p_end} + +{p 4 4 2}Macros + {p_end} +{p 6 20 2}{cmd:r(cmdline)}{space 4}command as typed + {p_end} +{p 6 20 2}{cmd:r(names)}{space 6}names of models + {p_end} +{p 6 20 2}{cmd:r(m}{it:#}{cmd:_}{it:name}{cmd:)}{space 4}model-specific +macros where {it:#} is the model number and {it:name} is macro name + {p_end} + +{p 4 4 2}Matrices + {p_end} +{p 6 20 2}{cmd:r(coefs)}{space 6}coefficients + {p_end} +{p 6 20 2}{cmd:r(stats)}{space 6}summary statistics + {p_end} + +{marker ref} +{title:References} + +{p 4 8 2}Cong, R. (2000). sg144: Marginal effects of the tobit model. +{it:Stata Technical Bulletin} 56: 27-34. + +{p 4 8 2}Jann, B. (2005). Making regression tables from stored estimates. +{it:The Stata Journal} 5(3): 288-308. + +{p 4 8 2}Jann, B. (2007). Making regression tables simplified. +{it:The Stata Journal} 7(2): 227-244. + +{p 4 8 2}Newson, R. (2003). Confidence intervals and p-values for delivery to the end +user. {it:The Stata Journal} 3(3): 245-269. + +{marker ack} +{title:Acknowledgements} + +{p 4 4 2}I would like to thank numerous people +for their comments and suggestions. Among them +are +Joao Pedro Azevedo, +Kit Baum, +Elisabeth Coutts, +Henriette Engelhardt, +Jonathan Gardnerand, +Simone Hirschvogl, +Daniel Hoechle, +Friedrich Huebler, +Maren Kandulla, +J. Scott Long, +David Newhouse, +Clive Nicholas, +Fredrik Wallenberg, +Ian Watson, and +Vince Wiggins. 
+ +{marker aut} +{title:Author} + + Ben Jann, Institute of Sociology, University of Bern, jann@soz.unibe.ch + +{marker als} +{title:Also see} + + Manual: {hi:[R] estimates} + + SJ: SJ5-3 st0085 (Jann 2005) + SJ7-2 st0085_1 (Jann 2007) + +{p 4 13 2}Online: help for + {helpb estimates}, + {help estcom}, + {helpb est_table:estimates table}, + {helpb ereturn}, + {help format}, + {helpb file}, + {helpb mfx}, + {helpb eststo}, + {helpb esttab}, + {helpb estadd}, + {helpb estpost} +{p_end} diff --git a/110/replication_package/replication/ado/plus/e/estpost.ado b/110/replication_package/replication/ado/plus/e/estpost.ado new file mode 100644 index 0000000000000000000000000000000000000000..1c5aa6fd19a66b64151883180559a42677cedb66 --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/estpost.ado @@ -0,0 +1,2124 @@ +*! version 1.2.2 10feb2023 Ben Jann +* 1. estpost +* 2. estpost_summarize +* 3. estpost_tabulate +* 4. estpost_tabstat +* 5. estpost_ttest +* 6. estpost_correlate +* 7. estpost_stci (Stata 9 required) +* 8. estpost_ci +* 9. estpost_prtest +* 10. estpost__svy_tabulate +* 12. estpost_gtabstat +* 99. _erepost + +* 1. 
estpost +program estpost, rclass // rclass => remove r()'s left behind by subcommand + version 8.2 + local caller : di _caller() + capt syntax [, * ] + if _rc==0 { // => for bootstrap + _coef_table_header + ereturn display, `options' + exit + } + gettoken subcommand rest : 0, parse(" ,:") + capt confirm name `subcommand' + if _rc { + di as err "invalid subcommand" + exit 198 + } + + local l = length(`"`subcommand'"') + if `"`subcommand'"'==substr("summarize",1,max(2,`l')) local subcommand "summarize" + else if `"`subcommand'"'==substr("tabulate",1,max(2,`l')) local subcommand "tabulate" + else if `"`subcommand'"'==substr("correlate",1,max(3,`l')) local subcommand "correlate" + else if `"`subcommand'"'=="svy" { + _estpost_parse_svy `macval(rest)' + } + else if substr(`"`subcommand'"',1,5)=="_svy_" { + di as err "invalid subcommand" + exit 198 + } + + capt local junk: properties estpost_`subcommand' // does not work in Stata 8 + if _rc==199 { + di as err "invalid subcommand" + exit 198 + } + + version `caller': estpost_`subcommand' `macval(rest)' + //eret list +end +program _estpost_markout2 // marks out obs that are missing on *all* variables + gettoken touse varlist: 0 + if `:list sizeof varlist'>0 { + tempname touse2 + gen byte `touse2' = 0 + foreach var of local varlist { + qui replace `touse2' = 1 if !missing(`var') + } + qui replace `touse' = 0 if `touse2'==0 + } +end +program _estpost_parse_svy + version 9.2 + _on_colon_parse `0' + local 0 `"`s(after)'"' + gettoken subcommand rest : 0, parse(" ,") + local l = length(`"`subcommand'"') + if `"`subcommand'"'==substr("tabulate",1,max(2,`l')) local subcommand "tabulate" + c_local subcommand `"_svy_`subcommand'"' + c_local rest `"`s(before)' : `rest'"' +end +program _estpost_namesandlabels // used by some routines such as estpost_tabulate + version 8.2 // returns locals names, savenames, and labels + args varname values0 labels0 elabel + if `"`values0'"'=="" { // generate values: 1 2 3 ... 
+ local i 0 + foreach label of local labels0 { + local values0 `values0' `++i' + } + } + local haslabels = "`elabel'"!="" + if `"`labels0'"'=="" & "`varname'"!="" { + local vallab: value label `varname' + } + while (1) { + gettoken value values0 : values0 + if "`value'"=="" continue, break //=> exit loop + if `"`vallab'"'!="" { + local lbl: label `vallab' `value', strict + } + else { + gettoken lbl labels0 : labels0 + } + if index("`value'",".") { + local haslabels 1 + if `"`macval(lbl)'"'=="" { + local lbl "`value'" + } + local value: subinstr local value "." "_missing_" + } + local names0 `names0' `value' + if `"`macval(lbl)'"'!="" { + local labels `"`macval(labels)'`lblspace'`value' `"`macval(lbl)'"'"' + local lblspace " " + } + if `haslabels' continue + if `"`macval(lbl)'"'=="" { + local names `"`names'`space'`value'"' + local savenames `"`savenames'`space'`value'"' + } + else { + if regexm(`"`macval(lbl)'"', `"[:."]"') local haslabels 1 + else if length(`"`macval(lbl)'"')>30 local haslabels 1 + else { + local names `"`names'`space'`"`lbl'"'"' + local lbl: subinstr local lbl " " "_", all + local savenames `"`savenames'`space'`lbl'"' + } + } + local space " " + } + if `haslabels' { + local names `names0' + local savenames `names0' + } + c_local names `"`names'"' // to be used as matrix row- or colnames + c_local savenames `"`savenames'"' // names without spaces (for matlist) + if `haslabels' { + c_local labels `"`macval(labels)'"' // label dictionary + } + else c_local labels "" +end +program _estpost_eqnamesandlabels // used by some routines such as estpost_tabulate + version 8.2 // returns locals eqnames and eqlabels + args varname values0 labels0 elabel + if `"`values0'"'=="" { // generate values: 1 2 3 ... 
+ local i 0 + foreach label of local labels0 { + local values0 `values0' `++i' + } + } + local haslabels = "`elabel'"!="" + if `"`labels0'"'=="" & "`varname'"!="" { + local vallab: value label `varname' + } + while (1) { + gettoken value values0 : values0 + if "`value'"=="" continue, break //=> exit loop + if `"`vallab'"'!="" { + local lbl: label `vallab' `value', strict + } + else { + gettoken lbl labels0 : labels0 + } + if index("`value'",".") { + local haslabels 1 + if `"`macval(lbl)'"'=="" { + local lbl "`value'" + } + local value: subinstr local value "." "_missing_" + } + local names0 `names0' `value' + if `"`macval(lbl)'"'=="" local lbl "`value'" + local labels `"`macval(labels)'`lblspace'`"`macval(lbl)'"'"' + local lblspace " " + if `haslabels' continue + if `"`macval(lbl)'"'=="" { + local names `"`names'`space'`value'"' + } + else { + if regexm(`"`macval(lbl)'"', `"[:."]"') local haslabels 1 + else if length(`"`macval(lbl)'"')>30 local haslabels 1 + else { + local names `"`names'`space'`"`lbl'"'"' + } + } + local space " " + } + if `haslabels' { + local names `names0' + } + c_local eqnames `"`names'"' // to be used as matrix roweqs or coleqs + if `haslabels' { + c_local eqlabels `"`macval(labels)'"' // list of labels + } + else c_local eqlabels "" +end + +* 2. estpost_summarize: wrapper for -summarize- +prog estpost_summarize, eclass + version 8.2 + local caller : di _caller() // not used + + // syntax + syntax [varlist] [if] [in] [aw fw iw] [, ESample Quietly /// + LISTwise CASEwise Detail MEANonly ] + if "`casewise'"!="" local listwise listwise + + // sample + if "`listwise'"!="" marksample touse + else { + marksample touse, nov + _estpost_markout2 `touse' `varlist' + } + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // gather results + local nvars: list sizeof varlist + tempname emptymat + mat `emptymat' = J(1, `nvars', .) 
+ mat coln `emptymat' = `varlist' + local i 0 + local rnames "" + foreach v of local varlist { + local ++i + qui summarize `v' if `touse' [`weight'`exp'], `detail' `meanonly' + local rnamesi: r(scalars) + local rnamesi: list rnamesi - rnames + if `"`rnamesi'"'!="" { + foreach name of local rnamesi { + tempname _`name' + mat `_`name'' = `emptymat' + } + local rnames: list rnames | rnamesi + } + foreach rname of local rnames { + mat `_`rname''[1,`i'] = r(`rname') + } + } + + // display + if "`quietly'"=="" { + tempname res + local rescoln + foreach rname of local rnames { + mat `res' = nullmat(`res'), `_`rname''' + if "`rname'"=="N" { + local rescoln `rescoln' e(count) + } + else { + local rescoln `rescoln' e(`rname') + } + } + mat coln `res' = `rescoln' + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) + } + else { + matlist `res', nohalf lines(oneline) + } + mat drop `res' + } + + // post results + local b + local V + if c(stata_version)<9 { // b and V required in Stata 8 + tempname b V + mat `b' = J(1, `nvars', 0) + mat coln `b' = `varlist' + mat `V' = `b'' * `b' + } + if "`esample'"!="" local esample esample(`touse') + eret post `b' `V', obs(`N') `esample' + + eret scalar k = `nvars' + + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local subcmd "summarize" + eret local cmd "estpost" + + local nmat: list sizeof rnames + forv i=`nmat'(-1)1 { + local rname: word `i' of `rnames' + if "`rname'"=="N" { + eret matrix count = `_N' + continue + } + eret matrix `rname' = `_`rname'' + } +end + + +* 2. 
estpost_tabulate: wrapper for -tabulate- +prog estpost_tabulate, eclass + version 8.2 + local caller : di _caller() // not used + syntax varlist(min=1 max=2) [if] [in] [fw aw iw pw] [, * ] + if `:list sizeof varlist'==1 { + version `caller': estpost_tabulate_oneway `0' + } + else { + version `caller': estpost_tabulate_twoway `0' + } +end +prog estpost_tabulate_oneway, eclass + version 8.2 + local caller : di _caller() // not used + + // syntax + syntax varname [if] [in] [fw aw iw] [, ESample Quietly /// + noTOTal subpop(passthru) Missing sort noLabel ELabels ] + + // sample + if "`missing'"!="" marksample touse, nov strok + else marksample touse, strok + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // handle string variables + capt confirm numeric variable `varlist' + if _rc { + tempname varname + qui encode `varlist' if `touse', generate(`varname') + } + else local varname `varlist' + + // gather results + tempname count vals + tab `varname' if `touse' [`weight'`exp'], nofreq /// + matcell(`count') matrow(`vals') `subpop' `missing' `sort' + local N = r(N) + mat `count' = `count'' + local R = r(r) + forv r = 1/`R' { + local value: di `vals'[`r',1] + local values `values' `value' + } + if "`label'"=="" { + _estpost_namesandlabels `varname' "`values'" "" "`elabels'" // sets names, savenames, labels + } + else { + _estpost_namesandlabels "" "`values'" "" "`elabels'" + } + if "`total'"=="" { + mat `count' = `count', `N' + local names `"`names' Total"' + local savenames `"`savenames' Total"' + local linesopt "lines(rowtotal)" + } + mat colname `count' = `names' + tempname percent cum + mat `percent' = `count'/`N'*100 + mat `cum' = J(1, colsof(`count'), .z) + mat colname `cum' = `names' + mat `cum'[1,1] = `count'[1,1] + forv r = 2/`R' { + mat `cum'[1,`r'] = `cum'[1,`r'-1] + `count'[1,`r'] + } + mat `cum' = `cum'/`N'*100 + + // display + if "`quietly'"=="" { + tempname res + mat `res' = `count'', `percent'', `cum'' + mat coln `res' = e(b) e(pct) 
e(cumpct) + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) nodotz + } + else { + mat rown `res' = `savenames' + matlist `res', nohalf `linesopt' rowtitle(`varlist') nodotz + } + mat drop `res' + if `"`macval(labels)'"'!="" { + di _n as txt "row labels saved in macro e(labels)" + } + } + + // post results + local V + if c(stata_version)<9 { // V required in Stata 8 + tempname V + mat `V' = `count'' * `count' * 0 + } + if "`esample'"!="" local esample esample(`touse') + eret post `count' `V', depname(`varlist') obs(`N') `esample' + eret scalar r = r(r) + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local labels `"`macval(labels)'"' + eret local depvar "`varlist'" + eret local subcmd "tabulate" + eret local cmd "estpost" + eret mat cumpct = `cum' + eret mat pct = `percent' +end +prog estpost_tabulate_twoway, eclass + version 8.2 + local caller : di _caller() // not used + + // syntax + syntax varlist(min=2 max=2) [if] [in] [fw aw iw] [, ESample Quietly /// + noTOTal Missing noLabel ELabels /// + CHi2 Exact Exact2(passthru) Gamma LRchi2 Taub v All noLOg ] + local v = upper("`v'") + local qui2 "`quietly'" + local hastests = `"`chi2'`exact'`exact2'`gamma'`lrchi2'`taub'`v'`all'"'!="" + if `hastests' local nofreq nofreq + else local qui2 "quietly" + + // sample + if "`missing'"!="" marksample touse, nov strok + else marksample touse, strok + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // handle string variables + gettoken rvar cvar : varlist + gettoken cvar : cvar + foreach d in r c { + capt confirm numeric variable ``d'var' + if _rc { + tempname `d'varname + qui encode ``d'var' if `touse', generate(``d'varname') + } + else local `d'varname ``d'var' + } + + // gather results + tempname cell rvals cvals + if `hastests' { + `quietly' di "" + } + `qui2' tab `rvarname' `cvarname' if `touse' [`weight'`exp'], `nofreq' /// + matcell(`cell') matrow(`rvals') matcol(`cvals') `missing' /// + `chi2' `exact' `exact2' 
`gamma' `lrchi2' `taub' `v' `all' `log' + mat `cvals' = `cvals'' + local N = r(N) + tempname rtot ctot + mat `ctot' = J(1,rowsof(`cell'),1) * `cell' + mat `rtot' = `cell' * J(colsof(`cell'),1,1) + foreach d in r c { + local I = r(`d') + forv i = 1/`I' { + local value: di ``d'vals'[`i',1] + local `d'values ``d'values' `value' + } + } + if "`label'"=="" { + _estpost_namesandlabels `rvarname' "`rvalues'" "" "`elabels'" // sets names, savenames, labels + _estpost_eqnamesandlabels `cvarname' "`cvalues'" "" "`elabels'" // sets eqnames, eqlabels + } + else { + _estpost_namesandlabels "" "`rvalues'" "" "`elabels'" // sets names, savenames, labels + _estpost_eqnamesandlabels "" "`cvalues'" "" "`elabels'" // sets eqnames, eqlabels + } + local savenames0 `"`savenames'"' + local savenames + if "`total'"=="" { + mat `ctot' = `ctot', `N' + mat `cell' = (`cell', `rtot') \ `ctot' + mat `rtot' = `rtot' \ `N' + local names `"`names' Total"' + local savenames0 `"`savenames0' Total"' + local eqnames `"`eqnames' Total"' + } + mat rowname `cell' = `names' + tempname count col row tot tmp + forv i = 1/`=colsof(`cell')' { + gettoken eq eqnames : eqnames + mat `tmp' = `cell'[1...,`i'] + mat roweq `tmp' = `"`eq'"' + mat `tmp' = `tmp'' + mat `count' = nullmat(`count'), `tmp' + mat `col' = nullmat(`col'), `tmp' / `ctot'[1,`i']*100 + forv j = 1/`=colsof(`tmp')' { + mat `tmp'[1,`j'] = `tmp'[1,`j'] / `rtot'[`j',1]*100 + } + mat `row' = nullmat(`row'), `tmp' + local savenames `"`savenames' `savenames0'"' + } + mat `tot' = `count' / `N'*100 + + // display + if "`quietly'"=="" { + tempname res + mat `res' = `count'', `tot'', `col'', `row'' + mat coln `res' = e(b) e(pct) e(colpct) e(rowpct) + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) + } + else { + mat rown `res' = `savenames' + di _n as res %-12s abbrev("`cvar'",12) as txt " {c |}{space 44}" + matlist `res', twidth(12) format(%9.0g) noblank nohalf rowtitle(`rvar') + } + mat drop `res' + if 
`"`macval(labels)'`macval(eqlabels)'"'!="" { + di "" + if `"`macval(labels)'"'!="" { + di as txt "row labels saved in macro e(labels)" + } + if `"`macval(eqlabels)'"'!="" { + di as txt "column labels saved in macro e(eqlabels)" + } + } + } + + // post results + local V + if c(stata_version)<9 { // V required in Stata 8 + tempname V + mat `V' = `count'' * `count' * 0 + } + if "`esample'"!="" local esample esample(`touse') + eret post `count' `V', obs(`N') `esample' + local rscalars: r(scalars) + local rscalars: subinstr local rscalars "N" "", word + foreach rsc of local rscalars { + eret scalar `rsc' = r(`rsc') + } + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local labels `"`macval(labels)'"' + eret local eqlabels `"`macval(eqlabels)'"' + eret local colvar "`cvar'" + eret local rowvar "`rvar'" + eret local subcmd "tabulate" + eret local cmd "estpost" + eret mat rowpct = `row' + eret mat colpct = `col' + eret mat pct = `tot' +end + + +* 4. estpost_tabstat: wrapper for -tabstat- +prog estpost_tabstat, eclass + version 8.2 + local caller : di _caller() // not used + + // syntax + syntax varlist [if] [in] [aw fw] [, ESample Quietly /// + Statistics(passthru) stats(passthru) LISTwise CASEwise /// + by(varname) noTotal Missing Columns(str) ELabels ] + if "`casewise'"!="" local listwise listwise + local l = length(`"`columns'"') + if `"`columns'"'==substr("variables",1,max(1,`l')) local columns "variables" + else if `"`columns'"'==substr("statistics",1,max(1,`l')) local columns "statistics" + else if `"`columns'"'=="stats" local columns "statistics" + else if `"`columns'"'=="" { + if `:list sizeof varlist'>1 local columns "variables" + else local columns "statistics" + } + else { + di as err `"columns(`columns') invalid"' + exit 198 + } + + // sample + if "`listwise'"!="" marksample touse + else { + marksample touse, nov + _estpost_markout2 `touse' `varlist' + } + if "`by'"!="" { + capt confirm string variable `by' + local numby = (_rc!=0) + 
if `numby' { + tempname tmpby + qui gen `:type `by'' `tmpby' = `by' + } + else local tmpby `by' + if "`missing'"=="" markout `touse' `by', strok + local byopt "by(`tmpby')" + } + else local numby 0 + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // gather results + if "`total'"!="" & "`by'"=="" { + di as txt "nothing to post" + eret clear + exit + } + qui tabstat `varlist' if `touse' [`weight'`exp'], save /// + `statistics' `stats' `byopt' `total' `missing' columns(`columns') + tempname tmp + capt confirm matrix r(StatTot) + if _rc { + mat `tmp' = r(Stat1) + } + else { + mat `tmp' = r(StatTot) + } + if `"`columns'"'=="statistics" { + local cnames: rownames `tmp' + local cnames: subinstr local cnames "N" "count", word all + local cnames: subinstr local cnames "se(mean)" "semean", word all + local R = colsof(`tmp') + local stats "`cnames'" + local vars: colnames `tmp' + } + else { + local cnames: colnames `tmp' + local R = rowsof(`tmp') + local stats: rownames `tmp' + local stats: subinstr local stats "N" "count", word all + local stats: subinstr local stats "se(mean)" "semean", word all + local vars "`cnames'" + local cnames: subinstr local cnames "b" "_b", word all + local cnames: subinstr local cnames "V" "_V", word all + } + local j 0 + foreach cname of local cnames { + tempname _`++j' + } + local groups: r(macros) + local g: list sizeof groups + local space + local labels + forv i = 1/`g' { + local labels `"`labels'`space'`"`r(name`i')'"'"' + } + if `R'==1 { + if `numby' { + _estpost_namesandlabels "`by'" `"`labels'"' "" "`elabels'" // sets names, savenames, labels + } + else { + _estpost_namesandlabels "" "" `"`labels'"' "`elabels'" // sets names, savenames, labels + } + } + else { + if `numby' { + _estpost_eqnamesandlabels "`by'" `"`labels'"' "" "`elabels'" // sets eqnames, eqlabels + } + else { + _estpost_eqnamesandlabels "" "" `"`labels'"' "`elabels'" // sets eqnames, eqlabels + } + local names `"`eqnames'"' + local labels 
`"`macval(eqlabels)'"' + } + forv i = 1/`g' { + gettoken name names : names + mat `tmp' = r(Stat`i') + mat rown `tmp' = `stats' + if `"`columns'"'=="statistics" { + mat `tmp' = `tmp'' + } + if `R'==1 { + mat rown `tmp' = `"`name'"' + } + else { + mat roweq `tmp' = `"`name'"' + } + local j 0 + foreach cname of local cnames { + local ++j + mat `_`j'' = nullmat(`_`j''), `tmp'[1..., `j']' + } + } + if "`total'"=="" { + mat `tmp' = r(StatTot) + mat rown `tmp' = `stats' + if `"`columns'"'=="statistics" { + mat `tmp' = `tmp'' + } + if `g'>0 { + if `R'==1 { + mat rown `tmp' = "Total" + local savenames `"`savenames' Total"' + local rowtotal "lines(rowtotal)" + } + else { + mat roweq `tmp' = "Total" + if `"`labels'"'!="" { + local labels `"`macval(labels)' Total"' + } + } + } + local j 0 + foreach cname of local cnames { + local ++j + mat `_`j'' = nullmat(`_`j''), `tmp'[1..., `j']' + } + } + + // display + if "`quietly'"=="" { + tempname res + local rescoln + local j 0 + foreach cname of local cnames { + local ++j + mat `res' = nullmat(`res'), `_`j''' + local rescoln `rescoln' e(`cname') + } + mat coln `res' = `rescoln' + di _n as txt "Summary statistics: `stats'" + di as txt " for variables: `vars'" + if "`by'"!="" { + di as txt " by categories of: `by'" + } + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) + } + else { + if `R'==1 & `g'>0 { + mat rown `res' = `savenames' + } + matlist `res', nohalf `rowtotal' rowtitle(`by') + } + if `"`macval(labels)'"'!="" { + di _n as txt "category labels saved in macro e(labels)" + } + mat drop `res' + } + + // post results + local b + local V + if c(stata_version)<9 { // b and V required in Stata 8 + tempname b V + mat `b' = `_1' \ J(1, colsof(`_1'), 0) + mat `b' = `b'[2,1...] 
+ mat `V' = `b'' * `b' + } + if "`esample'"!="" local esample esample(`touse') + eret post `b' `V', obs(`N') `esample' + + eret local labels `"`macval(labels)'"' + eret local byvar "`by'" + eret local vars "`vars'" + eret local stats "`stats'" + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local subcmd "tabstat" + eret local cmd "estpost" + + local nmat: list sizeof cnames + forv j=`nmat'(-1)1 { + local cname: word `j' of `cnames' + eret matrix `cname' = `_`j'' + } +end + + +* 5. estpost_ttest: wrapper for -ttest- (two-sample) +prog estpost_ttest, eclass + version 8.2 + local caller : di _caller() // not used + + // syntax + syntax varlist(numeric) [if] [in] , by(varname) [ ESample Quietly /// + LISTwise CASEwise UNEqual Welch ] + if "`casewise'"!="" local listwise listwise + + // sample + if "`listwise'"!="" marksample touse + else { + marksample touse, nov + _estpost_markout2 `touse' `varlist' + } + markout `touse' `by', strok + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // gather results + local nvars: list sizeof varlist + tempname diff count + mat `diff' = J(1, `nvars', .) 
+ mat coln `diff' = `varlist' + mat `count' = `diff' + local mnames se /*sd*/ t df_t p_l p p_u N_1 mu_1 /*sd_1*/ N_2 mu_2 /*sd_2*/ + foreach m of local mnames { + tempname `m' + mat ``m'' = `diff' + } + local i 0 + foreach v of local varlist { + local ++i + qui ttest `v' if `touse', by(`by') `unequal' `welch' + mat `diff'[1,`i'] = r(mu_1) - r(mu_2) + mat `count'[1,`i'] = r(N_1) + r(N_2) + foreach m of local mnames { + mat ``m''[1,`i'] = r(`m') + } + } + + // display + if "`quietly'"=="" { + tempname res + mat `res' = `diff'', `count'' + local rescoln "e(b) e(count)" + foreach m of local mnames { + mat `res' = `res', ``m''' + local rescoln `rescoln' e(`m') + } + mat coln `res' = `rescoln' + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) + } + else { + matlist `res', nohalf lines(oneline) + } + mat drop `res' + } + + // post results + local V + if c(stata_version)<9 { // V required in Stata 8 + tempname V + mat `V' = diag(vecdiag(`se'' * `se')) + } + if "`esample'"!="" local esample esample(`touse') + eret post `diff' `V', obs(`N') `esample' + + eret scalar k = `nvars' + + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local welch "`welch'" + eret local unequal "`unequal'" + eret local byvar "`by'" + eret local subcmd "ttest" + eret local cmd "estpost" + + local nmat: list sizeof mnames + forv i=`nmat'(-1)1 { + local m: word `i' of `mnames' + eret matrix `m' = ``m'' + } + eret matrix count = `count' +end + + +* 6. 
estpost_correlate: wrapper for -correlate- +prog estpost_correlate, eclass + version 8.2 + local caller : di _caller() // not used + + // syntax + syntax varlist [if] [in] [aw fw iw pw] [, ESample Quietly /// + LISTwise CASEwise Matrix noHalf noLabel ELabels ELabels2(str asis) /// + Print(real 1) /*Covariance*/ Bonferroni SIDak ] + if "`casewise'"!="" local listwise listwise + if "`bonferroni'"!="" & "`sidak'"!="" { + di as err "only one of bonferroni and sidak allowed" + exit 198 + } + local pw = ("`weight'"=="pweight") + if `:list sizeof varlist'<=1 & `"`matrix'"'=="" { + di as err "too few variables specified" + exit 102 + } + if `"`matrix'"'!="" & `"`half'"'!="" local fullmatrix fullmatrix + + // sample + if "`listwise'"!="" marksample touse + else { + marksample touse, nov + _estpost_markout2 `touse' `varlist' + } + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // gather results + tempname b rho pval count + if "`bonferroni'`sidak'"!="" { + local nvars : list sizeof varlist + local k = `nvars' * (`nvars'-1) / 2 + } + foreach depvar of local varlist { + if `"`fullmatrix'"'!="" { + local indepvars `varlist' + } + else if `"`matrix'"'!="" { + local indepvars `depvar' `ferest()' + } + else { + local indepvars `ferest()' + } + foreach v of local indepvars { + qui reg `depvar' `v' [`weight'`exp'] if `touse' + local r = sqrt(e(r2)) * (-1)^(_b[`v']<0) + local n = e(N) + mat `b' = nullmat(`b'), `r' + if "`depvar'"=="`v'" { + mat `rho' = nullmat(`rho'), `r' + mat `count' = nullmat(`count'), `n' + mat `pval' = nullmat(`pval'), .z + continue + } + local p = Ftail(e(df_m), e(df_r), e(F)) + if `pw' { + qui reg `v' `depvar' [`weight'`exp'] if `touse' + local p = max(`p', Ftail(e(df_m), e(df_r), e(F))) + } + if "`bonferroni'"!="" { + local p = min(1, `k'*`p') + } + else if "`sidak'"!="" { + local p = min(1, 1 - (1-`p')^`k') + } + if `p'>`print' { + local r .z + local n .z + local p .z + } + mat `rho' = nullmat(`rho'), `r' + mat `count' = nullmat(`count'), 
`n' + mat `pval' = nullmat(`pval'), `p' + } + if `"`matrix'`fullmatrix'"'=="" { + local colnames `indepvars' + local depname `depvar' + continue, break + } + foreach v of local indepvars { + local colnames `"`colnames'`depvar':`v' "' + } + } + mat coln `b' = `colnames' + mat coln `rho' = `colnames' + mat coln `count' = `colnames' + mat coln `pval' = `colnames' + local vce `"`e(vce)'"' // from last -regress- call + local vcetype `"`e(vcetype)'"' + + // display + if "`quietly'"=="" { + tempname res + mat `res' = `b'', `rho'', `pval'', `count'' + mat coln `res' = e(b) e(rho) e(p) e(count) + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) nodotz + } + else { + matlist `res', nohalf lines(oneline) rowtitle(`depname') nodotz + } + mat drop `res' + } + + // post results + local V + if c(stata_version)<9 { // V required in Stata 8 + tempname V + mat `V' = `b'' * `b' * 0 + } + if "`esample'"!="" local esample esample(`touse') + eret post `b' `V', depname(`depname') obs(`N') `esample' + eret local vcetype `"`vcetype'"' + eret local vce `"`vce'"' + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local depvar `depname' + eret local subcmd "correlate" + eret local cmd "estpost" + eret matrix count = `count' + eret matrix p = `pval' + eret matrix rho = `rho' + // additional labels in case of matrix + if `"`matrix'"'=="" exit + if `"`elabels'`elabels2'"'=="" exit + gettoken lhs rhs : elabels2 + gettoken rhs : rhs + local space + local vlbls + local eqlbls + local i 0 + foreach v of local varlist { + local ++i + local num `"`lhs'`i'`rhs'"' + local eqlbls `"`eqlbls'`space'`"`num'"'"' + local space " " + local lbl + if "`label'"=="" { + local lbl: var lab `v' + } + if `"`lbl'"'=="" local lbl "`v'" + local vlbls `vlbls' `v' `"`num' `lbl'"' + } + eret local labels `"`vlbls'"' + eret local eqlabels `"`eqlbls'"' +end + + +* 7. 
estpost_stci: wrapper for -stci- +prog estpost_stci, eclass + version 9.2 // Stata 8 not supported because levelsof is used + local caller : di _caller() // not used + + // syntax + syntax [if] [in] [ , ESample Quietly by(varname) /// + Median Rmean Emean p(numlist >0 <100 integer max=1) /// + CCorr Level(real `c(level)') ELabels ] + local stat "p50" + if `"`p'"'!="" { + local stat `"p`p'"' + local p `"p(`p')"' + } + else if "`rmean'"!="" local stat "rmean" + else if "`emean'"!="" local stat "emean" + + // sample + marksample touse + if `"`by'"'!="" { + markout `touse' `by', strok + } + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // get results + tempname _`stat' se N_sub lb ub + if "`by'"!="" { + qui levelsof `by' if `touse', local(levels) + capt confirm string variable `by' + if _rc { + local vallab: value label `by' + if `"`vallab'"'!="" { + _estpost_namesandlabels `by' `"`levels'"' "" "`elabels'" // sets names, savenames, labels + } + else { + local names `"`levels'"' + local savenames `"`levels'"' + } + } + else { + _estpost_namesandlabels `by' "" `"`levels'"' "`elabels'" // sets names, savenames, labels + } + } + local levels `"`levels' "total""' + local names `"`names' "total""' + local savenames `"`savenames' "total""' + gettoken l rest : levels, quotes + while (`"`l'"'!="") { + if `"`rest'"'=="" local lcond + else local lcond `" & `by'==`l'"' + qui stci if `touse'`lcond', `median' `rmean' `emean' `p' `ccorr' level(`level') + mat `_`stat'' = nullmat(`_`stat''), r(`stat') + mat `se' = nullmat(`se'), r(se) + mat `N_sub' = nullmat(`N_sub'), r(N_sub) + mat `lb' = nullmat(`lb'), r(lb) + mat `ub' = nullmat(`ub'), r(ub) + gettoken l rest : rest, quotes + } + foreach m in _`stat' se N_sub lb ub { + mat coln ``m'' = `names' + } + + // display + if "`quietly'"=="" { + tempname res + mat `res' = `N_sub'', `_`stat''', `se'', `lb'', `ub'' + mat coln `res' = e(count) e(`stat') e(se) e(lb) e(ub) + di as txt "(confidence level is " `level' "%)" + if 
c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) nodotz + } + else { + mat rown `res' = `savenames' + matlist `res', nohalf lines(rowtotal) nodotz + } + mat drop `res' + if `"`labels'"'!="" { + di _n as txt "labels saved in macro e(labels)" + } + } + + // post results + local b + local V + if c(stata_version)<9 { // b and V required in Stata 8 + tempname b V + mat `b' = `_`stat'' \ J(1, colsof(`_`stat''), 0) + mat `b' = `b'[2,1...] + mat `V' = `b'' * `b' + } + if "`esample'"!="" local esample esample(`touse') + eret post `b' `V', obs(`N') `esample' + eret scalar level = `level' + + eret local ccorr `ccorr' + eret local labels `"`labels'"' + eret local subcmd "stci" + eret local cmd "estpost" + + eret matrix ub = `ub' + eret matrix lb = `lb' + eret matrix se = `se' + eret matrix `stat' = `_`stat'' + eret matrix count = `N_sub' +end + + +* 8. estpost_ci: wrapper for -ci- +prog estpost_ci, eclass + version 8.2 + local caller : di _caller() // not used + + // syntax + syntax [varlist] [if] [in] [aw fw], [ ESample Quietly /// + LISTwise CASEwise Level(real `c(level)') /// + Binomial EXAct WAld Wilson Agresti Jeffreys /// + Poisson Exposure(varname) /// + ] + if "`casewise'"!="" local listwise listwise + if "`exposure'"!="" local exposureopt "exposure(`exposure')" + if "`binomial'"!="" & "`exact'`wald'`wilson'`agresti'`jeffreys'"=="" local exact exact + + // sample + if "`listwise'"!="" marksample touse + else { + marksample touse, nov + _estpost_markout2 `touse' `varlist' + } + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // gather results + local mnames se lb ub + tempname mean count `mnames' + local i 0 + foreach v of local varlist { + local ++i + qui ci `v' if `touse' [`weight'`exp'], level(`level') /// + `binomial' `exact' `wald' `wilson' `agresti' `jeffreys' /// + `poisson' `exposureopt' + if r(N)>=. 
continue + mat `mean' = nullmat(`mean'), r(mean) + mat `count' = nullmat(`count'), r(N) + foreach m of local mnames { + mat ``m'' = nullmat(``m''), r(`m') + } + local rnames "`rnames' `v'" + } + capt confirm matrix `count' + if _rc { + di as txt "nothing to post" + eret clear + exit + } + foreach m in mean count `mnames' { + mat coln ``m'' = `rnames' + } + if "`listwise'"=="" { // update sample + if colsof(`count') < `: list sizeof varlist' { + _estpost_markout2 `touse' `rnames' + qui count if `touse' + local N = r(N) + } + } + + // display + if "`quietly'"=="" { + tempname res + mat `res' = `mean'', `count'' + local rescoln "e(b) e(count)" + foreach m of local mnames { + mat `res' = `res', ``m''' + local rescoln `rescoln' e(`m') + } + mat coln `res' = `rescoln' + di as txt "(confidence level is " `level' "%)" + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) + } + else { + matlist `res', nohalf lines(oneline) + } + mat drop `res' + } + + // post results + local V + if c(stata_version)<9 { // V required in Stata 8 + tempname V + mat `V' = diag(vecdiag(`se'' * `se')) + } + if "`esample'"!="" local esample esample(`touse') + eret post `mean' `V', obs(`N') `esample' + + eret scalar k = colsof(`count') + eret scalar level = `level' + + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local exposure "`exposure'" + eret local poisson "`poisson'" + eret local binomial "`exact'`wald'`wilson'`agresti'`jeffreys'" + eret local subcmd "ci" + eret local cmd "estpost" + + local nmat: list sizeof mnames + forv i=`nmat'(-1)1 { + local m: word `i' of `mnames' + eret matrix `m' = ``m'' + } + eret matrix count = `count' +end + + +* 9. 
estpost_prtest: wrapper for -prtest- (two-sample) +prog estpost_prtest, eclass + version 8.2 + local caller : di _caller() // not used + + // syntax + syntax varlist(numeric) [if] [in] , by(varname) [ ESample Quietly /// + LISTwise CASEwise ] + if "`casewise'"!="" local listwise listwise + + // sample + if "`listwise'"!="" marksample touse + else { + marksample touse, nov + _estpost_markout2 `touse' `varlist' + } + markout `touse' `by', strok + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + // gather results + local nvars: list sizeof varlist + tempname diff count + mat `count' = J(1, `nvars', .) + mat coln `count' = `varlist' + mat `diff' = `count' + local mnames se se0 z p_l p p_u N_1 P_1 N_2 P_2 + foreach m of local mnames { + tempname `m' + mat ``m'' = `count' + } + local i 0 + foreach v of local varlist { + local ++i + qui prtest `v' if `touse', by(`by') + mat `count'[1,`i'] = r(N_1) + r(N_2) + mat `diff'[1,`i'] = r(P_1) - r(P_2) + mat `se'[1,`i'] = sqrt(r(P_1)*(1-r(P_1))/r(N_1) + r(P_2)*(1-r(P_2))/r(N_2)) + mat `se0'[1,`i'] = `diff'[1,`i'] / r(z) + mat `p_l'[1,`i'] = normal(r(z)) + mat `p'[1,`i'] = (1-normal(abs(r(z))))*2 + mat `p_u'[1,`i'] = 1-normal(r(z)) + foreach m in z N_1 P_1 N_2 P_2 { + mat ``m''[1,`i'] = r(`m') + } + } + + // display + if "`quietly'"=="" { + tempname res + mat `res' = `diff'', `count'' + local rescoln "e(b) e(count)" + foreach m of local mnames { + mat `res' = `res', ``m''' + local rescoln `rescoln' e(`m') + } + mat coln `res' = `rescoln' + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) + } + else { + matlist `res', nohalf lines(oneline) + } + mat drop `res' + } + + // post results + local V + if c(stata_version)<9 { // V required in Stata 8 + tempname V + mat `V' = diag(vecdiag(`se'' * `se')) + } + if "`esample'"!="" local esample esample(`touse') + eret post `diff' `V', obs(`N') `esample' + + eret scalar k = `nvars' + + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local 
byvar "`by'" + eret local subcmd "prtest" + eret local cmd "estpost" + + local nmat: list sizeof mnames + forv i=`nmat'(-1)1 { + local m: word `i' of `mnames' + eret matrix `m' = ``m'' + } + eret matrix count = `count' +end + + +* 10. estpost__svy_tabulate: wrapper for -svy:tabulate- +prog estpost__svy_tabulate + version 9.2 + local caller : di _caller() + _on_colon_parse `0' + local svyopts `"svyopts(`s(before)')"' + local 0 `"`s(after)'"' + syntax varlist(min=1 max=2) [if] [in] [ , * ] + if `:list sizeof varlist'==1 { + version `caller': _svy_tabulate_oneway `varlist' `if' `in', /// + `svyopts' `options' + } + else { + version `caller': _svy_tabulate_twoway `varlist' `if' `in', /// + `svyopts' `options' + } +end +prog _svy_tabulate_oneway + version 9.2 + local caller : di _caller() // not used + + // syntax + syntax varname [if] [in] [, ESample Quietly /// + svyopts(str asis) MISSing Level(cilevel) /// + noTOTal noMARGinals noLabel ELabels PROPortion PERcent /// + CELl COUnt se ci deff deft * ] + if "`marginals'"!="" local total "nototal" + else if "`total'"!="" local marginals "nomarginals" + + // run svy:tabulate + `quietly' svy `svyopts' : tabulate `varlist' `if' `in', /// + level(`level') `cell' `count' `se' `ci' `deff' `deft' /// + `missing' `marginals' `label' `proportion' `percent' `options' + if "`count'"!="" & "`cell'`se'`ci'`deff'`deft'"=="" { // => return count in e(b) + quietly svy `svyopts' : tabulate `varlist' `if' `in', count se /// + level(`level') `missing' `marginals' `label' `proportion' `percent' `options' + } + + // get labels + qui levelsof `varlist' if e(sample), `missing' local(levels) + local R : list sizeof levels + if e(r)!=`R' { + di as err "unexpected error; number of rows unequal number of levels" + exit 499 + } + capt confirm string variable `varlist' + if _rc { + if "`label'"=="" { + _estpost_namesandlabels `varlist' "`levels'" "" "`elabels'" // sets names, savenames, labels + } + else { + _estpost_namesandlabels "" "`levels'" "" 
"`elabels'" // sets names, savenames, labels + } + } + else { + _estpost_namesandlabels "" "" `"`levels'"' "`elabels'" // sets names, savenames, labels + } + + // collect results + tempname cell count obs b se lb ub deff deft + local N_pop = cond(e(N_subpop)<., e(N_subpop), e(N_pop)) + local N_obs = cond(e(N_sub)<., e(N_sub), e(N)) + local tval = invttail(e(df_r), (100-`level')/200) + if `tval'>=. local tval = invnormal(1 - (100-`level')/200) + mat `cell' = e(Prop)' + mat `count' = `cell' * `N_pop' + capture confirm matrix e(ObsSub) + if _rc { + mat `obs' = e(Obs)' + } + else { + mat `obs' = e(ObsSub)' + } + capture confirm matrix e(Deff) + if _rc local DEFF "" + else { + local DEFF deff + mat `deff' = e(Deff) + } + capture confirm matrix e(Deft) + if _rc local DEFT "" + else { + local DEFT deft + mat `deft' = e(Deft) + } + mat `b' = e(b) + mata: st_matrix(st_local("se"), sqrt(diagonal(st_matrix("e(V)")))') + if "`total'"=="" { + mat `cell' = `cell', 1 + mat `count' = `count', `N_pop' + mat `obs' = `obs', `N_obs' + if "`DEFF'"!="" mat `deff' = `deff', .z + if "`DEFT'"!="" mat `deft' = `deft', .z + if e(setype)=="count" { + mat `b' = `b', `N_pop' + mat `se' = `se', sqrt(el(e(V_col),1,1)) + } + else { // e(setype)=="cell" + mat `b' = `b', 1 + mat `se' = `se', 0 + } + local names `"`names' "Total""' + local savenames `"`savenames' "Total""' + local linesopt "lines(rowtotal)" + + } + if e(setype)!="count" { + mata: st_matrix( st_local("lb"), invlogit( /// + logit(st_matrix(st_local("b"))) - strtoreal(st_local("tval")) * /// + st_matrix(st_local("se")) :/ /// + (st_matrix(st_local("b")) :* (1 :- st_matrix(st_local("b")))))) + mata: st_matrix( st_local("ub"), invlogit( /// + logit(st_matrix(st_local("b"))) + strtoreal(st_local("tval")) * /// + st_matrix(st_local("se")) :/ /// + (st_matrix(st_local("b")) :* (1 :- st_matrix(st_local("b")))))) + if "`total'"=="" { + mat `lb'[1, colsof(`lb')] = .z + mat `ub'[1, colsof(`ub')] = .z + } + } + else { + mata: st_matrix( 
st_local("lb"), st_matrix(st_local("b")) - /// + strtoreal(st_local("tval")) * st_matrix(st_local("se")) ) + mata: st_matrix( st_local("ub"), st_matrix(st_local("b")) + /// + strtoreal(st_local("tval")) * st_matrix(st_local("se")) ) + } + foreach m in cell count obs b se lb ub `DEFF' `DEFT' { + capt mat coln ``m'' = `names' + } + if "`percent'"!="" { + mat `cell' = `cell' * 100 + if e(setype)!="count" { + mat `b' = `b' * 100 + mat `se' = `se' * 100 + mat `lb' = `lb' * 100 + mat `ub' = `ub' * 100 + } + } + + // display + if "`quietly'"=="" { + /* + tempname res + mat `res' = `b'', `se'', `lb'', `ub'', `deff'', `deft'' ///, `cell'', `count'', `obs'' + mat coln `res' = e(b) e(se) e(lb) e(ub) e(deff) e(deft) /// e(cell) e(count) e(obs) + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) nodotz + } + else { + mat rown `res' = `savenames' + matlist `res', nohalf `linesopt' rowtitle(`varlist') nodotz + } + mat drop `res' + */ + local plabel = cond("`percent'"!="","percentages","proportions") + local blabel = cond("`e(setype)'"=="count", "weighted counts", "`e(setype)' `plabel'") + di _n as txt "saved vectors:" + di as txt %20s "e(b) = " " " as res "`blabel'" + di as txt %20s "e(se) = " " " as res "standard errors of `blabel'" + di as txt %20s "e(lb) = " " " as res "lower `level'% confidence bounds for `blabel'" + di as txt %20s "e(ub) = " " " as res "upper `level'% confidence bounds for `blabel'" + if "`DEFF'"!="" /// + di as txt %20s "e(deff) = " " " as res "deff for variances of `blabel'" + if "`DEFT'"!="" /// + di as txt %20s "e(deft) = " " " as res "deft for variances of `blabel'" + di as txt %20s "e(cell) = " " " as res "cell `plabel'" + di as txt %20s "e(count) = " " " as res "weighted counts" + di as txt %20s "e(obs) = " " " as res "number of observations" + if `"`labels'"'!="" { + di _n as txt "row labels saved in macro e(labels)" + } + } + + // post results + erepost b=`b', cmd(estpost) nov `esample' + qui estadd local labels `"`labels'"' + 
qui estadd local subcmd "tabulate" + qui estadd scalar level = `level' + foreach m in obs count cell `DEFT' `DEFF' ub lb se { + qui estadd matrix `m' = ``m'', replace + } +end +prog _svy_tabulate_twoway + version 9.2 + local caller : di _caller() // not used + + // syntax + syntax varlist(min=1 max=2) [if] [in] [, ESample Quietly /// + svyopts(str asis) MISSing Level(cilevel) /// + noTOTal noMARGinals noLabel ELabels PROPortion PERcent /// + CELl COUnt COLumn row se ci deff deft * ] + if "`marginals'"!="" local total "nototal" + else if "`total'"!="" local marginals "nomarginals" + + // run svy:tabulate + `quietly' svy `svyopts' : tabulate `varlist' `if' `in', /// + level(`level') `cell' `count' `column' `row' `se' `ci' `deff' `deft' /// + `missing' `marginals' `label' `proportion' `percent' `options' + if `: word count `count' `column' `row''==1 & "`cell'`se'`ci'`deff'`deft'"=="" { + quietly svy `svyopts' : tabulate `varlist' `if' `in', `count' `column' `row' se /// + level(`level') `missing' `marginals' `label' `proportion' `percent' `options' + } + + // get labels + local rvar `"`e(rowvar)'"' + qui levelsof `rvar' if e(sample), `missing' local(levels) + local R : list sizeof levels + if e(r)!=`R' { + di as err "unexpected error; number of rows unequal number of rowvar levels" + exit 499 + } + capt confirm string variable `rvar' + if _rc { + if "`label'"=="" { + _estpost_namesandlabels `rvar' "`levels'" "" "`elabels'" // sets names, savenames, labels + } + else { + _estpost_namesandlabels "" "`levels'" "" "`elabels'" // sets names, savenames, labels + } + } + else { + _estpost_namesandlabels "" "" `"`levels'"' "`elabels'" // sets names, savenames, labels + } + local cvar `"`e(colvar)'"' + qui levelsof `cvar' if e(sample), `missing' local(levels) + local C : list sizeof levels + if e(c)!=`C' { + di as err "unexpected error; number of column unequal number of colvar levels" + exit 499 + } + local savenames0 `"`savenames'"' + local savenames + capt confirm string 
variable `cvar' + if _rc { + if "`label'"=="" { + _estpost_eqnamesandlabels `cvar' "`levels'" "" "`elabels'" // sets eqnames, eqlabels + } + else { + _estpost_eqnamesandlabels "" "`levels'" "" "`elabels'" // sets eqnames, eqlabels + } + } + else { + _estpost_eqnamesandlabels "" "" `"`levels'"' "`elabels'" // sets eqnames, eqlabels + } + + // collect results + tempname tmp cell row col count obs b se lb ub deff deft + local N_pop = cond(e(N_subpop)<., e(N_subpop), e(N_pop)) + local N_obs = cond(e(N_sub)<., e(N_sub), e(N)) + local tval = invttail(e(df_r), (100-`level')/200) + if `tval'>=. local tval = invnormal(1 - (100-`level')/200) + mat `cell' = e(Prop) // r x c matrix + mat `cell' = (`cell', `cell' * J(`C',1,1)) \ (J(1,`R',1) * `cell', 1) + mat `count' = `cell' * `N_pop' + mat `tmp' = `cell'[1..., `C'+1] + mata: st_matrix(st_local("row"), st_matrix(st_local("cell")) :/ /// + st_matrix(st_local("tmp"))) + mat `tmp' = `cell'[`R'+1, 1...] + mata: st_matrix(st_local("col"), st_matrix(st_local("cell")) :/ /// + st_matrix(st_local("tmp"))) + mat drop `tmp' + capture confirm matrix e(ObsSub) + if _rc { + mat `obs' = e(Obs) // r x c matrix + } + else { + mat `obs' = e(ObsSub) // r x c matrix + } + capt confirm matrix e(Deff) + if _rc local DEFF "" + else { + local DEFF deff + mat `deff' = e(Deff) // vector + } + capt confirm matrix e(Deft) + if _rc local DEFT "" + else { + local DEFT deft + mat `deft' = e(Deft) // vector + } + mat `b' = e(b) // vector + mata: st_matrix(st_local("se"), sqrt(diagonal(st_matrix("e(V)")))') // vector + if e(setype)=="count" local btype count + else if e(setype)=="row" local btype row + else if e(setype)=="column" local btype col + else local btype cell + foreach m in `DEFF' `DEFT' b se { // vector -> r x c matrix + forv r = 1/`R' { + local from = (`r'-1)*`C' + 1 + local to = `r'*`C' + mat `tmp' = nullmat(`tmp') \ ``m''[1, `from'..`to'] + } + mat drop ``m'' + mat rename `tmp' ``m'' + } + if "`total'"=="" { + mat `obs' = (`obs', `obs' * 
J(`C',1,1)) \ (J(1,`R',1) * `obs', `N_obs') + if "`DEFF'"!="" mat `deff' = (`deff', e(Deff_row)') \ (e(Deff_col), .z) + if "`DEFT'"!="" mat `deft' = (`deft', e(Deft_row)') \ (e(Deft_col), .z) + mat `b' = (`b', ``btype''[1..`R',`C'+1]) \ ``btype''[`R'+1,1...] + mata: st_matrix(st_local("se"), /// + ((st_matrix(st_local("se")), sqrt(diagonal(st_matrix("e(V_row)")))) /// + \ (sqrt(diagonal(st_matrix("e(V_col)")))', .z))) + if "`btype'"=="row" { + mat `se' = `se'[1..., 1..`C'], J(`R'+1, 1, .z) + } + else if "`btype'"=="col" { + mat `se' = `se'[1..`R', 1...] \ J(1, `C'+1, .z) + } + local names `"`names' "Total""' + local savenames0 `"`savenames0' "Total""' + local eqnames `"`eqnames' "Total""' + } + else { + mat `cell' = `cell'[1..`R', 1..`C'] + mat `count' = `count'[1..`R', 1..`C'] + mat `row' = `row'[1..`R', 1..`C'] + mat `col' = `col'[1..`R', 1..`C'] + } + if "`btype'"!="count" { + mata: st_matrix( st_local("lb"), invlogit( /// + logit(st_matrix(st_local("b"))) - strtoreal(st_local("tval")) * /// + st_matrix(st_local("se")) :/ /// + (st_matrix(st_local("b")) :* (1 :- st_matrix(st_local("b")))))) + mata: st_matrix( st_local("ub"), invlogit( /// + logit(st_matrix(st_local("b"))) + strtoreal(st_local("tval")) * /// + st_matrix(st_local("se")) :/ /// + (st_matrix(st_local("b")) :* (1 :- st_matrix(st_local("b")))))) + } + else { + mata: st_matrix( st_local("lb"), st_matrix(st_local("b")) - /// + strtoreal(st_local("tval")) * st_matrix(st_local("se")) ) + mata: st_matrix( st_local("ub"), st_matrix(st_local("b")) + /// + strtoreal(st_local("tval")) * st_matrix(st_local("se")) ) + } + if "`total'"=="" { + if "`btype'"=="row" { + mat `lb' = `lb'[1..., 1..`C'] , J(`R'+1, 1, .z) + mat `ub' = `ub'[1..., 1..`C'] , J(`R'+1, 1, .z) + } + else if "`btype'"=="col" { + mat `lb' = `lb'[1..`R', 1...] \ J(1, `C'+1, .z) + mat `ub' = `ub'[1..`R', 1...] 
\ J(1, `C'+1, .z) + } + else { + mat `lb'[`R'+1, `C'+1] = .z + mat `ub'[`R'+1, `C'+1] = .z + } + } + foreach m in cell count obs row col `DEFF' `DEFT' b se lb ub { // r x c matrix -> vector + mat rown ``m'' = `names' + gettoken eq rest : eqnames + forv c = 1/`=colsof(``m'')' { + mat roweq ``m'' = `"`eq'"' + mat `tmp' = nullmat(`tmp'), ``m''[1...,`c']' + gettoken eq rest : rest + } + mat drop ``m'' + mat rename `tmp' ``m'' + } + if "`percent'"!="" { + mat `cell' = `cell' * 100 + mat `col' = `col' * 100 + mat `row' = `row' * 100 + if e(setype)!="count" { + mat `b' = `b' * 100 + mat `se' = `se' * 100 + mat `lb' = `lb' * 100 + mat `ub' = `ub' * 100 + } + } + + // display + if "`quietly'"=="" { + /* + forv c = 1/`=colsof(`cell')' { + local savenames `"`savenames' `savenames0'"' + } + tempname res + mat `res' = `b'', `se'', `lb'', `ub'', `deff'', `deft'', `cell'', `row'', `col'', `count'', `obs'' + mat coln `res' = e(b) e(se) e(lb) e(ub) e(deff) e(deft) e(cell) e(row) e(col) e(count) e(obs) + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) nodotz + } + else { + mat rown `res' = `savenames' + di _n as res %-12s abbrev("`cvar'",12) as txt " {c |}{space 44}" + matlist `res', twidth(12) format(%9.0g) noblank nohalf /// + rowtitle(`rvar') nodotz + } + mat drop `res' + */ + local plabel = cond("`percent'"!="","percentages","proportions") + local blabel = cond("`e(setype)'"=="count", "weighted counts", "`e(setype)' `plabel'") + di _n as txt "saved vectors:" + di as txt %20s "e(b) = " " " as res "`blabel'" + di as txt %20s "e(se) = " " " as res "standard errors of `blabel'" + di as txt %20s "e(lb) = " " " as res "lower `level'% confidence bounds for `blabel'" + di as txt %20s "e(ub) = " " " as res "upper `level'% confidence bounds for `blabel'" + if "`DEFF'"!="" /// + di as txt %20s "e(deff) = " " " as res "deff for variances of `blabel'" + if "`DEFT'"!="" /// + di as txt %20s "e(deft) = " " " as res "deft for variances of `blabel'" + di as txt %20s 
"e(cell) = " " " as res "cell `plabel'" + di as txt %20s "e(row) = " " " as res "row `plabel'" + di as txt %20s "e(col) = " " " as res "column `plabel'" + di as txt %20s "e(count) = " " " as res "weighted counts" + di as txt %20s "e(obs) = " " " as res "number of observations" + if `"`labels'`eqlabels'"'!="" { + di "" + if `"`labels'"'!="" { + di as txt "row labels saved in macro e(labels)" + } + if `"`eqlabels'"'!="" { + di as txt "column labels saved in macro e(eqlabels)" + } + } + } + + // post results + erepost b=`b', cmd(estpost) nov `esample' + qui estadd local eqlabels `"`eqlabels'"' + qui estadd local labels `"`labels'"' + qui estadd local subcmd "tabulate" + qui estadd scalar level = `level' + foreach m in obs count row col cell `DEFT' `DEFF' ub lb se { + qui estadd matrix `m' = ``m'', replace + } +end + +* 11. estpost_margins: wrapper for -margins- (Stata 11) +prog estpost_margins, eclass + version 11 + local caller : di _caller() + + // syntax + _parse comma anything 0 : 0 + syntax [ , /*ESample*/ Quietly /// + post * ] + if "`post'"!="" { + di as err "post not allowed" + exit 198 + } + + // run margins + `quietly' version `caller': margins `anything', `options' + + // post results + capt postrtoe, noclear resize + if _rc<=1 { // -postrtoe- does not work, e.g., with -regress- + error _rc // _rc=1 (break) + exit + } + tempname b V + mat `b' = r(b) + mat `V' = r(V) + erepost b = `b' V = `V' /*, `esample'*/ + foreach r in `:r(scalars)' { + eret scalar `r' = r(`r') + } + foreach r in `:r(macros)' { + eret local `r' `"`r(`r')'"' + } + tempname tmp + foreach r in `:r(matrices)' { + if inlist("`r'", "b", "V") continue + mat `tmp' = r(`r') + eret matrix `r' = `tmp' + } +end + +* 12. 
estpost_gtabstat: wrapper for -gstats tabstat- (gtools required) +prog estpost_gtabstat, eclass + version 13.1 + local caller : di _caller() // not used + + cap gtools + if ( _rc ) { + disp as err "gtools required for estpost gtabstat" + exit 111 + } + + // syntax + syntax varlist [if] [in] [aw fw iw pw] [, ESample Quietly /// + Statistics(passthru) stats(passthru) /// + by(varname) Missing Columns(str) ELabels ] + local l = length(`"`columns'"') + if `"`columns'"'==substr("variables",1,max(1,`l')) local columns "variables" + else if `"`columns'"'==substr("statistics",1,max(1,`l')) local columns "statistics" + else if `"`columns'"'=="stats" local columns "statistics" + else if `"`columns'"'=="" { + if `:list sizeof varlist'>1 local columns "variables" + else local columns "statistics" + } + else { + di as err `"columns(`columns') invalid"' + exit 198 + } + + // sample + if "`listwise'"!="" marksample touse + else { + marksample touse, nov + _estpost_markout2 `touse' `varlist' + } + if "`by'"!="" { + capt confirm string variable `by' + local numby = (_rc!=0) + + // NOTE(mauricio): Not sure what this does. I think it's just + // a copy of the by variable so that _estpost_eqnamesandlabels + // parses the numeric input back into value labels? 
+ // + // if `numby' { + // tempname tmpby + // qui gen `:type `by'' `tmpby' = `by' + // } + // else local tmpby `by' + // local byopt "by(`tmpby')" + + if "`missing'"=="" markout `touse' `by', strok + local byopt by(`by') + } + else local numby 0 + qui count if `touse' + local N = r(N) + if `N'==0 error 2000 + + if ( `"`missing'"' == "" ) local missing nomissing + + // gather results + tempname tmp + tempname gtabstat + qui gstats tabstat `varlist' if `touse' [`weight'`exp'], mata(`gtabstat') /// + `statistics' `stats' `byopt' `missing' columns(`columns') + + mata st_local("stats", invtokens(`gtabstat'.statnames)) + mata st_local("vars", invtokens(`gtabstat'.statvars)) + mata st_local("R", strofreal(`gtabstat'.ksources)) + mata st_local("g", strofreal(`gtabstat'.kby? `gtabstat'.J: 0)) + + local stats: subinstr local stats "N" "count", word all + local stats: subinstr local stats "se(mean)" "semean", word all + + if `"`columns'"'=="statistics" { + local cnames: copy local stats + } + else { + local cnames: copy local vars + } + local cnames: subinstr local cnames "b" "_b", word all + local cnames: subinstr local cnames "V" "_V", word all + + local j 0 + foreach cname of local cnames { + tempname _`++j' + } + + local space + local labels + forv i = 1/`g' { + if `numby' { + mata st_local("name", sprintf(st_varformat(`gtabstat'.byvars[1]), `gtabstat'.getnum(`i', 1))) + } + else { + mata st_local("name", `gtabstat'.getchar(`i', 1, 0)) + } + local labels `"`labels'`space'`"`name'"'"' + } + + if `R'==1 { + if `numby' { + _estpost_namesandlabels "`by'" `"`labels'"' "" "`elabels'" // sets names, savenames, labels + } + else { + _estpost_namesandlabels "" "" `"`labels'"' "`elabels'" // sets names, savenames, labels + } + } + else { + if `numby' { + _estpost_eqnamesandlabels "`by'" `"`labels'"' "" "`elabels'" // sets eqnames, eqlabels + } + else { + _estpost_eqnamesandlabels "" "" `"`labels'"' "`elabels'" // sets eqnames, eqlabels + } + local names `"`eqnames'"' + local 
labels `"`macval(eqlabels)'"' + } + + tempname glabname + tempname glabstat + tempname glabvar + tempname glabmat + + forv i = 1/`g' { + mata `glabname' = `gtabstat'.getf(`i', 1, .) + mata `glabmat' = `gtabstat'.colvar? `gtabstat'.getOutputGroup(`i'): `gtabstat'.getOutputGroup(`i')' + + if `"`columns'"'=="statistics" { + mata `glabstat' = (J(`gtabstat'.kstats, 1, ""), `gtabstat'.statnames') + mata `glabvar' = (J(`gtabstat'.ksources, 1, `glabname'), `gtabstat'.statvars') + } + else { + mata `glabstat' = (J(`gtabstat'.kstats, 1, `glabname'), `gtabstat'.statnames') + mata `glabvar' = (J(`gtabstat'.ksources, 1, ""), `gtabstat'.statvars') + } + + mata st_matrix("`tmp'", `glabmat') + mata st_matrixrowstripe("`tmp'", `glabstat') + mata st_matrixcolstripe("`tmp'", `glabvar') + + if `"`columns'"'=="statistics" { + mat `tmp' = `tmp'' + if ( `R'==1 ) { + mata `glabstat' = ("", `glabname') + mata `glabvar' = (J(`gtabstat'.kstats, 1, ""), `gtabstat'.statnames') + mata st_matrixrowstripe("`tmp'", `glabstat') + mata st_matrixcolstripe("`tmp'", `glabvar') + } + } + + local j 0 + foreach cname of local cnames { + local ++j + mat `_`j'' = nullmat(`_`j''), `tmp'[1..., `j']' + } + } + + if ( `g' == 0 ) { + mata `glabmat' = `gtabstat'.colvar? 
`gtabstat'.output: `gtabstat'.output' + mata `glabstat' = (J(`gtabstat'.kstats, 1, ""), `gtabstat'.statnames') + mata `glabvar' = (J(`gtabstat'.ksources, 1, ""), `gtabstat'.statvars') + + mata st_matrix("`tmp'", `glabmat') + mata st_matrixrowstripe("`tmp'", `glabstat') + mata st_matrixcolstripe("`tmp'", `glabvar') + if `"`columns'"'=="statistics" { + mat `tmp' = `tmp'' + } + + local j 0 + foreach cname of local cnames { + local ++j + mat `_`j'' = nullmat(`_`j''), `tmp'[1..., `j']' + } + } + + // display + if "`quietly'"=="" { + tempname res + local rescoln + local j 0 + foreach cname of local cnames { + local ++j + mat `res' = nullmat(`res'), `_`j''' + local rescoln `rescoln' e(`cname') + } + mat coln `res' = `rescoln' + di _n as txt "Summary statistics: `stats'" + di as txt " for variables: `vars'" + if "`by'"!="" { + di as txt " by categories of: `by'" + } + if c(stata_version)<9 { + mat list `res', noheader nohalf format(%9.0g) + } + else { + if `R'==1 & `g'>0 { + mat rown `res' = `savenames' + } + matlist `res', nohalf `rowtotal' rowtitle(`by') + } + if `"`macval(labels)'"'!="" { + di _n as txt "category labels saved in macro e(labels)" + } + mat drop `res' + } + + // post results + local b + local V + if c(stata_version)<9 { // b and V required in Stata 8 + tempname b V + mat `b' = `_1' \ J(1, colsof(`_1'), 0) + mat `b' = `b'[2,1...] 
+ mat `V' = `b'' * `b' + } + if "`esample'"!="" local esample esample(`touse') + eret post `b' `V', obs(`N') `esample' + + eret local labels `"`macval(labels)'"' + eret local byvar "`by'" + eret local vars "`vars'" + eret local stats "`stats'" + eret local wexp `"`exp'"' + eret local wtype `"`weight'"' + eret local subcmd "tabstat" + eret local cmd "estpost" + + local nmat: list sizeof cnames + forv j=`nmat'(-1)1 { + local cname: word `j' of `cnames' + eret matrix `cname' = `_`j'' + } + + cap mata mata drop `gtabstat' + cap mata mata drop `glabname' + cap mata mata drop `glabstat' + cap mata mata drop `glabvar' + cap mata mata drop `glabmat' +end + +* 99. +* copy of erepost.ado, version 1.0.1, Ben Jann, 30jul2007 +* 14jan2009: noV option added => repost e(b) and remove e(V) if not specified +prog erepost, eclass + version 8.2 + syntax [anything(equalok)] [, NOV cmd(str) noEsample Esample2(varname) REName /// + Obs(passthru) Dof(passthru) PROPerties(passthru) * ] + if "`esample'"!="" & "`esample2'"!="" { + di as err "only one allowed of noesample and esample()" + exit 198 + } +// parse [b = b] [V = V] + if `"`anything'"'!="" { + tokenize `"`anything'"', parse(" =") + if `"`7'"'!="" error 198 + if `"`1'"'=="b" { + if `"`2'"'=="=" & `"`3'"'!="" { + local b `"`3'"' + confirm matrix `b' + } + else error 198 + if `"`4'"'=="V" { + if `"`5'"'=="=" & `"`6'"'!="" { + local v `"`6'"' + confirm matrix `b' + } + else error 198 + } + else if `"`4'"'!="" error 198 + } + else if `"`1'"'=="V" { + if `"`4'"'!="" error 198 + if `"`2'"'=="=" & `"`3'"'!="" { + local v `"`3'"' + confirm matrix `v' + } + else error 198 + } + else error 198 + } +//backup existing e()'s + if "`esample2'"!="" { + local sample "`esample2'" + } + else if "`esample'"=="" { + tempvar sample + gen byte `sample' = e(sample) + } + local emacros: e(macros) + if `"`properties'"'!="" { + local emacros: subinstr local emacros "properties" "", word + } + foreach emacro of local emacros { + local e_`emacro' 
`"`e(`emacro')'"' + } + local escalars: e(scalars) + if `"`obs'"'!="" { + local escalars: subinstr local escalars "N" "", word + } + if `"`dof'"'!="" { + local escalars: subinstr local escalars "df_r" "", word + } + foreach escalar of local escalars { + tempname e_`escalar' + scalar `e_`escalar'' = e(`escalar') + } + local ematrices: e(matrices) + if "`v'"=="" & "`nov'"!="" { // added 14jan2009 + local nov V + local ematrices : list ematrices - nov + } + if "`b'"=="" & `:list posof "b" in ematrices' { + tempname b + mat `b' = e(b) + } + if "`v'"=="" & `:list posof "V" in ematrices' { + tempname v + mat `v' = e(V) + } + local bV "b V" + local ematrices: list ematrices - bV + foreach ematrix of local ematrices { + tempname e_`ematrix' + matrix `e_`ematrix'' = e(`ematrix') + } +// rename + if "`b'"!="" & "`v'"!="" & "`rename'"!="" { + local eqnames: coleq `b', q + local vnames: colnames `b' + mat coleq `v' = `eqnames' + mat coln `v' = `vnames' + mat roweq `v' = `eqnames' + mat rown `v' = `vnames' + } +// post results + if "`esample'"=="" { + eret post `b' `v', esample(`sample') `obs' `dof' `properties' `options' + } + else { + eret post `b' `v', `obs' `dof' `properties' `options' + } + foreach emacro of local emacros { + eret local `emacro' `"`e_`emacro''"' + } + if `"`cmd'"'!="" { + eret local cmd `"`cmd'"' + } + foreach escalar of local escalars { + eret scalar `escalar' = scalar(`e_`escalar'') + } + foreach ematrix of local ematrices { + eret matrix `ematrix' = `e_`ematrix'' + } +end diff --git a/110/replication_package/replication/ado/plus/e/estpost.hlp b/110/replication_package/replication/ado/plus/e/estpost.hlp new file mode 100644 index 0000000000000000000000000000000000000000..c3dd4dc6909b32b0bed2e1ffe9c8c7b4383f75df --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/estpost.hlp @@ -0,0 +1,1524 @@ +{smcl} +{* 19may2021}{...} +{hi:help estpost}{right:also see: {helpb esttab}, {helpb estout}, {helpb eststo}, {helpb estadd}} +{right: {browse 
"http://repec.sowi.unibe.ch/stata/estout/"}} +{hline} + +{title:Title} + +{p 4 4 2}{hi:estpost} {hline 2} Post results from various commands in {cmd:e()} + + +{title:Syntax} + +{p 8 15 2} +{cmd:estpost} {it:{help estpost##commands:command}} [...] + +{marker commands} + {it:command}{col 26}description + {hline 79} + {helpb estpost##summarize:{ul:su}mmarize}{col 26}{...} +post summary statistics + {helpb estpost##tabstat:tabstat}{col 26}{...} +post summary statistics + {helpb estpost##gtabstat:gtabstat}{col 26}{...} +post summary statistics (using {helpb gstats tab} from {helpb gtools}) + {helpb estpost##ttest:ttest}{col 26}{...} +post two-group mean-comparison tests + {helpb estpost##prtest:prtest}{col 26}{...} +post two-group tests of proportions + {helpb estpost##tabulate:{ul:ta}bulate}{col 26}{...} +post one-way or two-way frequency table + {helpb estpost##svy_tabulate:svy: {ul:ta}bulate}{col 26}{...} +post frequency table for survey data + {helpb estpost##correlate:{ul:cor}relate}{col 26}{...} +post correlations + {helpb estpost##ci:ci}{col 26}{...} +post confidence intervals for means, + {col 26}{...} + proportions, or counts + {helpb estpost##stci:stci}{col 26}{...} +post confidence intervals for means + {col 26}{...} + and percentiles of survival time + {helpb estpost##margins:margins}{col 26}{...} +post results from {cmd:margins} (Stata 11 or newer) + {hline 79} + + +{title:Description} + +{p 4 4 2} +{cmd:estpost} posts results from various Stata commands in {cmd:e()} +so that they can be tabulated using {helpb esttab} or {helpb estout}. Type +{helpb ereturn:ereturn list} after {cmd:estpost} to list the elements saved +in {cmd:e()}. 
+ + +{title:Commands} +{marker summarize} +{dlgtab:summarize} + +{p 4 15 2} +{cmd:estpost} {cmdab:su:mmarize} + [{it:{help varlist}}] [{it:{help if}}] [{it:{help in}}] [{it:{help weight}}] + [{cmd:,} + {cmdab:d:etail} + {cmdab:mean:only} + {cmdab:list:wise} + {cmdab:case:wise} + {cmdab:q:uietly} + {cmdab:es:ample} + ] + +{p 4 4 2} + posts summary statistics computed by {helpb summarize}. If no + {it:varlist} is specified, summary statistics are calculated for all + variables in the dataset. + +{p 4 4 2} + {cmd:aweight}s, {cmd:fweight}s, and {cmd:iweight}s are allowed + (however, {cmd:iweight}s may not be used with the {cmd:detail} option); + see {help weight}. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:detail} and {cmd:meanonly} as described in help {helpb summarize}. + +{p 8 12 2} + {cmd:listwise} to handle missing values through listwise deletion, + meaning that an observation is omitted from the estimation + sample if any of the variables in {it:varlist} is missing for that + observation. The default is to determine the used observations for + each variable separately without regard to whether other variables + are missing. {cmd:casewise} is a synonym for {cmd:listwise}. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. 
+ +{p 4 4 2}The following results vectors are saved in {cmd:e()}: + + {lalign 13:{cmd:e(count)}}number of observations + {lalign 13:{cmd:e(mean)}}mean + {lalign 13:{cmd:e(min)}}minimum + {lalign 13:{cmd:e(max)}}maximum + {lalign 13:{cmd:e(sum)}}sum of variable + {lalign 13:{cmd:e(sum_w)}}sum of the weights + {lalign 13:{cmd:e(Var)}}variance (unless {cmd:meanonly}) + {lalign 13:{cmd:e(sd)}}standard deviation (unless {cmd:meanonly}) + {lalign 13:{cmd:e(p1)}}1st percentile ({cmd:detail} only) + {lalign 13:{cmd:e(p5)}}5th percentile ({cmd:detail} only) + {lalign 13:{cmd:e(p10)}}10th percentile ({cmd:detail} only) + {lalign 13:{cmd:e(p25)}}25th percentile ({cmd:detail} only) + {lalign 13:{cmd:e(p50)}}50th percentile ({cmd:detail} only) + {lalign 13:{cmd:e(p75)}}75th percentile ({cmd:detail} only) + {lalign 13:{cmd:e(p90)}}90th percentile ({cmd:detail} only) + {lalign 13:{cmd:e(p95)}}95th percentile ({cmd:detail} only) + {lalign 13:{cmd:e(p99)}}99th percentile ({cmd:detail} only) + {lalign 13:{cmd:e(skewness)}}skewness ({cmd:detail} only) + {lalign 13:{cmd:e(kurtosis)}}kurtosis ({cmd:detail} only) + +{p 4 4 2} + Example: + +{* begin example summarize }{...} + {com}. sysuse auto, clear + {txt}(1978 Automobile Data) + + {com}. 
estpost summarize price mpg rep78 foreign + + {txt}{ralign 12:} {c |} {ralign 9:e(count)} {ralign 9:e(sum_w)} {ralign 9:e(mean)} {ralign 9:e(Var)} {ralign 9:e(sd)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 6165.257}}} {ralign 9:{res:{sf: 8699526}}} {ralign 9:{res:{sf: 2949.496}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 21.2973}}} {ralign 9:{res:{sf: 33.47205}}} {ralign 9:{res:{sf: 5.785503}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 69}}} {ralign 9:{res:{sf: 69}}} {ralign 9:{res:{sf: 3.405797}}} {ralign 9:{res:{sf: .9799659}}} {ralign 9:{res:{sf: .9899323}}} + {ralign 12:foreign} {c |} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: .2972973}}} {ralign 9:{res:{sf: .2117734}}} {ralign 9:{res:{sf: .4601885}}} + + {ralign 12:} {c |} {ralign 9:e(min)} {ralign 9:e(max)} {ralign 9:e(sum)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 3291}}} {ralign 9:{res:{sf: 15906}}} {ralign 9:{res:{sf: 456229}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 12}}} {ralign 9:{res:{sf: 41}}} {ralign 9:{res:{sf: 1576}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf: 5}}} {ralign 9:{res:{sf: 235}}} + {ralign 12:foreign} {c |} {ralign 9:{res:{sf: 0}}} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf: 22}}} + + {com}. 
esttab ., cells("mean sd count") noobs + {res} + {txt}{hline 51} + {txt} (1) + {txt} + {txt} mean sd count + {txt}{hline 51} + {txt}price {res} 6165.257 2949.496 74{txt} + {txt}mpg {res} 21.2973 5.785503 74{txt} + {txt}rep78 {res} 3.405797 .9899323 69{txt} + {txt}foreign {res} .2972973 .4601885 74{txt} + {txt}{hline 51} +{* end example }{txt}{...} + +{marker tabstat} +{dlgtab:tabstat} + +{p 4 15 2} +{cmd:estpost} {cmdab:tabstat} + {it:{help varlist}} [{it:{help if}}] [{it:{help in}}] [{it:{help weight}}] + [{cmd:,} + {cmdab:s:tatistics:(}{it:{help tabstat##statname:statname}} [{it:...}]{cmd:)} + {cmdab:c:olumns:(}{cmdab:v:ariables}|{cmdab:s:tatistics:)} + {cmd:by(}{it:varname}{cmd:)} + {cmdab:not:otal} + {cmdab:m:issing} + {cmdab:list:wise} + {cmdab:case:wise} + {cmdab:q:uietly} + {cmdab:es:ample} + {cmdab:el:abels} + ] + +{p 4 4 2} + posts summary statistics computed by {helpb tabstat}. {cmd:aweight}s and + {cmd:fweight}s are allowed; see {help weight}. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:statistics()}, {cmd:columns()}, {cmd:by()}, {cmd:nototal}, + and {cmd:missing} as described in help {helpb tabstat}. + +{p 8 12 2} + {cmd:listwise} to handle missing values through listwise deletion, + meaning that an observation is omitted from the estimation + sample if any of the variables in {it:varlist} is missing for that + observation. The default is to determine the used observations for + each variable separately without regard to whether other variables + are missing. {cmd:casewise} is a synonym for {cmd:listwise}. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. + +{p 8 12 2} + {cmd:elabels} to enforce saving the {cmd:by()} values/labels in macro + {cmd:e(labels)}. + +{p 4 4 2}A vector of results is saved in {cmd:e()} for each specified +variable or statistic, depending on {cmd:columns()}. + +{p 4 4 2} + Examples: + +{* begin example tabstat }{...} + {com}. 
sysuse auto, clear + {txt}(1978 Automobile Data) + + {com}. estpost tabstat price mpg rep78, listwise /// + > statistics(mean sd) + + {txt}Summary statistics: mean sd + for variables: price mpg rep78 + + {ralign 12:} {c |} {ralign 9:e(price)} {ralign 9:e(mpg)} {ralign 9:e(rep78)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11} + {ralign 12:mean} {c |} {ralign 9:{res:{sf: 6146.043}}} {ralign 9:{res:{sf: 21.28986}}} {ralign 9:{res:{sf: 3.405797}}} + {ralign 12:sd} {c |} {ralign 9:{res:{sf: 2912.44}}} {ralign 9:{res:{sf: 5.866408}}} {ralign 9:{res:{sf: .9899323}}} + + {com}. esttab ., cells("price mpg rep78") + {res} + {txt}{hline 51} + {txt} (1) + {txt} + {txt} price mpg rep78 + {txt}{hline 51} + {txt}mean {res} 6146.043 21.28986 3.405797{txt} + {txt}sd {res} 2912.44 5.866408 .9899323{txt} + {txt}{hline 51} + {txt}N {res} 69 {txt} + {txt}{hline 51} + + {com}. estpost tabstat price mpg rep78, listwise /// + > statistics(mean sd) columns(statistics) + + {txt}Summary statistics: mean sd + for variables: price mpg rep78 + + {ralign 12:} {c |} {ralign 9:e(mean)} {ralign 9:e(sd)} + {hline 13}{c +}{hline 11}{hline 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6146.043}}} {ralign 9:{res:{sf: 2912.44}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 21.28986}}} {ralign 9:{res:{sf: 5.866408}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 3.405797}}} {ralign 9:{res:{sf: .9899323}}} + + {com}. esttab ., cells("mean(fmt(a3)) sd") + {res} + {txt}{hline 38} + {txt} (1) + {txt} + {txt} mean sd + {txt}{hline 38} + {txt}price {res} 6146.0 2912.4{txt} + {txt}mpg {res} 21.29 5.866{txt} + {txt}rep78 {res} 3.406 0.990{txt} + {txt}{hline 38} + {txt}N {res} 69 {txt} + {txt}{hline 38} + + {com}. 
estpost tabstat price mpg rep78, by(foreign) /// + > statistics(mean sd) columns(statistics) listwise + + {txt}Summary statistics: mean sd + for variables: price mpg rep78 + by categories of: foreign + + {ralign 12:foreign} {c |} {ralign 9:e(mean)} {ralign 9:e(sd)} + {hline 13}{c +}{hline 11}{hline 11} + {res:{lalign 13:Domestic}}{c |}{space 11}{space 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6179.25}}} {ralign 9:{res:{sf: 3188.969}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 19.54167}}} {ralign 9:{res:{sf: 4.753312}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 3.020833}}} {ralign 9:{res:{sf: .837666}}} + {hline 13}{c +}{hline 11}{hline 11} + {res:{lalign 13:Foreign}}{c |}{space 11}{space 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6070.143}}} {ralign 9:{res:{sf: 2220.984}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 25.28571}}} {ralign 9:{res:{sf: 6.309856}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 4.285714}}} {ralign 9:{res:{sf: .7171372}}} + {hline 13}{c +}{hline 11}{hline 11} + {res:{lalign 13:Total}}{c |}{space 11}{space 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6146.043}}} {ralign 9:{res:{sf: 2912.44}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 21.28986}}} {ralign 9:{res:{sf: 5.866408}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 3.405797}}} {ralign 9:{res:{sf: .9899323}}} + + {com}. 
esttab ., main(mean) aux(sd) nostar unstack /// + > noobs nonote label + {res} + {txt}{hline 59} + {txt} (1) + {txt} + {txt} Domestic Foreign Total + {txt}{hline 59} + {txt}Price {res} 6179.3 6070.1 6146.0{txt} + {res} {ralign 12:{txt:(}3189.0{txt:)}} {ralign 12:{txt:(}2221.0{txt:)}} {ralign 12:{txt:(}2912.4{txt:)}}{txt} + + {txt}Mileage (mpg) {res} 19.54 25.29 21.29{txt} + {res} {ralign 12:{txt:(}4.753{txt:)}} {ralign 12:{txt:(}6.310{txt:)}} {ralign 12:{txt:(}5.866{txt:)}}{txt} + + {txt}Repair Record 1978 {res} 3.021 4.286 3.406{txt} + {res} {ralign 12:{txt:(}0.838{txt:)}} {ralign 12:{txt:(}0.717{txt:)}} {ralign 12:{txt:(}0.990{txt:)}}{txt} + {txt}{hline 59} +{* end example }{txt}{...} + +{marker gtabstat} +{dlgtab:gtabstat} + +{p 4 15 2} +{cmd:estpost} {cmdab:gtabstat} + {it:{help varlist}} [{it:{help if}}] [{it:{help in}}] [{it:{help weight}}] + [{cmd:,} + {cmdab:s:tatistics:(}{it:{help gstats tab##statname:statname}} [{it:...}]{cmd:)} + {cmdab:c:olumns:(}{cmdab:v:ariables}|{cmdab:s:tatistics:)} + {cmd:by(}{it:varname}{cmd:)} + {cmdab:m:issing} + {cmdab:q:uietly} + {cmdab:es:ample} + {cmdab:el:abels} + ] + +{p 4 4 2} + posts summary statistics computed by {helpb gstats tab} from + the {helpb gtools} package. {cmd:gstats tab} is a fast, by-able + alternative to {cmd:tabstat}. {cmd:aweight}s, {cmd:fweight}s, {cmd:iweight}s + and {cmd:pweight}s are allowed; see {help weight}. However, {cmd:total}, {cmd:casewise}, + and {cmd:listwise} are not allowed. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:statistics()}, {cmd:columns()}, and {cmd:by()} as described + in help {helpb gstats tab}. By default {cmd:nomissing} is passed; + use {cmd:missing} to include missing-value groups. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. + +{p 8 12 2} + {cmd:elabels} to enforce saving the {cmd:by()} values/labels in macro + {cmd:e(labels)}. 
+ +{p 4 4 2}A vector of results is saved in {cmd:e()} for each specified +variable or statistic, depending on {cmd:columns()}. + +{p 4 4 2} + Examples: + +{* begin example gtabstat }{...} + {com}. sysuse auto, clear + {txt}(1978 Automobile Data) + + {com}. estpost gtabstat price mpg rep78, /// + > statistics(mean sd) + + {txt}Summary statistics: mean sd + for variables: price mpg rep78 + + {ralign 12:} {c |} {ralign 9:e(price)} {ralign 9:e(mpg)} {ralign 9:e(rep78)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11} + {ralign 12:mean} {c |} {ralign 9:{res:{sf: 6146.043}}} {ralign 9:{res:{sf: 21.28986}}} {ralign 9:{res:{sf: 3.405797}}} + {ralign 12:sd} {c |} {ralign 9:{res:{sf: 2912.44}}} {ralign 9:{res:{sf: 5.866408}}} {ralign 9:{res:{sf: .9899323}}} + + {com}. esttab ., cells("price mpg rep78") + {res} + {txt}{hline 51} + {txt} (1) + {txt} + {txt} price mpg rep78 + {txt}{hline 51} + {txt}mean {res} 6146.043 21.28986 3.405797{txt} + {txt}sd {res} 2912.44 5.866408 .9899323{txt} + {txt}{hline 51} + {txt}N {res} 69 {txt} + {txt}{hline 51} + + {com}. estpost gtabstat price mpg rep78, /// + > statistics(mean sd) columns(statistics) + + {txt}Summary statistics: mean sd + for variables: price mpg rep78 + + {ralign 12:} {c |} {ralign 9:e(mean)} {ralign 9:e(sd)} + {hline 13}{c +}{hline 11}{hline 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6146.043}}} {ralign 9:{res:{sf: 2912.44}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 21.28986}}} {ralign 9:{res:{sf: 5.866408}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 3.405797}}} {ralign 9:{res:{sf: .9899323}}} + + {com}. esttab ., cells("mean(fmt(a3)) sd") + {res} + {txt}{hline 38} + {txt} (1) + {txt} + {txt} mean sd + {txt}{hline 38} + {txt}price {res} 6146.0 2912.4{txt} + {txt}mpg {res} 21.29 5.866{txt} + {txt}rep78 {res} 3.406 0.990{txt} + {txt}{hline 38} + {txt}N {res} 69 {txt} + {txt}{hline 38} + + {com}. 
estpost gtabstat price mpg rep78, by(foreign) /// + > statistics(mean sd) columns(statistics) + + {txt}Summary statistics: mean sd + for variables: price mpg rep78 + by categories of: foreign + + {ralign 12:foreign} {c |} {ralign 9:e(mean)} {ralign 9:e(sd)} + {hline 13}{c +}{hline 11}{hline 11} + {res:{lalign 13:Domestic}}{c |}{space 11}{space 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6179.25}}} {ralign 9:{res:{sf: 3188.969}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 19.54167}}} {ralign 9:{res:{sf: 4.753312}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 3.020833}}} {ralign 9:{res:{sf: .837666}}} + {hline 13}{c +}{hline 11}{hline 11} + {res:{lalign 13:Foreign}}{c |}{space 11}{space 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6070.143}}} {ralign 9:{res:{sf: 2220.984}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 25.28571}}} {ralign 9:{res:{sf: 6.309856}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 4.285714}}} {ralign 9:{res:{sf: .7171372}}} + {hline 13}{c +}{hline 11}{hline 11} + {res:{lalign 13:Total}}{c |}{space 11}{space 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6146.043}}} {ralign 9:{res:{sf: 2912.44}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 21.28986}}} {ralign 9:{res:{sf: 5.866408}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 3.405797}}} {ralign 9:{res:{sf: .9899323}}} + + {com}. 
esttab ., main(mean) aux(sd) nostar unstack /// + > noobs nonote label + {res} + {txt}{hline 59} + {txt} (1) + {txt} + {txt} Domestic Foreign Total + {txt}{hline 59} + {txt}Price {res} 6179.3 6070.1 6146.0{txt} + {res} {ralign 12:{txt:(}3189.0{txt:)}} {ralign 12:{txt:(}2221.0{txt:)}} {ralign 12:{txt:(}2912.4{txt:)}}{txt} + + {txt}Mileage (mpg) {res} 19.54 25.29 21.29{txt} + {res} {ralign 12:{txt:(}4.753{txt:)}} {ralign 12:{txt:(}6.310{txt:)}} {ralign 12:{txt:(}5.866{txt:)}}{txt} + + {txt}Repair Record 1978 {res} 3.021 4.286 3.406{txt} + {res} {ralign 12:{txt:(}0.838{txt:)}} {ralign 12:{txt:(}0.717{txt:)}} {ralign 12:{txt:(}0.990{txt:)}}{txt} + {txt}{hline 59} +{* end example }{txt}{...} + +{marker ttest} +{dlgtab:ttest} + +{p 4 15 2} +{cmd:estpost} {cmdab:ttest} + {it:{help varlist}} [{it:{help if}}] [{it:{help in}}]{cmd:,} + {cmd:by(}{it:groupvar}{cmd:)} + [ + {cmdab:une:qual} {cmdab:w:elch} + {cmdab:list:wise} + {cmdab:case:wise} + {cmdab:q:uietly} + {cmdab:es:ample} + ] + +{p 4 4 2} + posts two-group mean-comparison tests computed by {helpb ttest}. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:by()}, {cmd:unequal}, and {cmd:welch} as described in + help {helpb ttest}. + +{p 8 12 2} + {cmd:listwise} to handle missing values through listwise deletion, + meaning that an observation is omitted from the estimation + sample if any of the variables in {it:varlist} is missing for that + observation. The default is to determine the used observations for + each variable separately without regard to whether other variables + are missing. {cmd:casewise} is a synonym for {cmd:listwise}. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. 
+ +{p 4 4 2}The following results vectors are saved in {cmd:e()}: + + {lalign 13:{cmd:e(b)}}mean difference + {lalign 13:{cmd:e(count)}}number of observations + {lalign 13:{cmd:e(se)}}standard error of difference + {lalign 13:{cmd:e(t)}}t statistic + {lalign 13:{cmd:e(df_t)}}degrees of freedom + {lalign 13:{cmd:e(p_l)}}lower one-sided p-value + {lalign 13:{cmd:e(p)}}two-sided p-value + {lalign 13:{cmd:e(p_u)}}upper one-sided p-value + {lalign 13:{cmd:e(N_1)}}number of observations in group 1 + {lalign 13:{cmd:e(mu_1)}}mean in group 1 + {lalign 13:{cmd:e(N_2)}}number of observations in group 2 + {lalign 13:{cmd:e(mu_2)}}mean in group 2 + +{p 4 4 2} + Example: + +{* begin example ttest }{...} + {com}. sysuse auto, clear + {txt}(1978 Automobile Data) + + {com}. estpost ttest price mpg headroom trunk, by(foreign) + + {txt}{ralign 12:} {c |} {ralign 9:e(b)} {ralign 9:e(count)} {ralign 9:e(se)} {ralign 9:e(t)} {ralign 9:e(df_t)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf:-312.2587}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 754.4488}}} {ralign 9:{res:{sf:-.4138899}}} {ralign 9:{res:{sf: 72}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf:-4.945804}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 1.362162}}} {ralign 9:{res:{sf:-3.630848}}} {ralign 9:{res:{sf: 72}}} + {ralign 12:headroom} {c |} {ralign 9:{res:{sf: .5402098}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: .2070884}}} {ralign 9:{res:{sf: 2.608596}}} {ralign 9:{res:{sf: 72}}} + {ralign 12:trunk} {c |} {ralign 9:{res:{sf: 3.340909}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 1.022208}}} {ralign 9:{res:{sf: 3.268327}}} {ralign 9:{res:{sf: 72}}} + + {ralign 12:} {c |} {ralign 9:e(p_l)} {ralign 9:e(p)} {ralign 9:e(p_u)} {ralign 9:e(N_1)} {ralign 9:e(mu_1)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: .3400925}}} {ralign 9:{res:{sf: .6801851}}} {ralign 9:{res:{sf: 
.6599075}}} {ralign 9:{res:{sf: 52}}} {ralign 9:{res:{sf: 6072.423}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: .0002627}}} {ralign 9:{res:{sf: .0005254}}} {ralign 9:{res:{sf: .9997373}}} {ralign 9:{res:{sf: 52}}} {ralign 9:{res:{sf: 19.82692}}} + {ralign 12:headroom} {c |} {ralign 9:{res:{sf: .9944757}}} {ralign 9:{res:{sf: .0110486}}} {ralign 9:{res:{sf: .0055243}}} {ralign 9:{res:{sf: 52}}} {ralign 9:{res:{sf: 3.153846}}} + {ralign 12:trunk} {c |} {ralign 9:{res:{sf: .99917}}} {ralign 9:{res:{sf: .00166}}} {ralign 9:{res:{sf: .00083}}} {ralign 9:{res:{sf: 52}}} {ralign 9:{res:{sf: 14.75}}} + + {ralign 12:} {c |} {ralign 9:e(N_2)} {ralign 9:e(mu_2)} + {hline 13}{c +}{hline 11}{hline 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 22}}} {ralign 9:{res:{sf: 6384.682}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 22}}} {ralign 9:{res:{sf: 24.77273}}} + {ralign 12:headroom} {c |} {ralign 9:{res:{sf: 22}}} {ralign 9:{res:{sf: 2.613636}}} + {ralign 12:trunk} {c |} {ralign 9:{res:{sf: 22}}} {ralign 9:{res:{sf: 11.40909}}} + + {com}. esttab ., wide + {res} + {txt}{hline 41} + {txt} (1) + {txt} + {txt}{hline 41} + {txt}price {res} -312.3 {ralign 12:{txt:(}-0.41{txt:)}}{txt} + {txt}mpg {res} -4.946*** {ralign 12:{txt:(}-3.63{txt:)}}{txt} + {txt}headroom {res} 0.540* {ralign 12:{txt:(}2.61{txt:)}}{txt} + {txt}trunk {res} 3.341** {ralign 12:{txt:(}3.27{txt:)}}{txt} + {txt}{hline 41} + {txt}N {res} 74 {txt} + {txt}{hline 41} + {txt}t statistics in parentheses + {txt}* p<0.05, ** p<0.01, *** p<0.001 +{* end example }{txt}{...} + +{marker prtest} +{dlgtab:prtest} + +{p 4 15 2} +{cmd:estpost} {cmdab:prtest} + {it:{help varlist}} [{it:{help if}}] [{it:{help in}}]{cmd:,} + {cmd:by(}{it:groupvar}{cmd:)} + [ + {cmdab:list:wise} + {cmdab:case:wise} + {cmdab:q:uietly} + {cmdab:es:ample} + ] + +{p 4 4 2} + posts two-group tests of proportions computed by {helpb prtest}. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:by()} as described in + help {helpb prtest}. 
+ +{p 8 12 2} + {cmd:listwise} to handle missing values through listwise deletion, + meaning that an observation is omitted from the estimation + sample if any of the variables in {it:varlist} is missing for that + observation. The default is to determine the used observations for + each variable separately without regard to whether other variables + are missing. {cmd:casewise} is a synonym for {cmd:listwise}. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. + +{p 4 4 2}The following results vectors are saved in {cmd:e()}: + + {lalign 13:{cmd:e(b)}}difference in proportions + {lalign 13:{cmd:e(count)}}number of observations + {lalign 13:{cmd:e(se)}}standard error of difference + {lalign 13:{cmd:e(se0)}}standard error under Ho + {lalign 13:{cmd:e(z)}}z statistic + {lalign 13:{cmd:e(p_l)}}lower one-sided p-value + {lalign 13:{cmd:e(p)}}two-sided p-value + {lalign 13:{cmd:e(p_u)}}upper one-sided p-value + {lalign 13:{cmd:e(N_1)}}number of observations in group 1 + {lalign 13:{cmd:e(P_1)}}proportion in group 1 + {lalign 13:{cmd:e(N_2)}}number of observations in group 2 + {lalign 13:{cmd:e(P_2)}}proportion in group 2 + +{p 4 4 2} + Example: + +{* begin example prtest }{...} + {com}. webuse cure2, clear + {txt} + {com}. 
estpost prtest cure, by(sex) + + {txt}{ralign 12:} {c |} {ralign 9:e(b)} {ralign 9:e(count)} {ralign 9:e(se)} {ralign 9:e(se0)} {ralign 9:e(z)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:cure} {c |} {ralign 9:{res:{sf:-.0729167}}} {ralign 9:{res:{sf: 109}}} {ralign 9:{res:{sf: .0933123}}} {ralign 9:{res:{sf: .0942404}}} {ralign 9:{res:{sf:-.7737309}}} + + {ralign 12:} {c |} {ralign 9:e(p_l)} {ralign 9:e(p)} {ralign 9:e(p_u)} {ralign 9:e(N_1)} {ralign 9:e(P_1)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:cure} {c |} {ralign 9:{res:{sf: .219545}}} {ralign 9:{res:{sf: .43909}}} {ralign 9:{res:{sf: .780455}}} {ralign 9:{res:{sf: 64}}} {ralign 9:{res:{sf: .59375}}} + + {ralign 12:} {c |} {ralign 9:e(N_2)} {ralign 9:e(P_2)} + {hline 13}{c +}{hline 11}{hline 11} + {ralign 12:cure} {c |} {ralign 9:{res:{sf: 45}}} {ralign 9:{res:{sf: .6666667}}} + + {com}. esttab ., cell("b se0 z p") + {res} + {txt}{hline 64} + {txt} (1) + {txt} + {txt} b se0 z p + {txt}{hline 64} + {txt}cure {res} -.0729167 .0942404 -.7737309 .43909{txt} + {txt}{hline 64} + {txt}N {res} 109 {txt} + {txt}{hline 64} +{* end example }{txt}{...} + +{marker tabulate} +{dlgtab:tabulate} + +{p 4 4 2}One-way table: + +{p 8 15 2} +{cmd:estpost} {cmdab:ta:bulate} + {it:varname} [{it:{help if}}] [{it:{help in}}] [{it:{help weight}}] + [{cmd:,} + {cmdab:m:issing} + {cmdab:nol:abel} + {cmd:sort} + {cmd:subpop(}{it:varname}{cmd:)} + {cmdab:notot:al} + {cmdab:q:uietly} + {cmdab:es:ample} + {cmdab:el:abels} + ] + +{p 4 4 2}Two-way table: + +{p 8 15 2} +{cmd:estpost} {cmdab:ta:bulate} + {it:varname1} {it:varname2} [{it:{help if}}] [{it:{help in}}] [{it:{help weight}}] + [{cmd:,} + {cmdab:m:issing} + {cmdab:nol:abel} + {cmdab:ch:i2} + {cmdab:e:xact}[{cmd:(}{it:#}{cmd:)}] + {cmdab:g:amma} + {cmdab:lr:chi2} + {cmdab:t:aub} + {cmdab:v} + {cmdab:notot:al} + {cmdab:q:uietly} + {cmdab:es:ample} + {cmdab:el:abels} + ] + +{p 4 4 2} + {cmd:estpost tabulate} 
posts a one-way or two-way table + computed by {helpb tabulate}. {cmd:aweight}s, {cmd:fweight}s, + and {cmd:iweight}s are allowed; see {help weight}. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:missing}, + {cmd:nolabel}, + {cmd:sort}, + {cmd:subpop()}, + {cmd:chi2}, + {cmd:exact}, + {cmd:gamma}, + {cmd:lrchi2}, + {cmd:taub}, and + {cmd:v} + as described in help {helpb tabulate}. + +{p 8 12 2} + {cmdab:nototal} to omit row and column totals. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. + +{p 8 12 2} + {cmd:elabels} to enforce saving labels in {cmd:e(labels)} and + {cmd:e(eqlabels)}. + +{p 4 4 2}The following vectors are saved in {cmd:e()}: + + {lalign 13:{cmd:e(b)}}frequency counts + {lalign 13:{cmd:e(pct)}}percent + {lalign 13:{cmd:e(cumpct)}}cumulative percent (one-way only) + {lalign 13:{cmd:e(colpct)}}column percent (two-way only) + {lalign 13:{cmd:e(rowpct)}}row percent (two-way only) + +{p 4 4 2}If two-way options such as, e.g., {cmd:chi2} or {cmd:exact} are +specified, the results of the tests are added as scalars in {cmd:e()} using the +names documented in {helpb tabulate:{bind:[R] tabulate}}. + +{p 4 4 2}The value labels of the row variable are stored as names in the +saved vectors, unless +a label exceeds 30 characters or contains unsuitable characters, in which case +the labels are stored in macro {cmd:e(labels)}. Type +{cmd:varlabels(`e(labels)')} in {helpb esttab} or {helpb estout} to +use the labels stored in {cmd:e(labels)}. The value labels of the column variable +are stored as equation names or, alternatively, +in macro {cmd:e(eqlabels)}. Type {cmd:eqlabels(`e(eqlabels)')} in +{helpb esttab} or {helpb estout} to use the labels stored in +{cmd:e(eqlabels)}. Specify the {cmd:elabels} option to enforce saving labels +in {cmd:e(labels)} and {cmd:e(eqlabels)}. + +{p 4 4 2}Examples: + +{* begin example tabulate }{...} + {com}. 
sysuse auto, clear + {txt}(1978 Automobile Data) + + {com}. estpost tabulate foreign + + {txt}{ralign 12:foreign} {c |} {ralign 9:e(b)} {ralign 9:e(pct)} {ralign 9:e(cumpct)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11} + {ralign 12:Domestic} {c |} {ralign 9:{res:{sf: 52}}} {ralign 9:{res:{sf: 70.27027}}} {ralign 9:{res:{sf: 70.27027}}} + {ralign 12:Foreign} {c |} {ralign 9:{res:{sf: 22}}} {ralign 9:{res:{sf: 29.72973}}} {ralign 9:{res:{sf: 100}}} + {hline 13}{c +}{hline 11}{hline 11}{hline 11} + {ralign 12:Total} {c |} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: 100}}} {ralign 9:{res:{sf:{space 9}}}} + + {com}. esttab ., cells("b pct(fmt(2)) cumpct(fmt(2))") noobs + {res} + {txt}{hline 51} + {txt} (1) + {txt} foreign + {txt} b pct cumpct + {txt}{hline 51} + {txt}Domestic {res} 52 70.27 70.27{txt} + {txt}Foreign {res} 22 29.73 100.00{txt} + {txt}Total {res} 74 100.00 {txt} + {txt}{hline 51} + + {com}. estpost tabulate rep78 foreign + + {res}foreign {txt} {c |}{space 44} + {ralign 12:rep78} {c |} {ralign 9:e(b)} {ralign 9:e(pct)} {ralign 9:e(colpct)} {ralign 9:e(rowpct)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11} + {res:{lalign 13:Domestic}}{c |}{space 11}{space 11}{space 11}{space 11} + {ralign 12:1} {c |} {ralign 9:{res:{sf: 2}}} {ralign 9:{res:{sf: 2.898551}}} {ralign 9:{res:{sf: 4.166667}}} {ralign 9:{res:{sf: 100}}} + {ralign 12:2} {c |} {ralign 9:{res:{sf: 8}}} {ralign 9:{res:{sf: 11.5942}}} {ralign 9:{res:{sf: 16.66667}}} {ralign 9:{res:{sf: 100}}} + {ralign 12:3} {c |} {ralign 9:{res:{sf: 27}}} {ralign 9:{res:{sf: 39.13043}}} {ralign 9:{res:{sf: 56.25}}} {ralign 9:{res:{sf: 90}}} + {ralign 12:4} {c |} {ralign 9:{res:{sf: 9}}} {ralign 9:{res:{sf: 13.04348}}} {ralign 9:{res:{sf: 18.75}}} {ralign 9:{res:{sf: 50}}} + {ralign 12:5} {c |} {ralign 9:{res:{sf: 2}}} {ralign 9:{res:{sf: 2.898551}}} {ralign 9:{res:{sf: 4.166667}}} {ralign 9:{res:{sf: 18.18182}}} + {ralign 12:Total} {c |} {ralign 9:{res:{sf: 48}}} {ralign 9:{res:{sf: 69.56522}}} 
{ralign 9:{res:{sf: 100}}} {ralign 9:{res:{sf: 69.56522}}} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11} + {res:{lalign 13:Foreign}}{c |}{space 11}{space 11}{space 11}{space 11} + {ralign 12:1} {c |} {ralign 9:{res:{sf: 0}}} {ralign 9:{res:{sf: 0}}} {ralign 9:{res:{sf: 0}}} {ralign 9:{res:{sf: 0}}} + {ralign 12:2} {c |} {ralign 9:{res:{sf: 0}}} {ralign 9:{res:{sf: 0}}} {ralign 9:{res:{sf: 0}}} {ralign 9:{res:{sf: 0}}} + {ralign 12:3} {c |} {ralign 9:{res:{sf: 3}}} {ralign 9:{res:{sf: 4.347826}}} {ralign 9:{res:{sf: 14.28571}}} {ralign 9:{res:{sf: 10}}} + {ralign 12:4} {c |} {ralign 9:{res:{sf: 9}}} {ralign 9:{res:{sf: 13.04348}}} {ralign 9:{res:{sf: 42.85714}}} {ralign 9:{res:{sf: 50}}} + {ralign 12:5} {c |} {ralign 9:{res:{sf: 9}}} {ralign 9:{res:{sf: 13.04348}}} {ralign 9:{res:{sf: 42.85714}}} {ralign 9:{res:{sf: 81.81818}}} + {ralign 12:Total} {c |} {ralign 9:{res:{sf: 21}}} {ralign 9:{res:{sf: 30.43478}}} {ralign 9:{res:{sf: 100}}} {ralign 9:{res:{sf: 30.43478}}} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11} + {res:{lalign 13:Total}}{c |}{space 11}{space 11}{space 11}{space 11} + {ralign 12:1} {c |} {ralign 9:{res:{sf: 2}}} {ralign 9:{res:{sf: 2.898551}}} {ralign 9:{res:{sf: 2.898551}}} {ralign 9:{res:{sf: 100}}} + {ralign 12:2} {c |} {ralign 9:{res:{sf: 8}}} {ralign 9:{res:{sf: 11.5942}}} {ralign 9:{res:{sf: 11.5942}}} {ralign 9:{res:{sf: 100}}} + {ralign 12:3} {c |} {ralign 9:{res:{sf: 30}}} {ralign 9:{res:{sf: 43.47826}}} {ralign 9:{res:{sf: 43.47826}}} {ralign 9:{res:{sf: 100}}} + {ralign 12:4} {c |} {ralign 9:{res:{sf: 18}}} {ralign 9:{res:{sf: 26.08696}}} {ralign 9:{res:{sf: 26.08696}}} {ralign 9:{res:{sf: 100}}} + {ralign 12:5} {c |} {ralign 9:{res:{sf: 11}}} {ralign 9:{res:{sf: 15.94203}}} {ralign 9:{res:{sf: 15.94203}}} {ralign 9:{res:{sf: 100}}} + {ralign 12:Total} {c |} {ralign 9:{res:{sf: 69}}} {ralign 9:{res:{sf: 100}}} {ralign 9:{res:{sf: 100}}} {ralign 9:{res:{sf: 100}}} + + {com}. 
esttab ., cell(colpct(fmt(2))) unstack noobs + {res} + {txt}{hline 51} + {txt} (1) + {txt} + {txt} Domestic Foreign Total + {txt} colpct colpct colpct + {txt}{hline 51} + {txt}1 {res} 4.17 0.00 2.90{txt} + {txt}2 {res} 16.67 0.00 11.59{txt} + {txt}3 {res} 56.25 14.29 43.48{txt} + {txt}4 {res} 18.75 42.86 26.09{txt} + {txt}5 {res} 4.17 42.86 15.94{txt} + {txt}Total {res} 100.00 100.00 100.00{txt} + {txt}{hline 51} + + {com}. esttab ., cell(colpct(fmt(2)) count(fmt(g) par keep(Total))) /// + > collabels(none) unstack noobs nonumber nomtitle /// + > eqlabels(, lhs("Repair Rec.")) /// + > varlabels(, blist(Total "{c -(}hline @width{c )-}{c -(}break{c )-}")) + {res} + {txt}{hline 51} + {txt}Repair Rec. Domestic Foreign Total + {txt}{hline 51} + {txt}1 {res} 4.17 0.00 2.90{txt} + {txt}2 {res} 16.67 0.00 11.59{txt} + {txt}3 {res} 56.25 14.29 43.48{txt} + {txt}4 {res} 18.75 42.86 26.09{txt} + {txt}5 {res} 4.17 42.86 15.94{txt} + {txt}{hline 51}{break} Total {res} 100.00 100.00 100.00{txt} + {res} {txt} + {txt}{hline 51} +{* end example }{txt}{...} + +{marker svy_tabulate} +{dlgtab:svy: tabulate} + +{p 4 4 2}One-way table: + +{p 8 15 2} +{cmd:estpost} {cmd:svy} [{it:vcetype}] [, {it:svy_options}] {cmd::} {cmdab:ta:bulate} + {it:varname} [{it:{help if}}] [{it:{help in}}] + [{cmd:,} + {cmdab:notot:al} + {cmdab:q:uietly} + {cmdab:es:ample} + {cmdab:el:abels} + {help svy_tabulate_oneway:{it:svy_tabulate_opts}} + ] + +{p 4 4 2}Two-way table: + +{p 8 15 2} +{cmd:estpost} {cmd:svy} [{it:vcetype}] [, {it:svy_options}] {cmd::} {cmdab:ta:bulate} + {it:varname1} {it:varname2} [{it:{help if}}] [{it:{help in}}] + [{cmd:,} + {cmdab:notot:al} + {cmdab:q:uietly} + {cmdab:es:ample} + {cmdab:el:abels} + {help svy_tabulate_oneway:{it:svy_tabulate_opts}} + ] + +{p 4 4 2} + {cmd:estpost svy: tabulate} posts a one-way or two-way table + for complex survey data computed by {helpb svy_tabulate:svy: tabulate}. Stata 9 or newer + is required. 
+ +{p 4 4 2} + Options are as described in {helpb svy_tabulate_oneway:[SVY] svy: tabulate oneway} or + {helpb svy_tabulate_twoway:[SVY] svy: tabulate twoway}, respectively, and: + +{p 8 12 2} + {cmdab:nototal} to omit row and column totals (synonym for {cmd:nomarginals}). + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. + +{p 8 12 2} + {cmd:elabels} to enforce saving labels in {cmd:e(labels)} and + {cmd:e(eqlabels)}. + +{p 4 4 2}{cmd:estpost svy: tabulate} posts results in {cmd:e()} (except {cmd:e(V)}) +as documented in {helpb svy_tabulate_oneway:[SVY] svy: tabulate oneway} and +{helpb svy_tabulate_twoway:[SVY] svy: tabulate twoway}, respectively, +and adds or replaces the following matrices: + + {lalign 10:{cmd:e(b)}}cell, column, or row proportions or percentages, + or weighted counts, depending on options + {lalign 10:{cmd:e(se)}}standard errors of {cmd:e(b)} + {lalign 10:{cmd:e(lb)}}lower confidence bounds for {cmd:e(b)} + {lalign 10:{cmd:e(ub)}}upper confidence bounds for {cmd:e(b)} + {lalign 10:{cmd:e(deff)}}deff for variances of {cmd:e(b)} + {lalign 10:{cmd:e(deft)}}deft for variances of {cmd:e(b)} + {lalign 10:{cmd:e(cell)}}cell proportion or percentages + {lalign 10:{cmd:e(row)}}row proportion or percentages (two-way only) + {lalign 10:{cmd:e(col)}}column proportion or percentages (two-way only) + {lalign 10:{cmd:e(count)}}weighted counts + {lalign 10:{cmd:e(obs)}}number of observations + +{p 4 4 2}The value labels of the row variable are stored as names in the +saved vectors, unless +a label exceeds 30 characters or contains unsuitable characters, in which case +the labels are stored in macro {cmd:e(labels)}. Type +{cmd:varlabels(`e(labels)')} in {helpb esttab} or {helpb estout} to +use the labels stored in {cmd:e(labels)}. The value labels of the column variable +are stored as equation names or, alternatively, +in macro {cmd:e(eqlabels)}. 
Type {cmd:eqlabels(`e(eqlabels)')} in +{helpb esttab} or {helpb estout} to use the labels stored in +{cmd:e(eqlabels)}. Specify the {cmd:elabels} option to enforce saving labels +in {cmd:e(labels)} and {cmd:e(eqlabels)}. + +{p 4 4 2}Examples: + +{* begin example svy_tabulate }{...} + {com}. webuse nhanes2b, clear + {txt} + {com}. svyset psuid [pweight=finalwgt], strata(stratid) + + {txt}pweight:{col 16}{res}finalwgt + {txt}VCE:{col 16}{res}linearized + {txt}Single unit:{col 16}{res}missing + {txt}Strata 1:{col 16}{res}stratid + {txt}SU 1:{col 16}{res}psuid + {txt}FPC 1:{col 16} + {p2colreset}{...} + + {com}. estpost svy: tabulate race + {txt}(running tabulate on estimation sample) + + {col 1}Number of strata{col 20}= {res} 31{txt}{col 48}Number of obs{col 67}= {res} 10351 + {txt}{col 1}Number of PSUs{col 20}= {res} 62{txt}{col 48}Population size{col 67}={res} 117157513 + {txt}{col 48}Design df{col 67}= {res} 31 + + {txt}{hline 10}{c TT}{hline 12} + 1=white, {c |} + 2=black, {c |} + 3=other {c |} proportions + {hline 10}{c +}{hline 12} + White {c |} {res}.8792 + {txt}Black {c |} {res}.0955 + {txt}Other {c |} {res}.0253 + {txt}{c |} + Total {c |} {res}1 + {txt}{hline 10}{c BT}{hline 12} + Key: {col 1}proportions = {res}cell proportions + + {txt}saved vectors: + e(b) = {res}cell proportions + {txt}e(se) = {res}standard errors of cell proportions + {txt}e(lb) = {res}lower 95% confidence bounds for cell proportions + {txt}e(ub) = {res}upper 95% confidence bounds for cell proportions + {txt}e(deff) = {res}deff for variances of cell proportions + {txt}e(deft) = {res}deft for variances of cell proportions + {txt}e(cell) = {res}cell proportions + {txt}e(count) = {res}weighted counts + {txt}e(obs) = {res}number of observations + {txt} + {com}. 
esttab ., cell("b(f(4)) se deft") + {res} + {txt}{hline 51} + {txt} (1) + {txt} + {txt} b se deft + {txt}{hline 51} + {txt}White {res} 0.8792 0.0167 5.2090{txt} + {txt}Black {res} 0.0955 0.0127 4.4130{txt} + {txt}Other {res} 0.0253 0.0105 6.8246{txt} + {txt}Total {res} 1.0000 0.0000 {txt} + {txt}{hline 51} + {txt}N {res} 10351 {txt} + {txt}{hline 51} + + {com}. estpost svy: tabulate race diabetes, row percent + {txt}(running tabulate on estimation sample) + + {col 1}Number of strata{col 20}= {res} 31{txt}{col 48}Number of obs{col 67}= {res} 10349 + {txt}{col 1}Number of PSUs{col 20}= {res} 62{txt}{col 48}Population size{col 67}={res} 117131111 + {txt}{col 48}Design df{col 67}= {res} 31 + + {txt}{hline 10}{c TT}{hline 20} + 1=white, {c |} diabetes, 1=yes, + 2=black, {c |} 0=no + 3=other {c |} 0 1 Total + {hline 10}{c +}{hline 20} + White {c |} {res}96.8 3.195 100 + {txt}Black {c |} {res}94.1 5.903 100 + {txt}Other {c |} {res}97.97 2.034 100 + {txt}{c |} + Total {c |} {res}96.58 3.425 100 + {txt}{hline 10}{c BT}{hline 20} + Key: {col 1}{res}row percentages + + {txt} Pearson: + {col 5}Uncorrected{col 19}chi2({res}2{txt}){col 35}= {res} 21.3483 + {txt}{col 5}Design-based{col 19}F({res}1.52{txt}, {res}47.26{txt}){col 35}= {res} 15.0056{col 51}{txt}P = {res}0.0000 + + {txt}saved vectors: + e(b) = {res}row percentages + {txt}e(se) = {res}standard errors of row percentages + {txt}e(lb) = {res}lower 95% confidence bounds for row percentages + {txt}e(ub) = {res}upper 95% confidence bounds for row percentages + {txt}e(deff) = {res}deff for variances of row percentages + {txt}e(deft) = {res}deft for variances of row percentages + {txt}e(cell) = {res}cell percentages + {txt}e(row) = {res}row percentages + {txt}e(col) = {res}column percentages + {txt}e(count) = {res}weighted counts + {txt}e(obs) = {res}number of observations + {txt} + {com}. 
esttab ., b(2) se(2) scalars(F_Pear) nostar unstack /// + > mtitle(`e(colvar)') + {res} + {txt}{hline 51} + {txt} (1) + {txt} diabetes + {txt} 0 1 Total + {txt}{hline 51} + {txt}White {res} 96.80 3.20 100.00{txt} + {res} {ralign 12:{txt:(}0.20{txt:)}} {ralign 12:{txt:(}0.20{txt:)}} {txt} + + {txt}Black {res} 94.10 5.90 100.00{txt} + {res} {ralign 12:{txt:(}0.61{txt:)}} {ralign 12:{txt:(}0.61{txt:)}} {txt} + + {txt}Other {res} 97.97 2.03 100.00{txt} + {res} {ralign 12:{txt:(}0.76{txt:)}} {ralign 12:{txt:(}0.76{txt:)}} {txt} + + {txt}Total {res} 96.58 3.42 100.00{txt} + {res} {ralign 12:{txt:(}0.18{txt:)}} {ralign 12:{txt:(}0.18{txt:)}} {txt} + {txt}{hline 51} + {txt}N {res} 10349 {txt} + {txt}F_Pear {res} 15.01 {txt} + {txt}{hline 51} + {txt}Standard errors in parentheses +{* end example }{txt}{...} + +{marker correlate} +{dlgtab:correlate} + +{p 4 15 2} +{cmd:estpost} {cmdab:cor:relate} + {it:{help varlist}} [{it:{help if}}] [{it:{help in}}] [{it:{help weight}}] + [{cmd:,} + {cmdab:m:atrix} + {cmdab:noh:alf} + {cmdab:print:(}{it:#}{cmd:)} + {cmdab:b:onferroni} + {cmdab:sid:ak} + {cmdab:list:wise} + {cmdab:case:wise} + {cmdab:q:uietly} + {cmdab:es:ample} + {cmdab:el:abels}[{cmd:(}{it:pfx} {it:sfx}{cmd:)}] + {cmdab:nol:abel} + ] + +{p 4 4 2} + posts the pairwise correlations between the first variable in + {it:varlist} and the remaining variables. Alternatively, if the + {cmd:matrix} option is specified, all pairwise correlations among the + variable in {it:varlist} are posted. + +{p 4 4 2} + {cmd:aweight}s, {cmd:fweight}s, + {cmd:iweight}s and {cmd:pweight}s are allowed; see {help weight}. + +{p 4 4 2} + Methods and formulas are as described in + {helpb correlate:{bind:[R] correlate}}. However, if {cmd:pweight}s + are specified, the p-values of the correlations are computed + as suggested in the Stata FAQ on + {browse "http://www.stata.com/support/faqs/stat/survey.html":"Estimating correlations with survey data"}. 
+ +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:matrix} to return the (lower triangle) of the correlation + matrix of the variables in {it:varlist}. The default is to return + the pairwise correlations between the first variable in + {it:varlist} and the remaining variables. + +{p 8 12 2} + {cmd:nohalf} to return the full correlation matrix rather than just + the lower triangle. {cmd:nohalf} has no effect unless {cmd:matrix} + is specified. + +{p 8 12 2} + {cmd:print(}{it:#}{cmd:)} to suppress (leave blank) + correlation coefficients with a p-value larger than + {it:#}. {cmd:print()} only affects what is saved in + {cmd:e(rho)}, {cmd:e(p)}, and {cmd:e(count)}, but + not what is saved in {cmd:e(b)}. + +{p 8 12 2} + {cmd:bonferroni} to apply the Bonferroni adjustment to the + p-values. + +{p 8 12 2} + {cmd:sidak} to apply the Sidak adjustment to the + p-values. + +{p 8 12 2} + {cmd:listwise} to handle missing values through listwise deletion, + meaning that an observation is omitted from the estimation sample + if any of the variables in {it:varlist} is missing for that + observation. The default is to handle missing values by pairwise + deletion, i.e. all available observations are used to calculate a + pairwise correlation without regard to whether variables outside + that pair are missing. {cmd:casewise} is a synonym for + {cmd:listwise}. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. + +{p 8 12 2} + {cmd:elabels}[{cmd:(}{it:pfx} {it:sfx}{cmd:)}] stores numbered labels in + {cmd:e(labels)} and {cmd:e(eqlabels)} if option {cmd:matrix} has been + specified. This is useful if you want to tabulate a correlation + matrix. See below for an example. 
Specify {it:pfx} and + {it:sfx} to provide a prefix and suffix for the numbers; for + example, {cmd:eqlabels([ ])} will format the numbers as + {cmd:[1]}, {cmd:[2]}, etc.; {cmd:eqlabels("" .)} will format the numbers as + {cmd:1.}, {cmd:2.}, etc. + +{p 8 12 2} + {cmd:nolabel} causes option {cmd:elabels()} to use variable names rather + than variable labels. + +{p 4 4 2}The following vectors are saved in {cmd:e()}: + + {lalign 13:{cmd:e(b)}}correlation coefficients + {lalign 13:{cmd:e(rho)}}correlation coefficients + {lalign 13:{cmd:e(p)}}p-values + {lalign 13:{cmd:e(count)}}number of observations + +{p 4 4 2}Examples: + +{* begin example correlate }{...} + {com}. sysuse auto, clear + {txt}(1978 automobile data) + + {com}. estpost correlate price mpg turn foreign, matrix elabels(( )) + + {txt}{ralign 12:} {c |} {ralign 9:e(b)} {ralign 9:e(rho)} {ralign 9:e(p)} {ralign 9:e(count)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11} + {res:{lalign 13:price}}{c |}{space 11}{space 11}{space 11}{space 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf:{space 9}}}} {ralign 9:{res:{sf: 74}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf:-.4685967}}} {ralign 9:{res:{sf:-.4685967}}} {ralign 9:{res:{sf: .0000255}}} {ralign 9:{res:{sf: 74}}} + {ralign 12:turn} {c |} {ralign 9:{res:{sf: .3096174}}} {ralign 9:{res:{sf: .3096174}}} {ralign 9:{res:{sf: .0072662}}} {ralign 9:{res:{sf: 74}}} + {ralign 12:foreign} {c |} {ralign 9:{res:{sf: .0487195}}} {ralign 9:{res:{sf: .0487195}}} {ralign 9:{res:{sf: .6801851}}} {ralign 9:{res:{sf: 74}}} + {res:{lalign 13:mpg}}{c |}{space 11}{space 11}{space 11}{space 11} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf:{space 9}}}} {ralign 9:{res:{sf: 74}}} + {ralign 12:turn} {c |} {ralign 9:{res:{sf:-.7191863}}} {ralign 9:{res:{sf:-.7191863}}} {ralign 9:{res:{sf: 5.30e-13}}} {ralign 9:{res:{sf: 74}}} + {ralign 12:foreign} {c |} {ralign 9:{res:{sf: 
.3933974}}} {ralign 9:{res:{sf: .3933974}}} {ralign 9:{res:{sf: .0005254}}} {ralign 9:{res:{sf: 74}}} + {res:{lalign 13:turn}}{c |}{space 11}{space 11}{space 11}{space 11} + {ralign 12:turn} {c |} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf:{space 9}}}} {ralign 9:{res:{sf: 74}}} + {ralign 12:foreign} {c |} {ralign 9:{res:{sf:-.6310965}}} {ralign 9:{res:{sf:-.6310965}}} {ralign 9:{res:{sf: 1.66e-09}}} {ralign 9:{res:{sf: 74}}} + {res:{lalign 13:foreign}}{c |}{space 11}{space 11}{space 11}{space 11} + {ralign 12:foreign} {c |} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf: 1}}} {ralign 9:{res:{sf:{space 9}}}} {ralign 9:{res:{sf: 74}}} + + {com}. esttab ., not unstack compress noobs + {res} + {txt}{hline 62} + {txt} (1) + {txt} + {txt} price mpg turn foreign + {txt}{hline 62} + {txt}price {res} 1 {txt} + {txt}mpg {res} -0.469*** 1 {txt} + {txt}turn {res} 0.310** -0.719*** 1 {txt} + {txt}foreign {res} 0.0487 0.393*** -0.631*** 1 {txt} + {txt}{hline 62} + {txt}* p<0.05, ** p<0.01, *** p<0.001 + + {com}. esttab ., not unstack compress noobs nonum nomtitle /// + > varwidth(21) varlabels(`e(labels)') eqlabels(`e(eqlabels)') + {res} + {txt}{hline 73} + {txt} (1) (2) (3) (4) + {txt}{hline 73} + {txt}(1) Price {res} 1 {txt} + {txt}(2) Mileage (mpg) {res} -0.469*** 1 {txt} + {txt}(3) Turn circle (ft.){res} 0.310** -0.719*** 1 {txt} + {txt}(4) Car origin {res} 0.0487 0.393*** -0.631*** 1 {txt} + {txt}{hline 73} + {txt}* p<0.05, ** p<0.01, *** p<0.001 + + {com}. 
bysort foreign: eststo: /// + > estpost correlate price turn weight rep78, listwise + + {txt}{hline 60} + -> Domestic + + {ralign 12:price} {c |} {ralign 9:e(b)} {ralign 9:e(rho)} {ralign 9:e(p)} {ralign 9:e(count)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:turn} {c |} {ralign 9:{res:{sf: .4328091}}} {ralign 9:{res:{sf: .4328091}}} {ralign 9:{res:{sf: .0021229}}} {ralign 9:{res:{sf: 48}}} + {ralign 12:weight} {c |} {ralign 9:{res:{sf: .6864719}}} {ralign 9:{res:{sf: .6864719}}} {ralign 9:{res:{sf: 7.19e-08}}} {ralign 9:{res:{sf: 48}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf:-.0193249}}} {ralign 9:{res:{sf:-.0193249}}} {ralign 9:{res:{sf: .8962741}}} {ralign 9:{res:{sf: 48}}} + ({res}est1{txt} stored) + + {hline 60} + -> Foreign + + {ralign 12:price} {c |} {ralign 9:e(b)} {ralign 9:e(rho)} {ralign 9:e(p)} {ralign 9:e(count)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:turn} {c |} {ralign 9:{res:{sf: .5102425}}} {ralign 9:{res:{sf: .5102425}}} {ralign 9:{res:{sf: .0181155}}} {ralign 9:{res:{sf: 21}}} + {ralign 12:weight} {c |} {ralign 9:{res:{sf: .8315886}}} {ralign 9:{res:{sf: .8315886}}} {ralign 9:{res:{sf: 2.99e-06}}} {ralign 9:{res:{sf: 21}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: .1797879}}} {ralign 9:{res:{sf: .1797879}}} {ralign 9:{res:{sf: .4354917}}} {ralign 9:{res:{sf: 21}}} + ({res}est2{txt} stored) + + {com}. 
esttab est1 est2, not mtitles + {res} + {txt}{hline 44} + {txt} (1) (2) + {txt} Domestic Foreign + {txt}{hline 44} + {txt}turn {res} 0.433** 0.510* {txt} + {txt}weight {res} 0.686*** 0.832***{txt} + {txt}rep78 {res} -0.0193 0.180 {txt} + {txt}{hline 44} + {txt}N {res} 48 21 {txt} + {txt}{hline 44} + {txt}* p<0.05, ** p<0.01, *** p<0.001 +{* end example }{txt}{...} + +{marker ci} +{dlgtab:ci} + +{p 4 15 2} +{cmd:estpost} {cmdab:ci} + [{it:{help varlist}}] [{it:{help if}}] [{it:{help in}}] [{it:{help weight}}] + [{cmd:,} + {cmdab:b:inomial} + {cmdab:p:oisson} {cmdab:e:xposure:(}{it:varname}{cmd:)} + {cmdab:ex:act} {cmdab:wa:ld} {cmdab:w:ilson} {cmdab:a:gresti} {cmdab:j:effreys} + {cmdab:l:evel:(}{it:#}{cmd:)} + {cmdab:list:wise} + {cmdab:case:wise} + {cmdab:q:uietly} + {cmdab:es:ample} + ] + +{p 4 4 2} + posts standard errors and confidence intervals computed by + {helpb ci}. {cmd:aweight}s and {cmd:fweight}s are allowed, + but {cmd:aweight}s may not be specified with options + {cmd:binomial} or {cmd:poisson}; + see {help weight}. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:binomial}, {cmd:poisson}, {cmd:exposure()}, + {cmd:exact}, {cmd:wald}, {cmd:wilson}, {cmd:agresti}, + {cmd:jeffreys}, and {cmd:level()} + as described in help {helpb ci}. + +{p 8 12 2} + {cmd:listwise} to handle missing values through listwise deletion, + meaning that an observation is omitted from the estimation + sample if any of the variables in {it:varlist} is missing for that + observation. The default is to determine the used observations for + each variable separately without regard to whether other variables + are missing. {cmd:casewise} is a synonym for {cmd:listwise}. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. 
+ +{p 4 4 2}The following results vectors are saved in {cmd:e()}: + + {lalign 13:{cmd:e(b)}}mean + {lalign 13:{cmd:e(count)}}number of observations + {lalign 13:{cmd:e(se)}}estimate of standard error + {lalign 13:{cmd:e(lb)}}lower bound of confidence interval + {lalign 13:{cmd:e(ub)}}upper bound of confidence interval + +{p 4 4 2} + Examples: + +{* begin example ci }{...} + {com}. sysuse auto, clear + {txt}(1978 Automobile Data) + + {com}. estpost ci price mpg rep78, listwise + {txt}(confidence level is 95%) + + {ralign 12:} {c |} {ralign 9:e(b)} {ralign 9:e(count)} {ralign 9:e(se)} {ralign 9:e(lb)} {ralign 9:e(ub)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:price} {c |} {ralign 9:{res:{sf: 6146.043}}} {ralign 9:{res:{sf: 69}}} {ralign 9:{res:{sf: 350.6166}}} {ralign 9:{res:{sf: 5446.399}}} {ralign 9:{res:{sf: 6845.688}}} + {ralign 12:mpg} {c |} {ralign 9:{res:{sf: 21.28986}}} {ralign 9:{res:{sf: 69}}} {ralign 9:{res:{sf: .7062326}}} {ralign 9:{res:{sf: 19.88059}}} {ralign 9:{res:{sf: 22.69912}}} + {ralign 12:rep78} {c |} {ralign 9:{res:{sf: 3.405797}}} {ralign 9:{res:{sf: 69}}} {ralign 9:{res:{sf: .1191738}}} {ralign 9:{res:{sf: 3.167989}}} {ralign 9:{res:{sf: 3.643605}}} + + {com}. esttab ., cells("b lb ub") label + {res} + {txt}{hline 59} + {txt} (1) + {txt} + {txt} b lb ub + {txt}{hline 59} + {txt}Price {res} 6146.043 5446.399 6845.688{txt} + {txt}Mileage (mpg) {res} 21.28986 19.88059 22.69912{txt} + {txt}Repair Record 1978 {res} 3.405797 3.167989 3.643605{txt} + {txt}{hline 59} + {txt}Observations {res} 69 {txt} + {txt}{hline 59} + + {com}. 
eststo exact: estpost ci foreign, binomial exact + {txt}(confidence level is 95%) + + {ralign 12:} {c |} {ralign 9:e(b)} {ralign 9:e(count)} {ralign 9:e(se)} {ralign 9:e(lb)} {ralign 9:e(ub)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:foreign} {c |} {ralign 9:{res:{sf: .2972973}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: .0531331}}} {ralign 9:{res:{sf: .196584}}} {ralign 9:{res:{sf: .4148353}}} + + {com}. eststo agresti: estpost ci foreign, binomial agresti + {txt}(confidence level is 95%) + + {ralign 12:} {c |} {ralign 9:e(b)} {ralign 9:e(count)} {ralign 9:e(se)} {ralign 9:e(lb)} {ralign 9:e(ub)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:foreign} {c |} {ralign 9:{res:{sf: .2972973}}} {ralign 9:{res:{sf: 74}}} {ralign 9:{res:{sf: .0531331}}} {ralign 9:{res:{sf: .204807}}} {ralign 9:{res:{sf: .4097942}}} + + {com}. esttab exact agresti, cells(lb ub) mtitles + {res} + {txt}{hline 38} + {txt} (1) (2) + {txt} exact agresti + {txt} lb/ub lb/ub + {txt}{hline 38} + {txt}foreign {res} .196584 .204807{txt} + {res} .4148353 .4097942{txt} + {txt}{hline 38} + {txt}N {res} 74 74{txt} + {txt}{hline 38} +{* end example }{txt}{...} + +{marker stci} +{dlgtab:stci} + +{p 4 15 2} +{cmd:estpost} {cmd:stci} + [{it:{help if}}] [{it:{help in}}] + [{cmd:,} + {cmd:by(}{it:groupvar}{cmd:)} + {cmdab:m:edian} + {cmdab:r:mean} + {cmdab:e:mean} + {cmd:p(}{it:#}{cmd:)} + {cmdab:cc:orr} + {cmdab:l:evel:(}{it:#}{cmd:)} + {cmdab:q:uietly} + {cmdab:es:ample} + {cmdab:el:abels} + ] + +{p 4 4 2} + posts confidence intervals for means + and percentiles of survival time computed by {helpb stci}. Stata 9 or + newer is required. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:by(}{it:groupvar}{cmd:)} + to report separate summaries for each group defined by + {it:groupvar}, along with an overall total. 
+ +{p 8 12 2} + {cmd:median}, + {cmd:rmean}, + {cmd:emean}, + {cmd:p()}, + {cmd:ccorr}, and + {cmd:level()} + as described in help {helpb stci}. + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {cmd:esample} to mark the estimation sample in {cmd:e(sample)}. + +{p 8 12 2} + {cmd:elabels} to enforce saving {cmd:by()} labels in {cmd:e(labels)}. + +{p 4 4 2}The following vectors are saved in {cmd:e()}: + + {lalign 13:{cmd:e(count)}}number of subjects + {lalign 13:{cmd:e(p50)}}median (if {cmd:median} specified; the default) + {lalign 13:{cmd:e(p}{it:#}{cmd:)}}#th percentile (if {cmd:p(}{it:#}{cmd:)} specified) + {lalign 13:{cmd:e(rmean)}}restricted mean (if {cmd:rmean} specified) + {lalign 13:{cmd:e(emean)}}extended mean (if {cmd:emean} specified) + {lalign 13:{cmd:e(se)}}standard error + {lalign 13:{cmd:e(lb)}}lower bound of CI + {lalign 13:{cmd:e(ub)}}upper bound of CI + +{p 4 4 2} + Examples: + +{* begin example stci }{...} + {com}. webuse page2, clear + {txt} + {com}. estpost stci + {txt}(confidence level is 95%) + + {ralign 12:} {c |} {ralign 9:e(count)} {ralign 9:e(p50)} {ralign 9:e(se)} {ralign 9:e(lb)} {ralign 9:e(ub)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:total} {c |} {ralign 9:{res:{sf: 40}}} {ralign 9:{res:{sf: 232}}} {ralign 9:{res:{sf: 2.562933}}} {ralign 9:{res:{sf: 213}}} {ralign 9:{res:{sf: 239}}} + + {com}. esttab ., cell("count p50 se lb ub") noobs compress + {res} + {txt}{hline 60} + {txt} (1) + {txt} + {txt} count p50 se lb ub + {txt}{hline 60} + {txt}total {res} 40 232 2.562933 213 239{txt} + {txt}{hline 60} + + {com}. 
estpost stci, by(group) + {txt}(confidence level is 95%) + + {ralign 12:} {c |} {ralign 9:e(count)} {ralign 9:e(p50)} {ralign 9:e(se)} {ralign 9:e(lb)} {ralign 9:e(ub)} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:1} {c |} {ralign 9:{res:{sf: 19}}} {ralign 9:{res:{sf: 216}}} {ralign 9:{res:{sf: 5.171042}}} {ralign 9:{res:{sf: 190}}} {ralign 9:{res:{sf: 234}}} + {ralign 12:2} {c |} {ralign 9:{res:{sf: 21}}} {ralign 9:{res:{sf: 233}}} {ralign 9:{res:{sf: 2.179595}}} {ralign 9:{res:{sf: 232}}} {ralign 9:{res:{sf: 280}}} + {hline 13}{c +}{hline 11}{hline 11}{hline 11}{hline 11}{hline 11} + {ralign 12:total} {c |} {ralign 9:{res:{sf: 40}}} {ralign 9:{res:{sf: 232}}} {ralign 9:{res:{sf: 2.562933}}} {ralign 9:{res:{sf: 213}}} {ralign 9:{res:{sf: 239}}} + + {com}. esttab ., cell("count p50 se lb ub") noobs compress + {res} + {txt}{hline 60} + {txt} (1) + {txt} + {txt} count p50 se lb ub + {txt}{hline 60} + {txt}1 {res} 19 216 5.171042 190 234{txt} + {txt}2 {res} 21 233 2.179595 232 280{txt} + {txt}total {res} 40 232 2.562933 213 239{txt} + {txt}{hline 60} +{* end example }{txt}{...} + +{marker margins} +{dlgtab:margins} + +{p 4 15 2} +{cmd:estpost} {cmd:margins} + [{it:{help fvvarlist:marginlist}}] [{it:{help if}}] [{it:{help in}}] [{it:{help weight}}] + [{cmd:,} + {cmdab:q:uietly} + {it:{help margins:margins_opions}} + ] + +{p 4 4 2} + posts results from the {helpb margins} command, that was introduced in + Stata 11. + +{p 4 4 2} + Options are: + +{p 8 12 2} + {cmd:quietly} to suppress the output. + +{p 8 12 2} + {it:margins_opions} as described in help {helpb margins} (except {cmd:post}). + +{p 4 4 2}{cmd:estpost margins} replaces the current {cmd:e(b)} and +{cmd:e(V)} with {cmd:r(b)} and {cmd:r(V)} from {helpb margins} and +also copies all other matrixes, scalars, and macros from {helpb margins} into +{cmd:e()} (possibly replacing identically named existing entries). + +{p 4 4 2} + Examples: + +{* begin example margins }{...} + {com}. 
sysuse auto, clear + {txt}(1978 Automobile Data) + + {com}. quietly logit foreign price mpg weight + {txt} + {com}. estpost margins, dydx(*) quietly + {txt} + {com}. esttab ., cell("b se") pr2 + {res} + {txt}{hline 38} + {txt} (1) + {txt} foreign + {txt} b se + {txt}{hline 38} + {txt}price {res} .0000686 .0000136{txt} + {txt}mpg {res} -.0089607 .006596{txt} + {txt}weight {res} -.0005069 .000055{txt} + {txt}{hline 38} + {txt}N {res} 74 {txt} + {txt}pseudo R-sq {res} 0.619 {txt} + {txt}{hline 38} +{* end example }{txt}{...} + + +{title:Author} + +{p 4 4 2} Ben Jann, Institute of Sociology, University of Bern, jann@soz.unibe.ch + +{p 4 4 2} {cmd:estpost gtabstat} has been contributed by Mauricio Caceres Bravo. + + +{title:Also see} + + Manual: {hi:[R] estimates} + +{p 4 13 2}Online: help for + {helpb estimates}, + {helpb estout}, + {helpb esttab}, + {helpb eststo}, + {helpb estadd} +{p_end} diff --git a/110/replication_package/replication/ado/plus/e/eststo.ado b/110/replication_package/replication/ado/plus/e/eststo.ado new file mode 100644 index 0000000000000000000000000000000000000000..6298d71e48369cf534edc7315eb95fc54ca7d1e6 --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/eststo.ado @@ -0,0 +1,343 @@ +*! 
version 1.1.0 05nov2008 Ben Jann

// eststo: store the active estimation results under an automatically
// generated name (est1, est2, ...) or a user-supplied name so they can
// later be tabulated (e.g. with esttab/estout).  Also dispatches the
// subcommands "clear", "dir", and "drop", and supports the by: prefix
// (byable(onecall)).  The list of stored names is kept in global macro
// $eststo and a running counter in global macro $eststo_counter.
program define eststo, byable(onecall)
	version 8.2
	local caller : di _caller()		// remember caller's Stata version for re-dispatch below
// --- eststo clear ---
	if `"`1'"'=="clear" {
		if `"`0'"'!="clear" {		// "clear" takes no further arguments
			di as err "invalid syntax"
			exit 198
		}
		if "`_byvars'"!="" error 190	// subcommands may not be combined with by:
		_eststo_clear
		exit
	}
// --- update globals ---
	// prune names from $eststo that no longer exist as stored estimates
	_eststo_cleanglobal
// --- eststo dir ---
	if `"`1'"'=="dir" {
		if `"`0'"'!="dir" {		// "dir" takes no further arguments
			di as err "invalid syntax"
			exit 198
		}
		if "`_byvars'"!="" error 190	// subcommands may not be combined with by:
		_eststo_dir
		exit
	}
// --- eststo drop ---
	if `"`1'"'=="drop" {
		if "`_byvars'"!="" error 190	// subcommands may not be combined with by:
		_eststo_`0'			// expands to: _eststo_drop <args>
		exit
	}
// --- eststo store (no by) ---
	if "`_byvars'"=="" {
		// re-dispatch under the caller's version so version-sensitive
		// behavior of the stored command is preserved
		version `caller': _eststo_store `0'
		exit
	}
// --- eststo store (by) ---
// - check sorting
	// byable(onecall) does not sort for us; require data sorted by the byvars
	local sortedby : sortedby
	local i 0
	foreach byvar of local _byvars {
		local sortedbyi : word `++i' of `sortedby'
		if "`byvar'"!="`sortedbyi'" error 5
	}
// - parse command on if qualified
	capt _on_colon_parse `0'
	if _rc error 190			// by ...: eststo requires "eststo ... : command"
	if `"`s(after)'"'=="" error 190
	local estcom `"`s(after)'"'		// the estimation command to run per group
	local 0 `"`s(before)'"'			// the eststo arguments/options
	if substr(trim(`"`estcom'"'),1,3)=="svy" {
		di as err "svy commands not allowed with by ...: eststo:"
		exit 190
	}
	// rewrite the command so each run is restricted to the current by-group;
	// sets local `estcmd' in this frame via c_local
	AddBygrpToIfqualifier `estcom'
// - parse syntax of _eststo_store call in order to determine
// whether title() or missing was specified (note that
// -estimates change- cannot be used to set the titles since
// it does not work with -noesample-)
	// sets locals `titleopt' and `missing' in this frame via c_local
	TitleAndMissing `0'
// - generate byindex
	tempname _byindex
	qui egen long `_byindex' = group(`_byvars'), label `missing'
	qui su `_byindex', meanonly
	if r(N)==0 error 2000			// no observations
	local Nby = r(max)			// number of by-groups
// - loop over bygroups
	forv i = 1/`Nby' {
		local ibylab: label (`_byindex') `i'
		di as txt _n "{hline}"
		di as txt `"-> `ibylab'"' // could be improved
		// titleopt: 0 = user gave title(); 1 = append to existing options;
		// 2 = no options present, so start the option list with a comma
		if `titleopt'==0 local ibytitle
		else if `titleopt'==1 local ibytitle `" title(`ibylab')"'
		else if `titleopt'==2 local ibytitle `", title(`ibylab')"'
		capture noisily {
			version `caller': _eststo_store `0'`ibytitle' : `estcmd'
		}
		if _rc {
			// `_byrc0' is set when by's rc0 option is on: keep going
			// across groups instead of aborting on the first error
			if "`_byrc0'"=="" error _rc
		}
	}
end

// TitleAndMissing: helper that inspects the eststo options to decide how a
// per-group title() must be appended (see titleopt codes above) and whether
// the "missing" option was given.  Results returned via c_local.
prog TitleAndMissing
	capt syntax [anything] , Title(string) [ MISsing * ]
	if _rc==0 {
		// title() was supplied by the user -> do not override it
		c_local titleopt 0
		c_local missing "`missing'"
	}
	else {
		syntax [anything] [ , MISsing * ]
		// other options present -> a comma already exists in `0'
		if `"`missing'`options'"'!="" c_local titleopt 1
		else c_local titleopt 2
		c_local missing "`missing'"
	}
end

// AddBygrpToIfqualifier: rebuild the estimation command with an added
// "if (`_byindex'==`i')" qualifier (escaped so it expands inside the
// by-group loop), preserving any existing if/in/using/weight/options.
// The rewritten command is returned to the caller as local `estcmd'.
program AddBygrpToIfqualifier
	syntax anything(equalok) [if/] [in] [using] [fw aw pw iw] [, * ]
	local estcom `"`macval(anything)' if (\`_byindex'==\`i')"'
	if `"`macval(if)'"'!="" {
		local estcom `"`macval(estcom)' & (`macval(if)')"'
	}
	if `"`macval(in)'"'!="" {
		local estcom `"`macval(estcom)' `macval(in)'"'
	}
	if `"`macval(using)'"'!="" {
		local estcom `"`macval(estcom)' `macval(using)'"'
	}
	if `"`macval(weight)'"'!="" {
		local estcom `"`macval(estcom)' [`macval(weight)'`macval(exp)']"'
	}
	if `"`macval(options)'"'!="" {
		local estcom `"`macval(estcom)', `macval(options)'"'
	}
	c_local estcmd `"`macval(estcom)'"'
end

// _eststo_clear: drop all estimates listed in $eststo and reset the globals.
program define _eststo_clear
	local names $eststo
	foreach name of local names {
		capt estimates drop `name'	// capture: name may already be gone
	}
	global eststo
	global eststo_counter
end

// _eststo_dir: list the stored estimation sets tracked by eststo.
program define _eststo_dir
	if `"$eststo"'!="" {
		estimates dir $eststo
	}
end

// _eststo_cleanglobal: intersect $eststo with the estimates that actually
// exist, so stale names (dropped elsewhere) are removed from the global.
program define _eststo_cleanglobal
	local enames $eststo
	if `"`enames'"'!="" {
		// hold r() so -_estimates dir- does not clobber the caller's results
		tempname hcurrent
		_return hold `hcurrent'
		qui _estimates dir
		local snames `r(names)'
		_return restore `hcurrent'
	}
	local names: list enames & snames
	global eststo `names'
	if "`names'"=="" global eststo_counter
end

// _eststo_drop: drop stored estimates by name, number, or wildcard pattern.
program define _eststo_drop
	local droplist `0'
	if `"`droplist'"'=="" {
		// NOTE(review): "someting" is a typo in this error message
		// ("something required"); left unchanged here since it is a
		// runtime string.
		di as error "someting required"
		exit 198
	}
	local names $eststo
	foreach item of local droplist {
		capt confirm integer number `item'
		if _rc {
			local dropname `item'	// a name or wildcard pattern
		}
		else {
			if `item'<1 {
				di as error "`item' not allowed"
				exit 198
			}
			local dropname est`item'	// a number refers to est<#>
		}
		local found 0
		foreach name in `names' {
			// match() allows * and ? wildcards in `dropname'
			if match("`name'",`"`dropname'"') {
				local found 1
				estimates drop `name'
				local names: list names - name
				di as txt "(" as res "`name'" as txt " dropped)"
			}
		}
		if `found'==0 {
			di as txt "(no matches found for " as res `"`dropname'"' as txt ")"
		}
	}
	global eststo `names'
end


// _eststo_store: store the active estimates under the next est<#> name (or
// a given name), optionally refreshing an existing entry.
// NOTE: this program continues beyond the visible excerpt; the remainder is
// reproduced unchanged up to the cut-off point.
program define _eststo_store, eclass
	local caller : di _caller()
	capt _on_colon_parse `0'
	if !_rc {
		// "eststo ... : command" form: run `command' later, keep options in `0'
		local command `"`s(after)'"'
		local 0 `"`s(before)'"'
	}
	syntax [name] [, ///
		Title(passthru) ///
		Prefix(name) ///
		Refresh Refresh2(numlist integer max=1 >0) ///
		ADDscalars(string asis) ///
		noEsample ///
		noCopy ///
		MISsing svy /// doesn't do anything
		]
	if `"`prefix'"'=="" local prefix "est"

// get previous eststo names and counter
	local names $eststo
	local counter $eststo_counter
	if `"`counter'"'=="" local counter 0

// if name provided; set refresh on if name already in list
	if "`namelist'"!="" {
		if "`refresh2'"!="" {
			// refresh() with an explicit name is ambiguous
			di as error "refresh() not allowed"
			exit 198
		}
		local name `namelist'
		if `:list name in names' local refresh refresh
		else {
			if "`refresh'"!="" {
				di as txt "(" as res "`name'" as txt " not found)"
			}
			local refresh
		}
		if "`refresh'"=="" local ++counter
	}
// if no name provided
	else {
		if "`refresh2'"!="" local refresh refresh
		if "`refresh'"!="" {
// refresh2 not provided => refresh last (if available)
			if "`refresh2'"=="" {
				if "`names'"=="" {
					di as txt "(nothing to refresh)"
					local refresh
				}
				else local name: word `:list sizeof names' of `names'
			}
// refresh2 provided => check availability
			else {
				if `:list posof "`prefix'`refresh2'" in names' {
					local name `prefix'`refresh2'
				}
				else {
					di as txt "(" as res "`prefix'`refresh2'" as txt " not found)"
					local refresh
				}
			}
		}
		if "`refresh'"=="" local ++counter
// set default name
		if "`name'"=="" local name `prefix'`counter'
	}

// run
estimation command if provided + if `"`command'"'!="" { + version `caller': `command' + } + +// add scalars to e() + if `"`addscalars'"'!="" { + capt ParseAddscalars `addscalars' + if _rc { + di as err `"addscalars() invalid"' + exit 198 + } + if "`replace'"=="" { + local elist `: e(scalars)' `: e(macros)' `: e(matrices)' `: e(functions)' + } + local forbidden b V sample + while (1) { + gettoken lhs rest: rest + if `:list lhs in forbidden' { + di as err `"`lhs' not allowed in addscalars()"' + exit 198 + } + if "`replace'"=="" { + if `:list lhs in elist' { + di as err `"e(`lhs') already defined"' + exit 110 + } + } + gettoken rhs rest: rest, bind + capt eret scalar `lhs' = `rhs' + if _rc { + di as err `"addscalars() invalid"' + exit 198 + } + capture local result = e(`lhs') + di as txt "(e(" as res `"`lhs'"' as txt ") = " /// + as res `result' as txt " added)" + if `"`rest'"'=="" continue, break + } + } +// add e(cmd) if missing + if `"`e(cmd)'"'=="" { + if `"`: e(scalars)'`: e(macros)'`: e(matrices)'`: e(functions)'"'!="" { + eret local cmd "." + } + } + +// store estimates with e(sample) + estimates store `name' , `copy' `title' + +// remove e(sample) if -noesample- specified + if "`esample'"!="" { + capt confirm new var _est_`name' + if _rc { + tempname hcurrent + _est hold `hcurrent', restore estsystem nullok + qui replace _est_`name' = . 
in 1 + _est unhold `name' + capt confirm new var _est_`name' + if _rc qui drop _est_`name' + else { + di as error "somethings wrong; please contact author of -eststo- " /// + "(see e-mail in help {help eststo})" + exit 498 + } + _est hold `name', estimates varname(_est_`name') + // varname() only needed so that _est hold does not return error + // if variable `name' exists + } + } + +// report + if "`refresh'"=="" { + global eststo `names' `name' + global eststo_counter `counter' + if `"`namelist'"'=="" { + di as txt "(" as res "`name'" as txt " stored)" + } + } + else { + if `"`namelist'"'=="" { + di as txt "(" as res "`name'" as txt " refreshed)" + } + } +end + +program ParseAddscalars + syntax anything [ , Replace ] + c_local rest `"`anything'"' + c_local replace `replace' +end diff --git a/110/replication_package/replication/ado/plus/e/eststo.hlp b/110/replication_package/replication/ado/plus/e/eststo.hlp new file mode 100644 index 0000000000000000000000000000000000000000..2ff5ab8342b06b33f866823019e88dca83ab17be --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/eststo.hlp @@ -0,0 +1,347 @@ +{smcl} +{* 01feb2017}{...} +{hi:help eststo}{right:also see: {helpb esttab}, {helpb estout}, {helpb estadd}, {helpb estpost}} +{right: {browse "http://repec.sowi.unibe.ch/stata/estout/"}} +{hline} + +{title:Title} + +{p 4 4 2}{hi:eststo} {hline 2} Store estimates + + +{title:Syntax}{smcl} + +{p 8 15 2} +[{cmd:_}]{cmd:eststo} [{it:name}] +[{cmd:,} {it:{help eststo##options:options}} ] +[ {cmd::} {it:{help estimation_command}} ] + +{p 8 15 2} +[{cmd:_}]{cmd:eststo dir} + +{p 8 15 2} +[{cmd:_}]{cmd:eststo drop} {{it:#}|{it:name}} [ {{it:#}|{it:name}} ... 
] + +{p 8 15 2} +[{cmd:_}]{cmd:eststo clear} + +{marker options} + {it:options}{col 23}description + {hline 56} + [{ul:{cmd:no}}]{cmdab:e:sample}{col 23}{...} +do not/do store {cmd:e(sample)} + {cmdab:t:itle:(}{it:string}{cmd:)}{col 23}{...} +specify a title for the stored set + {cmdab:p:refix:(}{it:prefix}{cmd:)}{col 23}{...} +specify a name prefix; default is {cmd:est} + {cmdab:add:scalars(}{it:...}{cmd:)}{col 23}{...} +add scalar statistics + {cmdab:r:efresh}[{cmd:(}{it:#}{cmd:)}]{col 23}{...} +overwrite a previously stored set + {cmdab:noc:opy}{col 23}{...} +clear {cmd:e()} after storing the set + {cmdab:mis:sing}{col 23}{...} +use missing values in the {cmd:by} groups + {hline 56} + +{p 4 4 2} +{cmd:by} is allowed with {cmd:eststo} if {cmd:eststo} +is used as a prefix command, i.e. specify + + {cmd:by} {it:...} {cmd::} {cmd:eststo} {it:...} {cmd::} {it:estimation_command} + +{p 4 4 2} +to apply {it:estimation_command} to each {cmd:by} group and store an estimation +set for each group; see help {help by}. Note that the implementation of {cmd:by} +with {cmd:eststo} requires {it:estimation_command} +to follow {help language:standard Stata syntax} and +allow the {it:{help if}} qualifier. Do not use the +{bind:{cmd:by} {it:...}{cmd:: eststo:}} construct with +{cmd:svy} commands. + + +{title:Description} + +{p 4 4 2} +{cmd:eststo} stores a copy of the active estimation results for later +tabulation. If {it:name} is provided, the estimation set is stored +under {it:name}. If {it:name} is not provided, the estimation set is +stored under {cmd:est}{it:#}, where {it:#} is a counter for the +number of stored estimation sets. + +{p 4 4 2} +{cmd:eststo} may be used in two ways: Either after fitting a model as +in + + {com}. regress y x + . eststo{txt} + +{p 4 4 2} +or as a prefix command (see help {help prefix}): + + {com}. 
eststo: regress y x{txt} + +{p 4 4 2} +{cmd:_eststo} is a variant on {cmd:eststo} that, by default, does not +store the estimation sample information contained in {cmd:e(sample)}. +Essentially, {cmd:_eststo} is a shortcut to {cmd:eststo, noesample}. + +{p 4 4 2} +{cmd:eststo dir} displays a list of the stored estimates. + +{p 4 4 2} +{cmd:eststo drop} drops estimation sets stored by {cmd:eststo}. If {it:name} is +provided, the estimation set stored under {it:name} +is dropped (if {cmd:*} or {cmd:?} wildcards are used {it:name}, +all matching sets are dropped). Alternatively, if {it:#} is provided, +the estimation set stored as {cmd:est}{it:#} is dropped. + +{p 4 4 2} +{cmd:eststo clear} drops all estimation sets stored by {cmd:eststo} (and clears +{cmd:eststo}'s global macros). + +{p 4 4 2} +{cmd:eststo} is an alternative to official Stata's +{helpb estimates store}. The main differences are: + +{p 8 12 2} +{space 1}o{space 2}{cmd:eststo} does not require the user to specify a +name for the stored estimation set. + +{p 8 12 2} +{space 1}o{space 2}{cmd:eststo} may be used as a prefix command (see +help {help prefix}). + +{p 8 12 2} +{space 1}o{space 2}{cmd:eststo} provides the possibility to store +estimates without the {cmd:e(sample)} function (either specify the +{cmd:noesample} option or use the {cmd:_eststo} command). Omitting +{cmd:e(sample)} saves memory and also speeds up tabulation programs +such as {helpb estimates table}, {helpb estout} or {helpb esttab}. +{hi:Warning:} Some post-estimation commands may not work with +estimation sets that do not contain the {cmd:e(sample)}. + +{p 8 12 2} +{space 1}o{space 2}{cmd:eststo} can add additional scalar statistics to +be stored with the estimation set. + + +{title:Options} +{marker esample} +{p 4 8 2} +{cmd:esample} causes the information in {cmd:e(sample)} to be stored +with the estimates. This is the default in {cmd:eststo}. Type +{cmd:noesample} or use the {cmd:_eststo} command to omit the +{cmd:e(sample)}. 
Note that some post-estimation commands may not be +working correctly with estimation sets that have been stored without +{cmd:e(sample)}. + +{p 4 8 2} +{cmd:title(}{it:string}{cmd:)} specifies a title for the stored +estimation set. +{p_end} +{marker addscalars} +{p 4 8 2} +{cmd:addscalars(}{it:name exp} [{it:...}] [{cmd:,} {cmdab:r:eplace}]{cmd:)} +may be used to add additional results to the {cmd:e()}-scalars of the +estimation set before storing it. Specify the names and values of the +scalars in pairs. For example, {cmd:addscalars(one 1 two 2)} would +add {cmd:e(one)} = {cmd:1} and {cmd:e(two)} = {cmd:2}. See below for +an example. The {cmd:replace} suboption permits overwriting existing +{cmd:e()}-returns. Not allowed as names are "b", "V", or "sample". +See {helpb estadd} for a more sophisticated tool to add additional +results to {cmd:e()}-returns. + +{p 4 8 2} +{cmd:prefix(}{it:prefix}{cmd:)} specifies a custom prefix for the +automatic names of the stored estimation sets. The default prefix +is {cmd:est}. + +{p 4 8 2} +{cmd:refresh}[{cmd:(}{it:#}{cmd:)}] may be used to overwrite a +previously stored estimation set instead of storing the estimates +under a new name. {cmd:refresh}, specified without argument, will +overwrite the last saved set. Alternatively, +{cmd:refresh(}{it:#}{cmd:)} will overwrite the set named +{cmd:est}{it:#} if it exists. If {it:name} is provided to {cmd:eststo}, +existing sets of the same name will always be overwritten whether or +not {cmd:refresh} is specified. {cmd:refresh()} with argument is not +allowed in this case. + +{p 4 8 2} +{cmd:nocopy} specifies that after the estimation set has been stored, +it no longer be available as the active estimation set. + +{p 4 8 2} +{cmd:missing} is for use of {cmd:eststo} with the {cmd:by} prefix command and +causes missing values to be treated like any other values in the {cmd:by} +variables. The default is to discard observations with missing values in the +{cmd:by} variables. 
+ + +{title:Examples} + +{p 4 4 2} +Applying {cmd:eststo} after fiting a model to store the model's results, +as in the following example: + + {com}. sysuse auto + {txt}(1978 Automobile Data) + + {com}. quietly regress price weight + {txt} + {com}. eststo model1 + {txt} + {com}. quietly regress turn weight foreign + {txt} + {com}. eststo model2 + {txt} + {com}. estout + {res} + {txt}{hline 38} + {txt} model1 model2 + {txt} b b + {txt}{hline 38} + {txt}weight {res} 2.044063 .0042183{txt} + {txt}foreign {res} -1.809802{txt} + {txt}_cons {res} -6.707353 27.44963{txt} + {txt}{hline 38} + + +{p 4 4 2} +Applying {cmd:eststo} as a prefix commmand to fit and store a model in one step: + + {com}. eststo model1: quietly regress price weight + {txt} + {com}. eststo model2: quietly regress turn weight foreign + {txt} + {com}. estout + {res} + {txt}{hline 38} + {txt} model1 model2 + {txt} b b + {txt}{hline 38} + {txt}weight {res} 2.044063 .0042183{txt} + {txt}foreign {res} -1.809802{txt} + {txt}_cons {res} -6.707353 27.44963{txt} + {txt}{hline 38} + + +{p 4 4 2} +Using {cmd:eststo} with automatic names: + + {com}. eststo clear + {txt} + {com}. eststo: quietly regress price weight + {txt}({res}est1{txt} stored) + + {com}. eststo: quietly regress turn weight foreign + {txt}({res}est2{txt} stored) + + {com}. estout + {res} + {txt}{hline 38} + {txt} est1 est2 + {txt} b b + {txt}{hline 38} + {txt}weight {res} 2.044063 .0042183{txt} + {txt}foreign {res} -1.809802{txt} + {txt}_cons {res} -6.707353 27.44963{txt} + {txt}{hline 38} + + +{p 4 4 2} +Adding ancillary statistics: + + {com}. eststo clear + {txt} + {com}. quietly regress price weight mpg + {txt} + {com}. test weight = mpg + + {txt} ( 1) {res}weight - mpg = 0 + + {txt} F( 1, 71) ={res} 0.36 + {txt}{col 13}Prob > F ={res} 0.5514 + {txt} + {com}. eststo, add(p_diff r(p)) + {txt}(e({res}p_diff{txt}) = {res}.55138216{txt} added) + ({res}est1{txt} stored) + + {com}. 
estout, stat(p_diff) + {res} + {txt}{hline 25} + {txt} est1 + {txt} b + {txt}{hline 25} + {txt}weight {res} 1.746559{txt} + {txt}mpg {res} -49.51222{txt} + {txt}_cons {res} 1946.069{txt} + {txt}{hline 25} + {txt}p_diff {res} .5513822{txt} + {txt}{hline 25} + + +{p 4 4 2} +Using the {cmd:by} prefix to store subbroup models: + + {com}. eststo clear + {txt} + {com}. quietly by foreign: eststo: quietly reg price weight mpg + {txt} + {com}. esttab, label nodepvar nonumber + {res} + {txt}{hline 52} + {txt} Domestic Foreign + {txt}{hline 52} + {txt}Weight (lbs.) {res} 4.415*** 5.156***{txt} + {res} {ralign 12:{txt:(}4.66{txt:)}} {ralign 12:{txt:(}5.85{txt:)}} {txt} + + {txt}Mileage (mpg) {res} 237.7 -19.78 {txt} + {res} {ralign 12:{txt:(}1.71{txt:)}} {ralign 12:{txt:(}-0.34{txt:)}} {txt} + + {txt}Constant {res} -13285.4* -5065.8 {txt} + {res} {ralign 12:{txt:(}-2.32{txt:)}} {ralign 12:{txt:(}-1.58{txt:)}} {txt} + {txt}{hline 52} + {txt}Observations {res} 52 22 {txt} + {txt}{hline 52} + {txt}t statistics in parentheses + {txt}* p<0.05, ** p<0.01, *** p<0.001 + + +{title:Returned results} + +{p 4 4 2} +The name under which an estimation set is stored, is added to the set in +{cmd:e(_estimates_name)}. + +{p 4 4 2} +In addition, {cmd:eststo} maintains two global macros. {cmd:$eststo} contains a list +of the names of the stored estimation sets. {cmd:$eststo_counter} +contains the count of stored estimation sets. + + +{title:Acknowledgements} + +{p 4 4 2} +Bill Gould suggested to make {cmd:eststo} "byable". 
+ + +{title:Author} + +{p 4 4 2} +Ben Jann, Institute of Sociology, University of Bern, jann@soz.unibe.ch + + +{title:Also see} + + Manual: {hi:[R] estimates} + +{p 4 13 2}Online: help for + {helpb estimates}, + {helpb esttab}, + {helpb estout}, + {helpb estadd}, + {helpb estpost} +{p_end} + diff --git a/110/replication_package/replication/ado/plus/e/esttab.ado b/110/replication_package/replication/ado/plus/e/esttab.ado new file mode 100644 index 0000000000000000000000000000000000000000..275757e0026a0853e0a6962cf887770d27fe51aa --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/esttab.ado @@ -0,0 +1,1337 @@ +*! version 2.1.1 10jun2022 Ben Jann +*! wrapper for estout + +program define esttab + version 8.2 + local caller : di _caller() + +// mode specific defaults + local cdate "`c(current_date)'" + local ctime "`c(current_time)'" +// - fixed + local fixed_open0 `""% `cdate' `ctime'""' + local fixed_close0 `""""' + local fixed_open `""' + local fixed_close `""' + local fixed_caption `""@title""' + local fixed_open2 `""' + local fixed_close2 `""' + local fixed_toprule `""@hline""' + local fixed_midrule `""@hline""' + local fixed_bottomrule `""@hline""' + local fixed_topgap `""""' + local fixed_midgap `""""' + local fixed_bottomgap `""""' + local fixed_eqrule `"begin(@hline "")"' + local fixed_ssl `"N R-sq "adj. 
R-sq" "pseudo R-sq" AIC BIC"' + local fixed_lsl `"Observations R-squared "Adjusted R-squared" "Pseudo R-squared" AIC BIC"' + local fixed_starlevels `"* 0.05 ** 0.01 *** 0.001"' + local fixed_starlevlab `""' + local fixed_begin `""' + local fixed_delimiter `"" ""' + local fixed_end `""' + local fixed_incelldel `"" ""' + local fixed_varwidth `"\`= cond("\`label'"=="", 12, 20)'"' + local fixed_modelwidth `"12"' + local fixed_abbrev `"abbrev"' + local fixed_substitute `""' + local fixed_interaction `"" # ""' + local fixed_tstatlab `"t statistics"' + local fixed_zstatlab `"z statistics"' + local fixed_pvallab `"p-values"' + local fixed_cilab `"\`level'% confidence intervals"' +// - smcl + local smcl_open0 `"{smcl} "{* % `cdate' `ctime'}{...}""' + local smcl_close0 `""""' + local smcl_open `""' + local smcl_close `""' + local smcl_caption `""@title""' + local smcl_open2 `""' + local smcl_close2 `""' + local smcl_toprule `""{hline @width}""' + local smcl_midrule `""{hline @width}""' + local smcl_bottomrule `""{hline @width}""' + local smcl_topgap `""""' + local smcl_midgap `""""' + local smcl_bottomgap `""""' + local smcl_eqrule `"begin("{hline @width}" "")"' + local smcl_ssl `"`macval(fixed_ssl)'"' + local smcl_lsl `"`macval(fixed_lsl)'"' + local smcl_starlevels `"`macval(fixed_starlevels)'"' + local smcl_starlevlab `""' + local smcl_begin `""' + local smcl_delimiter `"" ""' + local smcl_end `""' + local smcl_incelldel `"" ""' + local smcl_varwidth `"`macval(fixed_varwidth)'"' + local smcl_modelwidth `"`macval(fixed_modelwidth)'"' + local smcl_abbrev `"`macval(fixed_abbrev)'"' + local smcl_substitute `""' + local smcl_interaction `"" # ""' + local smcl_tstatlab `"`macval(fixed_tstatlab)'"' + local smcl_zstatlab `"`macval(fixed_zstatlab)'"' + local smcl_pvallab `"`macval(fixed_pvallab)'"' + local smcl_cilab `"`macval(fixed_cilab)'"' +// - tab + local tab_open0 `"`macval(fixed_open0)'"' + local tab_close0 `""""' + local tab_open `""' + local tab_close `""' + local 
tab_caption `""@title""' + local tab_open2 `""' + local tab_close2 `""' + local tab_topgap `""""' + local tab_midgap `""""' + local tab_bottomgap `""""' + local tab_ssl `"`macval(fixed_ssl)'"' + local tab_lsl `"`macval(fixed_lsl)'"' + local tab_starlevels `"`macval(fixed_starlevels)'"' + local tab_starlevlab `""' + local tab_begin `""' + local tab_delimiter `"_tab"' + local tab_end `""' + local tab_incelldel `"" ""' + local tab_varwidth `""' + local tab_modelwidth `""' + local tab_abbrev `""' + local tab_substitute `""' + local tab_interaction `"" # ""' + local tab_tstatlab `"`macval(fixed_tstatlab)'"' + local tab_zstatlab `"`macval(fixed_zstatlab)'"' + local tab_pvallab `"`macval(fixed_pvallab)'"' + local tab_cilab `"`macval(fixed_cilab)'"' +// - csv + local csv_open0 `"`"\`csvlhs'% `cdate' `ctime'""'"' + local csv_close0 `""""' + local csv_open `""' + local csv_close `""' + local csv_caption `"`"\`csvlhs'@title""'"' + local csv_open2 `""' + local csv_close2 `""' + local csv_topgap `""""' + local csv_midgap `""""' + local csv_bottomgap `""""' + local csv_ssl `"`macval(fixed_ssl)'"' + local csv_lsl `"`macval(fixed_lsl)'"' + local csv_starlevels `"`macval(fixed_starlevels)'"' + local csv_starlevlab `""' + local csv_begin `"`"\`csvlhs'"'"' + local csv_delimiter `"`"",\`csvlhs'"'"' + local scsv_delimiter `"`"";\`csvlhs'"'"' + local csv_end `"`"""'"' + local csv_incelldel `"" ""' + local csv_varwidth `""' + local csv_modelwidth `""' + local csv_abbrev `""' + local csv_substitute `""' + local csv_interaction `"" # ""' + local csv_tstatlab `"`macval(fixed_tstatlab)'"' + local csv_zstatlab `"`macval(fixed_zstatlab)'"' + local csv_pvallab `"`macval(fixed_pvallab)'"' + local csv_cilab `"`macval(fixed_cilab)'"' +// - rtf + local rtf_open0 `""' + local rtf_close0 `""' + local rtf_ct `"\yr`=year(d(`cdate'))'\mo`=month(d(`cdate'))'\dy`=day(d(`cdate'))'\hr`=substr("`ctime'",1,2)'\min`=substr("`ctime'",4,2)'"' + local rtf_fonttbl "\f0\fnil Times New Roman;" + local rtf_open_l1 
`"`"{\rtf1`=cond("`c(os)'"=="MacOSX", "\mac", "\ansi")'\deff0 {\fonttbl\`rtf_fonttbl'}"'"' + local rtf_open_l2 `" `"{\info {\author .}{\company .}{\title .}{\creatim`rtf_ct'}}"'"' + local rtf_open_l3 `" `"\deflang1033\plain\fs24"'"' + local rtf_open_l4 `" `"{\footer\pard\qc\plain\f0\fs24\chpgn\par}"'"' + local rtf_open `"\`rtf_open_l1'`rtf_open_l2'`rtf_open_l3'`rtf_open_l4'"' + local rtf_close `""{\pard \par}" "}""' + local rtf_caption `"`"{\pard\keepn\ql @title\par}"'"' + local rtf_open2 `""{""' + local rtf_close2 `""}""' + local rtf_toprule `""' + local rtf_midrule `""' + local rtf_bottomrule `""' + local rtf_topgap `""' + local rtf_midgap `"{\trowd\trgaph108\trleft-108@rtfemptyrow\row}"' + local rtf_bottomgap `""' + local rtf_eqrule `"begin("{\trowd\trgaph108\trleft-108@rtfrowdefbrdrt\pard\intbl\ql {") replace"' + local rtf_ssl `""{\i N}" "{\i R}{\super 2}" "adj. {\i R}{\super 2}" "pseudo {\i R}{\super 2}" "{\i AIC}" "{\i BIC}""' + local rtf_lsl `"Observations "{\i R}{\super 2}" "Adjusted {\i R}{\super 2}" "Pseudo {\i R}{\super 2}" "{\i AIC}" "{\i BIC}""' + local rtf_starlevels `""{\super *}" 0.05 "{\super **}" 0.01 "{\super ***}" 0.001"' + local rtf_starlevlab `", label(" {\i p} < ")"' + local rtf_rowdef `"\`=cond("\`lines'"=="", "@rtfrowdef", "@rtfrowdefbrdr")'"' + local rtf_begin `"{\trowd\trgaph108\trleft-108\`rtf_rowdef'\pard\intbl\ql {"' + local rtf_delimiter `"}\cell \pard\intbl\q\`=cond(`"\`alignment'"'!="", `"\`alignment'"', "c")' {"' + local rtf_end `"}\cell\row}"' + local rtf_incelldel `""\line ""' + local rtf_varwidth `"\`= cond("\`label'"=="", 12, 20)'"' + local rtf_modelwidth `"12"' + local rtf_abbrev `""' + local rtf_substitute `""' + local rtf_interaction `"" # ""' + local rtf_tstatlab `"{\i t} statistics"' + local rtf_zstatlab `"{\i z} statistics"' + local rtf_pvallab `"{\i p}-values"' + local rtf_cilab `"\`level'% confidence intervals"' +// - html + local html_open0 `" "`=cond(`"\`macval(title)'"'=="","estimates table, created `cdate' 
`ctime'","@title")'" """' + local html_close0 `""" """' + local html_open `"`""'"' + local html_close `""
""' + local html_caption `""@title""' + local html_open2 `""' + local html_close2 `""' + local html_toprule `""
""' + local html_midrule `""
""' + local html_bottomrule `""
""' + local html_topgap `""' + local html_midgap `"" ""' + local html_bottomgap `""' + local html_eqrule `"begin("
" "")"' + local html_ssl `"N R2 "adj. R2" "pseudo R2" AIC BIC"' + local html_lsl `"Observations R2 "Adjusted R2" "Pseudo R2" AIC BIC"' + local html_starlevels `"* 0.05 ** 0.01 *** 0.001"' + local html_starlevlab `", label(" p < ")"' + local html_begin `""' + local html_delimiter `""' + local html_end `""' + local html_incelldel `"
"' + local html_varwidth `"\`= cond("\`label'"=="", 12, 20)'"' + local html_modelwidth `"12"' + local html_abbrev `""' + local html_substitute `""' + local html_interaction `"" # ""' + local html_tstatlab `"t statistics"' + local html_zstatlab `"z statistics"' + local html_pvallab `"p-values"' + local html_cilab `"\`level'% confidence intervals"' +// - tex + local tex_open0 `""% `cdate' `ctime'" `"\documentclass\`texclass'"' \`texpkgs' \`=cond("\`longtable'"!="","\usepackage{longtable}","")' \begin{document} """' + local tex_close0 `""" \end{document} """' + local tex_open `"`"\`=cond("\`longtable'"=="", "\begin{table}[htbp]\centering", "{")'"'"' + local tex_close `"`"\`=cond("\`longtable'"=="", "\end{table}", "}")'"'"' + local tex_caption `"\caption{@title}"' + local tex_open2 `"\`=cond("\`longtable'"!="", "\begin{longtable}", "\begin{tabular" + cond("\`width'"=="", "}", "*}{\`width'}"))'"' + local tex_close2 `"`"\`=cond("\`longtable'"!="", "\end{longtable}", "\end{tabular" + cond("\`width'"=="", "}", "*}"))'"'"' + local tex_toprule `"`"\`="\hline\hline" + cond("\`longtable'"!="", "\endfirsthead\hline\endhead\hline\endfoot\endlastfoot", "")'"'"' + local tex_midrule `""\hline""' + local tex_bottomrule `""\hline\hline""' + local tex_topgap `""' + local tex_midgap `"[1em]"' // `"\\\"' + local tex_bottomgap `""' + local tex_eqrule `"begin("\hline" "")"' + local tex_ssl `"\(N\) \(R^{2}\) "adj. 
\(R^{2}\)" "pseudo \(R^{2}\)" \textit{AIC} \textit{BIC}"' + local tex_lsl `"Observations \(R^{2}\) "Adjusted \(R^{2}\)" "Pseudo \(R^{2}\)" \textit{AIC} \textit{BIC}"' + local tex_starlevels `"\sym{*} 0.05 \sym{**} 0.01 \sym{***} 0.001"' + local tex_starlevlab `", label(" \(p<@\)")"' + local tex_begin `""' + local tex_delimiter `"&"' + local tex_end `"\\\"' + local tex_incelldel `"" ""' + local tex_varwidth `"\`= cond("\`label'"=="", 12, 20)'"' + local tex_modelwidth `"12"' + local tex_abbrev `""' + local tex_tstatlab `"\textit{t} statistics"' + local tex_zstatlab `"\textit{z} statistics"' + local tex_pvallab `"\textit{p}-values"' + local tex_cilab `"\`level'\% confidence intervals"' + local tex_substitute `"_ \_ "\_cons " \_cons"' + local tex_interaction `"" $\times$ ""' +// - booktabs + local booktabs_open0 `""% `cdate' `ctime'" `"\documentclass\`texclass'"' \`texpkgs' \usepackage{booktabs} \`=cond("\`longtable'"!="","\usepackage{longtable}","")' \begin{document} """' + local booktabs_close0 `"`macval(tex_close0)'"' + local booktabs_open `"`macval(tex_open)'"' + local booktabs_close `"`macval(tex_close)'"' + local booktabs_caption `"`macval(tex_caption)'"' + local booktabs_open2 `"`macval(tex_open2)'"' + local booktabs_close2 `"`macval(tex_close2)'"' + local booktabs_toprule `"`"\`="\toprule" + cond("\`longtable'"!="", "\endfirsthead\midrule\endhead\midrule\endfoot\endlastfoot", "")'"'"' + local booktabs_midrule `""\midrule""' + local booktabs_bottomrule `""\bottomrule""' + local booktabs_topgap `"`macval(tex_topgap)'"' + local booktabs_midgap `"\addlinespace"' + local booktabs_bottomgap `"`macval(tex_bottomgap)'"' + local booktabs_eqrule `"begin("\midrule" "")"' + local booktabs_ssl `"`macval(tex_ssl)'"' + local booktabs_lsl `"`macval(tex_lsl)'"' + local booktabs_starlevels `"`macval(tex_starlevels)'"' + local booktabs_starlevlab `"`macval(tex_starlevlab)'"' + local booktabs_begin `"`macval(tex_begin)'"' + local booktabs_delimiter `"`macval(tex_delimiter)'"' + 
local booktabs_end `"`macval(tex_end)'"' + local booktabs_incelldel `"`macval(tex_incelldel)'"' + local booktabs_varwidth `"`macval(tex_varwidth)'"' + local booktabs_modelwidth `"`macval(tex_modelwidth)'"' + local booktabs_abbrev `"`macval(tex_abbrev)'"' + local booktabs_tstatlab `"`macval(tex_tstatlab)'"' + local booktabs_zstatlab `"`macval(tex_zstatlab)'"' + local booktabs_pvallab `"`macval(tex_pvallab)'"' + local booktabs_cilab `"`macval(tex_cilab)'"' + local booktabs_substitute `"`macval(tex_substitute)'"' + local booktabs_interaction `"`macval(tex_interaction)'"' +// - mmd + local mmd_open0 `""' + local mmd_close0 `""' + local mmd_open `""""' + local mmd_close `""""' + local mmd_caption `""@title" """' + local mmd_open2 `""' + local mmd_close2 `""' + local mmd_toprule `""' + local mmd_midrule `""' + local mmd_bottomrule `""' + local mmd_topgap `""' + local mmd_midgap `""' + local mmd_bottomgap `""' + local mmd_eqrule `""' + local mmd_ssl `"*N* *R*2 "adj. *R*2" "pseudo *R*2" *AIC* *BIC*"' + local mmd_lsl `"Observations *R*2 "Adjusted *R*2" "Pseudo *R*2" *AIC* *BIC*"' + local mmd_starlevels `"\* 0.05 \*\* 0.01 \*\*\* 0.001"' + local mmd_starlevlab `", label(" *p* < ")"' + local mmd_begin `"| "' + local mmd_delimiter `" | "' + local mmd_end `" |"' + local mmd_incelldel `" "' + local mmd_varwidth `"\`= cond("\`label'"=="", 12, 20)'"' + local mmd_modelwidth `"12"' + local mmd_abbrev `""' + local mmd_substitute `"_ \_ "\_cons " \_cons"' + local mmd_interaction `"" # ""' + local mmd_tstatlab `"*t* statistics"' + local mmd_zstatlab `"*z* statistics"' + local mmd_pvallab `"*p*-values"' + local mmd_cilab `"\`level'\% confidence intervals"' +// syntax + syntax [anything] [using] [ , /// + /// coefficients and t-stats, se, etc. 
+ b Bfmt(string) /// + noT Tfmt(string) /// + z Zfmt(string) /// + se SEfmt(string) /// + p Pfmt(string) /// + ci CIfmt(string) /// + BEta BEtafmt(string) /// + main(string) /// syntax: name format + aux(string) /// syntax: name format + abs /// absolute t-values + wide /// + NOSTAr STAR STAR2(string asis) /// + staraux /// + NOCONstant CONstant /// + COEFlabels(string asis) /// + /// summary statistics + noOBS obslast /// + r2 R2fmt(string) ar2 AR2fmt(string) pr2 PR2fmt(string) /// + aic AICfmt(string) bic BICfmt(string) /// + SCAlars(string asis) /// syntax: "name1 [label1]" "name2 [label2]" etc. + sfmt(string) /// + /// layout + NOMTItles MTItles MTItles2(string asis) /// + NOGAPs GAPs /// + NOLInes LInes /// + ADDNotes(string asis) /// + COMpress /// + plain /// + smcl FIXed tab csv SCsv rtf HTMl tex BOOKTabs md mmd /// + Fragment /// + page PAGE2(str) /// + STANDalone STANDalone2(str asis) /// + ALIGNment(str asis) /// + width(str asis) /// + fonttbl(str) /// + /// other + Noisily /// + * ] + _more_syntax , `macval(options)' + _estout_options , `macval(options)' + +// matrix mode + MatrixMode, `anything' + +// syntax consistency etc + gettoken chunk using0: using + if `"`macval(star2)'"'!="" local star star + foreach opt in constant gaps lines star abbrev depvars numbers parentheses /// + notes mtitles type outfilenoteoff float { + NotBothAllowed "``opt''" `no`opt'' + } + NotBothAllowed "`staraux'" `nostar' + if `"`macval(mtitles2)'"'!="" NotBothAllowed "mtitles" `nomtitles' + if `"`standalone2'"'!="" local standalone standalone + if "`standalone'"!="" { + if `"`standalone2'"'=="" local standalone2 "[varwidth]" + else if `"`standalone2'"'==`""""' local standalone2 + else local standalone2 `"[`standalone2']"' + local page page + } + if `"`page2'"'!="" local page page + NotBothAllowed "`fragment'" `page' + if `"`pfmt'"'!="" local p p + if `"`zfmt'"'!="" local z z + if `"`sefmt'"'!="" local se se + if `"`cifmt'"'!="" local ci ci + if `"`betafmt'"'!="" local beta 
beta + if "`level'"=="" local level $S_level + if ((("`margin'"!="" | `"`margin2'"'!="") & "`nomargin'"=="") | /// + ("`beta'"!="") | ("`eform'"!="" & "`noeform'"=="")) /// + & "`constant'"=="" local noconstant noconstant + if `"`r2fmt'"'!="" local r2 r2 + if `"`ar2fmt'"'!="" local ar2 ar2 + if `"`pr2fmt'"'!="" local pr2 pr2 + if `"`aicfmt'"'!="" local aic aic + if `"`bicfmt'"'!="" local bic bic + if "`type'"=="" & `"`using'"'!="" local notype notype + local nocellsopt = `"`macval(cells)'"'=="" + if `"`width'"'!="" & `"`longtable'"'!="" { + di as err "width() and longtable not both allowed" + exit 198 + } + +// format modes + local mode `smcl' `fixed' `tab' `csv' `scsv' `rtf' `html' `tex' `booktabs' `md' `mmd' + if `:list sizeof mode'>1 { + di as err "only one allowed of smcl, fixed, tab, csv, scsv, rtf, html, tex, booktabs, md, or mmd" + exit 198 + } + if `"`using'"'!="" { + _getfilename `"`using0'"' + local fn `"`r(filename)'"' + _getfilesuffix `"`fn'"' + local suffix `"`r(suffix)'"' + } + if "`mode'"=="" { + if `"`using'"'!="" { + if inlist(`"`suffix'"', ".html", ".htm") local mode html + else if `"`suffix'"'==".tex" local mode tex + else if `"`suffix'"'==".csv" local mode csv + else if `"`suffix'"'==".rtf" local mode rtf + else if `"`suffix'"'==".smcl" local mode smcl + else if `"`suffix'"'== ".md" local mode md + else if `"`suffix'"'== ".mmd" local mode mmd + else local mode fixed + } + else local mode smcl + } + else { + if "`mode'"=="scsv" { + local csv_delimiter `"`macval(`mode'_delimiter)'"' + local mode "csv" + } + } + if `"`using'"'!="" & `"`suffix'"'=="" { + if inlist("`mode'","fixed","tab") local suffix ".txt" + else if inlist("`mode'","csv","scsv") local suffix ".csv" + else if "`mode'"=="rtf" local suffix ".rtf" + else if "`mode'"=="html" local suffix ".html" + else if inlist("`mode'","tex","booktabs") local suffix ".tex" + else if "`mode'"=="smcl" local suffix ".smcl" + else if "`mode'"=="md" local suffix ".md" + else if "`mode'"=="mmd" local suffix 
".mmd" + local using `"using `"`fn'`suffix'"'"' + local using0 `" `"`fn'`suffix'"'"' + } + if "`mode'"=="md" local mode "mmd" // ! + if "`mode'"=="smcl" local smcltags smcltags + local mode0 `mode' + if "`mode0'"=="booktabs" local mode0 tex + else if "`mode0'"=="csv" { + if "`plain'"=="" local csvlhs `"=""' + else local csvlhs `"""' + } + if "`compress'"!="" { + if "``mode'_modelwidth'"!="" { + local `mode'_modelwidth = ``mode'_modelwidth' - 3 + } + if "``mode'_varwidth'"!="" { + local `mode'_varwidth = ``mode'_varwidth' - cond("`label'"!="", 4, 2) + } + } + if `"`modelwidth'"'=="" { + if `nocellsopt' & `"``mode'_modelwidth'"'!="" & "`ci'"!="" { + local modelwidth = 2*``mode'_modelwidth' - 2 + if "`wide'"!="" local modelwidth "``mode'_modelwidth' `modelwidth'" + } + else { + local modelwidth "``mode'_modelwidth'" + } + } + if `"`varwidth'"'=="" { + local varwidth "``mode'_varwidth'" + } + if "`plain'"=="" & `matrixmode'==0 { + foreach opt in star depvars numbers parentheses notes { + SwitchOnIfEmpty `opt' `no`opt'' + } + if "`wide'"=="" & ("`t'"=="" | "`z'`se'`p'`ci'`aux'"!="") & `nocellsopt'==1 /// + SwitchOnIfEmpty gaps `nogaps' + } + if "`plain'"=="" { + SwitchOnIfEmpty lines `nolines' + } + if `"`lines'"'!="" { + SwitchOnIfEmpty eqlines `noeqlines' + } + if inlist("`mode0'", "tab", "csv") { + local lines + local eqlines + } + if "`notes'"!="" & "`nolegend'"=="" & `nocellsopt'==1 & `matrixmode'==0 local legend legend + if "`plain'"!="" { + if "`bfmt'"=="" local bfmt %9.0g + if "`tfmt'"=="" local tfmt `bfmt' + if "`zfmt'"=="" local zfmt `bfmt' + if "`sefmt'"=="" local sefmt `bfmt' + if "`pfmt'"=="" local pfmt `bfmt' + if "`cifmt'"=="" local cifmt `bfmt' + if "`betafmt'"=="" local betafmt `bfmt' + } + //if "`nomtitles'"!="" local depvars + //else if "`depvars'"=="" local mtitles mtitles + +// prepare append for rtf, tex, and html + local outfilenoteoff2 "`outfilenoteoff'" + if "`outfilenoteoff2'"=="" local outfilenoteoff2 "`nooutfilenoteoff'" + if `"`using'"'!="" 
& "`append'"!="" & /// + (("`mode0'"=="rtf" & "`fragment'"=="") | /// + ("`page'"!="" & inlist("`mode0'", "tex", "html"))) { + capture confirm file `using0' + if _rc==0 { + tempfile appendfile + if "`mode'"=="rtf" local `mode'_open + else local `mode'_open0 + local append + if "`outfilenoteoff2'"=="" local outfilenoteoff2 outfilenoteoff + } + } + +// cells() option + if "`notes'"!="" { + if ("`margin'"!="" | `"`margin2'"'!="") & "`nomargin'"=="" /// + local thenote "`thenote'Marginal effects" + if "`eform'"!="" & "`noeform'"=="" /// + local thenote "`thenote'Exponentiated coefficients" + } + if "`bfmt'"=="" local bfmt a3 + if `nocellsopt' & `matrixmode'==0 { + if "`star'"!="" & "`staraux'"=="" local bstar star + if "`beta'"!="" { + if "`main'"!="" { + di as err "beta() and main() not allowed both" + exit 198 + } + if "`betafmt'"=="" local betafmt 3 + local cells fmt(`betafmt') `bstar' + local cells beta(`cells') + if "`notes'"!="" { + if `"`thenote'"'!="" local thenote "`thenote'; " + local thenote "`thenote'Standardized beta coefficients" + } + } + else if "`main'"!="" { + tokenize "`main'" + if "`2'"=="" local 2 "`bfmt'" + local cells fmt(`2') `bstar' + local cells `1'(`cells') + if "`notes'"!="" { + if `"`thenote'"'!="" local thenote "`thenote'; " + local thenote "`thenote'`1' coefficients" + } + } + else { + local cells fmt(`bfmt') `bstar' + local cells b(`cells') + } + if "`t'"=="" | "`z'`se'`p'`ci'`aux'"!="" { + if "`onecell'"!="" { + local cells `cells' & + } +// parse aux option + tokenize "`aux'" + local auxname `1' + local auxfmt `2' +// type of auxiliary statistic + local aux `z' `se' `p' `ci' `auxname' + if `"`aux'"'=="" local aux t + else { + if `:list sizeof aux'>1 { + di as err "only one allowed of z, se, p, ci, and aux()" + exit 198 + } + } + if !inlist(`"`aux'"', "t", "z") local abs +// parentheses/brackets + if "`parentheses'"!="" | "`brackets'"!="" { + if `"`aux'"'=="ci" { + local brackets brackets + if "`mode'"!="smcl" | "`onecell'"!="" local 
paren par + else local paren `"par("{ralign @modelwidth:{txt:[}" "{txt:,}" "{txt:]}}")"' + } + else if "`brackets'"!="" { + if "`mode'"!="smcl" | "`onecell'"!="" local paren "par([ ])" + else local paren `"par("{ralign @modelwidth:{txt:[}" "{txt:]}}")"' + } + else { + if "`mode'"!="smcl" | "`onecell'"!="" local paren par + else local paren `"par("{ralign @modelwidth:{txt:(}" "{txt:)}}")"' + } + } +// compose note + if "`notes'"!="" { + if `"`thenote'"'!="" local thenote "`thenote'; " + if `"`auxname'"'!="" { + local thenote `"`macval(thenote)'`auxname'"' + } + else if inlist(`"`aux'"', "t", "z") { + if "`abs'"!="" local thenote `"`macval(thenote)'Absolute "' + local thenote `"`macval(thenote)'``mode'_`aux'statlab'"' + } + else if `"`aux'"'=="se" { + local thenote `"`macval(thenote)'Standard errors"' + } + else if `"`aux'"'=="p" { + local thenote `"`macval(thenote)'``mode'_pvallab'"' + } + else if `"`aux'"'=="ci" { + local thenote `"`macval(thenote)'``mode'_cilab'"' + } + if "`parentheses'"=="" { + if "`wide'"=="" local thenote `"`macval(thenote)' in second row"' + else local thenote `"`macval(thenote)' in second column"' + } + else if "`brackets'"!="" { + local thenote `"`macval(thenote)' in brackets"' + } + else local thenote `"`macval(thenote)' in parentheses"' + } +// formats + if "`tfmt'"=="" local tfmt 2 + if "`zfmt'"=="" local zfmt 2 + if "`sefmt'"=="" local sefmt `bfmt' + if "`pfmt'"=="" local pfmt 3 + if "`cifmt'"=="" local cifmt `bfmt' + if `"`auxfmt'"'=="" local auxfmt `bfmt' + if `"`auxname'"'=="" { + local auxfmt ``aux'fmt' + } +// stars + if "`staraux'"!="" local staraux star +// put together + local temp fmt(`auxfmt') `paren' `abs' `staraux' + local cells `cells' `aux'(`temp') + } + if "`wide'"!="" local cells cells(`"`cells'"') + else local cells cells(`cells') + } + +// stats() option + if `"`macval(stats)'"'=="" & `matrixmode'==0 { + if `"`sfmt'"'=="" local sfmt `bfmt' + if `"`r2fmt'"'=="" local r2fmt = cond("`plain'"!="", "`bfmt'", "3") + if 
`"`ar2fmt'"'=="" local ar2fmt = cond("`plain'"!="", "`bfmt'", "3") + if `"`pr2fmt'"'=="" local pr2fmt = cond("`plain'"!="", "`bfmt'", "3") + if `"`aicfmt'"'=="" local aicfmt `bfmt' + if `"`bicfmt'"'=="" local bicfmt `bfmt' + if "`label'"=="" { + local stalabs `"``mode'_ssl'"' + } + else { + local stalabs `"``mode'_lsl'"' + } + gettoken obslab stalabs: stalabs + if "`obs'"=="" & "`obslast'"=="" { + local sta N + local stalab `"`"`macval(obslab)'"'"' + local stafmt %18.0g + } + local i 0 + foreach s in r2 ar2 pr2 aic bic { + local ++i + if "``s''"!="" { + local sta `sta' `:word `i' of r2 r2_a r2_p aic bic' + local chunk: word `i' of `macval(stalabs)' + local stalab `"`macval(stalab)' `"`macval(chunk)'"'"' + local stafmt `stafmt' ``s'fmt' + } + } + local i 0 + CheckScalarOpt `macval(scalars)' + foreach addstat of local scalars { + local ++i + gettoken addstatname addstatlabel: addstat + local addstatlabel = substr(`"`macval(addstatlabel)'"',2,.) + if `: list posof `"`addstatname'"' in sta' continue + if `"`addstatname'"'=="N" & "`obs'"=="" & "`obslast'"!="" continue + if trim(`"`macval(addstatlabel)'"')=="" local addstatlabel `addstatname' + local addstatfmt: word `i' of `sfmt' + if `"`addstatfmt'"'=="" { + local addstatfmt: word `: list sizeof sfmt' of `sfmt' + } + local sta `sta' `addstatname' + local stalab `"`macval(stalab)' `"`macval(addstatlabel)'"'"' + local stafmt `stafmt' `addstatfmt' + } + if "`obs'"=="" & "`obslast'"!="" { + local sta `sta' N + local stalab `"`macval(stalab)' `"`macval(obslab)'"'"' + local stafmt `stafmt' %18.0g + } + if "`sta'"!="" { + local stats stats(`sta', fmt(`stafmt') labels(`macval(stalab)')) + } + } + +// table header + if `"`macval(mlabels)'"'=="" { + if "`mode0'"=="tex" local mspan " span prefix(\multicolumn{@span}{c}{) suffix(})" + if `"`depvars'"'!="" { + local mlabels `"mlabels(, depvar`mspan')"' + } + if `"`nomtitles'"'!="" local mlabels `"mlabels(none)"' + if "`mtitles'"!="" { + local mlabels `"mlabels(, titles`mspan')"' + } 
+ if `"`macval(mtitles2)'"'!="" { + local mlabels `"mlabels(`macval(mtitles2)', titles`mspan')"' + } + } + if `"`macval(collabels)'"'=="" & `nocellsopt' & `matrixmode'==0 & "`plain'"=="" { + local collabels `"collabels(none)"' + } + if "`mode0'"=="tex" & "`numbers'"!="" { + local numbers "numbers(\multicolumn{@span}{c}{( )})" + } + +// pre-/posthead, pre-/postfoot, gaps and lines +// - complete note + if `"`macval(thenote)'"'!="" { + local thenote `"`"`macval(thenote)'"'"' + } + if `"`macval(note)'"'!="" { + local thenote `""@note""' + } + if `"`macval(addnotes)'"'!="" { + if index(`"`macval(addnotes)'"', `"""')==0 { + local addnotes `"`"`macval(addnotes)'"'"' + } + local thenote `"`macval(thenote)' `macval(addnotes)'"' + } + if "`legend'"!="" { + if ("`margin'"!="" | `"`margin2'"'!="") & /// + "`nomargin'"=="" & "`nodiscrete'"=="" { + local thenote `"`macval(thenote)' "@discrete""' + } + if "`star'"!="" | `nocellsopt'==0 { + local thenote `"`macval(thenote)' "@starlegend""' + } + } +// - mode specific settings + if "`star'"!="" { + if `"`macval(star2)'"'!="" { + FormatStarSym "`mode0'" `"`macval(star2)'"' + local `mode'_starlevels `"`macval(star2)'"' + } + if `"`macval(starlevels)'"'=="" { + local starlevels `"starlevels(`macval(`mode'_starlevels)'`macval(`mode'_starlevlab)')"' + } + } + foreach opt in begin delimiter end substitute interaction { + if `"`macval(`opt')'"'=="" & `"``mode'_`opt''"'!="" { + local `opt' `"`opt'(``mode'_`opt'')"' + } + } + if "`onecell'"!="" { + if `"`macval(incelldelimiter)'"'=="" { + local incelldelimiter `"incelldelimiter(``mode'_incelldel')"' + } + } + if "`noabbrev'`abbrev'"=="" { + local abbrev ``mode'_abbrev' + } + if `"`fragment'"'=="" { + if `"`fonttbl'"'!="" { + local rtf_fonttbl `"`fonttbl'"' + } + if "`page'"!="" { + local texclass "{article}" + if "`standalone'"!="" local texclass "`standalone2'{standalone}" + else local texclass "{article}" + if `"`page2'"'!="" { + local texpkgs `""\usepackage{`page2'}""' + } + local 
opening `"``mode'_open0'"' + } + if "`mode0'"=="tex" { + if (`"`macval(title)'"'!="" | "`float'"!="") & "`nofloat'"=="" { + local opening `"`macval(opening)' ``mode'_open'"' + } + else if "`star'"!="" { + local opening `"`macval(opening)' "{""' + } + if "`star'"!="" { + local opening `"`macval(opening)' "\def\sym#1{\ifmmode^{#1}\else\(^{#1}\)\fi}""' + } + if `"`macval(title)'"'!="" & "`longtable'"=="" { + local opening `"`macval(opening)' `"``mode'_caption'"'"' + } + } + else { + local opening `"`macval(opening)' ``mode'_open'"' + if `"`macval(title)'"'!="" { + local opening `"`macval(opening)' ``mode'_caption'"' + } + } + if "`mode0'"=="tex" { + if `"`labcol2'"'!="" local lstubtex "lc" + else local lstubtex "l" + if `"`width'"'!="" local extracolsep "@{\hskip\tabcolsep\extracolsep\fill}" + if `matrixmode' { + if `"`macval(alignment)'"'!="" { + local opening `"`macval(opening)' `"``mode'_open2'{`extracolsep'`lstubtex'`macval(alignment)'}"'"' + } + else { + MakeTeXColspecMat, `anything' + local opening `"`macval(opening)' `"``mode'_open2'{`extracolsep'`lstubtex'`value'}"'"' + } + } + else { + if `"`macval(alignment)'"'!="" { + local opening `"`macval(opening)' `"``mode'_open2'{`extracolsep'`lstubtex'*{@E}{`macval(alignment)'}}"'"' + } + else { + if `nocellsopt' { + MakeTeXColspec "`wide'" "`not'" "`star'" "`stardetach'" "`staraux'" + } + else { + MakeTeXColspecAlt, `cells' + } + local opening `"`macval(opening)' `"``mode'_open2'{`extracolsep'`lstubtex'*{@E}{`value'}}"'"' + } + } + if "`longtable'"!="" { + if `"`macval(title)'"'!="" { + local opening `"`macval(opening)' `"``mode'_caption'\\\"'"' + } + } + } + else { + local opening `"`macval(opening)' ``mode'_open2'"' + } + if "`mode0'"=="html" { + local brr + foreach chunk of local thenote { + local closing `"`macval(closing)' `"`brr'`macval(chunk)'"'"' + local brr "
" + } + if `"`macval(closing)'"'!="" { + local closing `""" `macval(closing)' """' + } + } + else if "`mode0'"=="tex" { + foreach chunk of local thenote { + local closing `"`macval(closing)' `"\multicolumn{@span}{l}{\footnotesize `macval(chunk)'}\\\"'"' + } + } + else if "`mode0'"=="csv" { + foreach chunk of local thenote { + local closing `"`macval(closing)' `"`csvlhs'`macval(chunk)'""'"' + } + } + else if "`mode0'"=="rtf" { + foreach chunk of local thenote { + local closing `"`macval(closing)' `"{\pard\ql\fs20 `macval(chunk)'\par}"'"' + } + } + else if "`mode0'"=="mmd" { + local n_chunks: list sizeof thenote + if `n_chunks' { + local closing `"`macval(closing)' """' + local i 0 + foreach chunk of local thenote { + local ++i + if `i'<`n_chunks' { + local chunk `"`macval(chunk)'
"' + } + local closing `"`macval(closing)' `"`macval(chunk)'"'"' + } + } + } + else { + local closing `"`macval(thenote)'"' + } + local closing `"`macval(closing)' ``mode'_close2'"' + if "`mode0'"=="tex" { + if (`"`macval(title)'"'!="" | "`float'"!="") & "`nofloat'"=="" { + local closing `"`macval(closing)' ``mode'_close'"' + } + else if "`star'"!="" { + local closing `"`macval(closing)' "}""' + } + } + else { + local closing `"`macval(closing)' ``mode'_close'"' + } + if "`page'`standalone'"!="" { + local closing `"`macval(closing)' ``mode'_close0'"' + } + local toprule `"``mode'_toprule'"' + local bottomrule `"``mode'_bottomrule'"' + local topgap `"``mode'_topgap'"' + local bottomgap `"``mode'_bottomgap'"' + } + local midrule `"``mode'_midrule'"' + local midgap `"``mode'_midgap'"' + local eqrule `"``mode'_eqrule'"' +// - compose prehead() + if `"`macval(prehead)'"'=="" { + if `"`lines'"'!="" { + local opening `"`macval(opening)' `macval(toprule)'"' + } + else if `"`gaps'"'!="" { + local opening `"`macval(opening)' `macval(topgap)'"' + } + SaveRetok `macval(opening)' + local opening `"`macval(value)'"' + if `"`macval(opening)'"'!="" { + local prehead `"prehead(`macval(opening)')"' + } + } +// - compose posthead() + if `"`macval(posthead)'"'=="" { + if `"`lines'"'!="" { + local posthead `"posthead(`macval(midrule)')"' + } + else if `"`gaps'"'!="" { + local posthead `"posthead(`macval(midgap)')"' + } + } +// - compose prefoot() + if `"`macval(prefoot)'"'=="" & `"`macval(stats)'"'!="" { + if `"`lines'"'!="" { + local prefoot `"prefoot(`macval(midrule)')"' + } + else if `"`gaps'"'!="" { + local prefoot `"prefoot(`macval(midgap)')"' + } + if `"`cells'"'=="cells(none)" local prefoot + } +// - compose postfoot() + if `"`macval(postfoot)'"'=="" { + if `"`lines'"'!="" { + local closing `"`macval(bottomrule)' `macval(closing)'"' + } + else if `"`gaps'"'!="" { + local closing `"`macval(bottomgap)' `macval(closing)'"' + } + SaveRetok `macval(closing)' + local closing 
`"`macval(value)'"' + if `"`macval(closing)'"'!="" { + local postfoot postfoot(`macval(closing)') + } + } +// - varlabels + if `"`macval(varlabels)'"'=="" { + if `"`gaps'"'!="" { + local varl `", end("" `macval(midgap)') nolast"' + } + if "`label'"!="" { + local varl `"_cons Constant`macval(varl)'"' + } + if `"`macval(coeflabels)'"'!="" { + local varl `"`macval(coeflabels)' `macval(varl)'"' + } + if trim(`"`macval(varl)'"')!="" { + local varlabels varlabels(`macval(varl)') + } + } +// - equation labels + if ("`eqlines'"!="" | `"`gaps'"'!="") & "`unstack'"=="" { + if trim(`"`eqlabels'"')!="none" { + ParseEqLabels `macval(eqlabels)' + if `eqlabelsok' { + _parse comma eqllhs eqlrhs : eqlabels + if `"`eqlrhs'"'=="" local eqlabelscomma ", " + else local eqlabelscomma " " + if "`eqlines'"!=""{ + local eqlabels `"`macval(eqlabels)'`eqlabelscomma'`macval(eqrule)' nofirst"' + } + else if `"`gaps'"'!="" { + local eqlabels `"`macval(eqlabels)'`eqlabelscomma'begin(`macval(midgap)' "") nofirst"' + } + } + } + } + if `"`macval(eqlabels)'"'!="" { + local eqlabels `"eqlabels(`macval(eqlabels)')"' + } + +// noconstant option + if `"`drop'"'=="" { + if "`noconstant'"!="" { + local drop drop(_cons, relax) + } + } + +// compute beta coefficients (run estadd to add e(beta)) + if "`beta'"!="" { + local estnames `"`anything'"' + if `"`estnames'"'=="" { + capt est_expand $eststo + if !_rc { + local estnames `"$eststo"' + } + } + version `caller': estadd beta, replace: `estnames' + } + +// use tempfile for new table + if `"`appendfile'"'!="" { + local using `"using `"`appendfile'"'"' + } + +// execute estout + if `"`varwidth'"'!="" local varwidth `"varwidth(`varwidth')"' + if `"`modelwidth'"'!="" local modelwidth `"modelwidth(`modelwidth')"' + if `"`style'"'=="" { + if "`mode'"=="mmd" local style "style(mmd)" + else local style "style(esttab)" + } + CleanEstoutCmd `anything' `using' , /// + `macval(cells)' `drop' `nomargin' `margin' `margin2' `noeform' `eform' /// + `nodiscrete' 
`macval(stats)' `stardetach' `macval(starlevels)' /// + `varwidth' `modelwidth' `noabbrev' `abbrev' `unstack' `macval(begin)' /// + `macval(delimiter)' `macval(end)' `macval(incelldelimiter)' `smcltags' /// + `macval(title)' `macval(prehead)' `macval(posthead)' `macval(prefoot)' /// + `macval(postfoot)' `label' `macval(varlabels)' `macval(mlabels)' `nonumbers' /// + `numbers' `macval(collabels)' `macval(eqlabels)' `macval(mgroups)' /// + `macval(note)' `macval(labcol2)' `macval(substitute)' `macval(interaction)' /// + `append' `notype'`type' `outfilenoteoff2' level(`level') `style' /// + `macval(options)' + if "`noisily'"!="" { + gettoken chunk rest: cmd, parse(",") + di as txt _asis `"`chunk'"' _c + gettoken chunk rest: rest, bind + while `"`macval(chunk)'"'!="" { + di as txt _asis `" `macval(chunk)'"' + gettoken chunk rest: rest, bind + } + } + `macval(cmd)' + +// insert new table into existing document (tex, html, rtf) + if `"`appendfile'"'!="" { + local enddoctex "\end{document}" + local enddochtml "" + local enddocrtf "}" + local enddoc "`enddoc`mode0''" + tempname fh + file open `fh' using `using0', read write + file seek `fh' query + local loc = r(loc) + file read `fh' line + while r(eof)==0 { + if `"`line'"'=="`enddoc'" { + if "`mode'"=="rtf" { + file seek `fh' query + local loc0 = r(loc) + file read `fh' line + if r(eof)==0 { + local loc = `loc0' + continue + } + } + continue, break + } + file seek `fh' query + local loc = r(loc) + file read `fh' line + } + file seek `fh' `loc' + tempname new + file open `new' `using', read + file read `new' line + while r(eof)==0 { + file write `fh' `"`macval(line)'"' _n + file read `new' line + } + file close `fh' + file close `new' + if "`outfilenoteoff'"=="" { + di as txt `"(output written to {browse `using0'})"' + } + } +end + +program _more_syntax +// using subroutine (rather than second syntax call) to preserve 'using' + local theoptions /// + NODEPvars DEPvars /// + NOPArentheses PArentheses /// + BRackets /// + 
NONOTEs NOTEs /// without s in helpfile + LONGtable /// + NOFLOAT float /// + ONEcell /// + NOEQLInes /// + NOOUTFILENOTEOFF outfilenoteoff + syntax [, `theoptions' * ] + foreach opt of local theoptions { + local opt = lower("`opt'") + c_local `opt' "``opt''" + } + c_local options `"`macval(options)'"' +end + +program _estout_options + syntax [, /// + Cells(passthru) /// + Drop(passthru) /// + /// Keep(string asis) /// + /// Order(string asis) /// + /// REName(passthru) /// + /// Indicate(string asis) /// + /// TRansform(string asis) /// + /// EQuations(passthru) /// + NOEFORM eform ///EFORM2(string) /// + NOMargin Margin Margin2(passthru) /// + NODIscrete /// DIscrete(string asis) /// + /// MEQs(string) /// + /// NODROPPED dropped DROPPED2(string) /// + level(numlist max=1 int >=10 <=99) /// + Stats(passthru) /// + STARLevels(passthru) /// + /// NOSTARDetach /// + STARDetach /// + /// STARKeep(string asis) /// + /// STARDrop(string asis) /// + VARwidth(str) /// + MODELwidth(str) /// + NOABbrev ABbrev /// + /// NOUNStack + UNStack /// + BEGin(passthru) /// + DELimiter(passthru) /// + INCELLdelimiter(passthru) /// + end(passthru) /// + /// DMarker(string) /// + /// MSign(string) /// + /// NOLZ lz /// + SUBstitute(passthru) /// + INTERACTion(passthru) /// + TItle(passthru) /// + NOLEgend LEgend /// + PREHead(passthru) /// + POSTHead(passthru) /// + PREFoot(passthru) /// + POSTFoot(passthru) /// + /// HLinechar(string) /// + /// NOLabel + Label /// + VARLabels(passthru) /// + /// REFcat(string asis) /// + MLabels(passthru) /// + NONUMbers NUMbers ///NUMbers2(string asis) /// + COLLabels(passthru) /// + EQLabels(string asis) /// + MGRoups(passthru) /// + LABCOL2(passthru) /// + /// NOReplace Replace /// + /// NOAppend + Append /// + NOTYpe TYpe /// + /// NOSHOWTABS showtabs /// + /// TOPfile(string) /// + /// BOTtomfile(string) /// + STYle(passthru) /// + /// DEFaults(string) /// + /// NOASIS asis /// + /// NOWRAP wrap /// + /// NOSMCLTAGS smcltags /// + /// 
NOSMCLRules SMCLRules /// + /// NOSMCLMIDRules SMCLMIDRules /// + /// NOSMCLEQRules SMCLEQRules /// + note(passthru) /// + * ] + foreach opt in /// + cells drop noeform eform nomargin margin margin2 nodiscrete /// + level stats starlevels stardetach varwidth modelwidth unstack /// + noabbrev abbrev begin delimiter incelldelimiter end substitute /// + interaction title nolegend legend prehead posthead prefoot postfoot /// + label varlabels mlabels labcol2 nonumbers numbers collabels eqlabels /// + mgroups append notype type style note options { + c_local `opt' `"`macval(`opt')'"' + } +end + +program MatrixMode + capt syntax [, Matrix(str asis) e(str asis) r(str asis) ] + if _rc | `"`matrix'`e'`r'"'=="" { + c_local matrixmode 0 + exit + } + c_local matrixmode 1 +end + +prog NotBothAllowed + args opt1 opt2 + if `"`opt1'"'!="" { + if `"`opt2'"'!="" { + di as err `"options `opt1' and `opt2' not both allowed"' + exit 198 + } + } +end + +prog SwitchOnIfEmpty + args opt1 opt2 + if `"`opt2'"'=="" { + c_local `opt1' `opt1' + } +end + +prog _getfilesuffix, rclass // based on official _getfilename.ado + version 8 + gettoken filename rest : 0 + if `"`rest'"' != "" { + exit 198 + } + local hassuffix 0 + gettoken word rest : filename, parse(".") + while `"`rest'"' != "" { + local hassuffix 1 + gettoken word rest : rest, parse(".") + } + if `"`word'"'=="." 
{ + di as err `"incomplete filename; ends in ."' + exit 198 + } + if index(`"`word'"',"/") | index(`"`word'"',"\") local hassuffix 0 + if `hassuffix' return local suffix `".`word'"' + else return local suffix "" +end + +prog FormatStarSym + args mode list + if inlist("`mode'","rtf","html","tex") { + if "`mode'"=="rtf" { + local prefix "{\super " + local suffix "}" + } + else if "`mode'"=="html" { + local prefix "" + local suffix "" + } + else if "`mode'"=="tex" { + local prefix "\sym{" + local suffix "}" + } + local odd 1 + foreach l of local list { + if `odd' { + local l `"`"`prefix'`macval(l)'`suffix'"'"' + local odd 0 + } + else local odd 1 + local newlist `"`macval(newlist)'`space'`macval(l)'"' + local space " " + } + c_local star2 `"`macval(newlist)'"' + } + //else do noting +end + +prog CheckScalarOpt + capt syntax [anything] + if _rc error 198 +end + +program MakeTeXColspecMat + capt syntax [, Matrix(str asis) e(str asis) r(str asis) ] + ParseMatrixOpt `matrix'`e'`r' + if `"`e'"'!="" local name "e(`name')" + else if `"`r'"'!="" local name "r(`name')" + confirm matrix `name' + tempname bc + mat `bc' = `name' + if "`transpose'"=="" local cols = colsof(`bc') + else local cols = rowsof(`bc') + c_local value "*{`cols'}{c}" +end +program ParseMatrixOpt + syntax name [, Fmt(str asis) Transpose ] + c_local name `"`namelist'"' + c_local fmt `"`fmt'"' + c_local transpose `"`transpose'"' +end + +prog MakeTeXColspec + args wide not star detach aux + if "`star'"!="" & "`detach'"!="" & "`aux'"=="" local value "r@{}l" + else local value "c" + if "`wide'"!="" & "`not'"=="" { + if "`star'"!="" & "`detach'"!="" & "`aux'"!="" local value "`value'r@{}l" + else local value "`value'c" + } + c_local value "`value'" +end + +prog MakeTeXColspecAlt + syntax, cells(string asis) + local count 1 + while `count' { + local cells: subinstr local cells ") (" ")_(", all // preserve space in ") (" + local cells: subinstr local cells "] (" "]_(", all // preserve space in ") [" + local cells: 
subinstr local cells " (" "(", all count(local count) + } + local cells: subinstr local cells ")_(" ") (", all // restore space in ") (" + local cells: subinstr local cells "]_(" "] (", all // restore space in ") [" + local count 1 + while `count' { + local cells: subinstr local cells " [" "[", all count(local count) + } + local count 1 + while `count' { + local cells: subinstr local cells " &" "&", all count(local count) + } + local count 1 + while `count' { + local cells: subinstr local cells "& " "&", all count(local count) + } + local count 1 + while `"`macval(cells)'"'!="" { + gettoken row cells : cells, match(par) + local size 0 + gettoken chunk row : row, bind + while `"`macval(chunk)'"'!="" { + local ++size + gettoken chunk row : row, bind + } + local count = max(`count',`size') + } + c_local value: di _dup(`count') "c" +end + +prog SaveRetok + gettoken chunk 0: 0, q + local value `"`macval(chunk)'"' + gettoken chunk 0: 0, q + while `"`macval(chunk)'"'!="" { + local value `"`macval(value)' `macval(chunk)'"' + gettoken chunk 0: 0, q + } + c_local value `"`macval(value)'"' +end + +prog CleanEstoutCmd + syntax [anything] [using] [ , * ] + local cmd estout + if `"`macval(anything)'"'!="" { + local cmd `"`macval(cmd)' `macval(anything)'"' + } + if `"`macval(using)'"'!="" { + local cmd `"`macval(cmd)' `macval(using)'"' + } + if `"`macval(options)'"'!="" { + local cmd `"`macval(cmd)', `macval(options)'"' + } + c_local cmd `"`macval(cmd)'"' +end + +prog ParseEqLabels + syntax [anything] [, Begin(passthru) NOReplace Replace NOFirst First * ] + c_local eqlabelsok = `"`begin'`noreplace'`replace'`nofirst'`first'"'=="" +end diff --git a/110/replication_package/replication/ado/plus/e/esttab.hlp b/110/replication_package/replication/ado/plus/e/esttab.hlp new file mode 100644 index 0000000000000000000000000000000000000000..f4965e643eea8a5e1e85628a50fa7e546a6cc9ed --- /dev/null +++ b/110/replication_package/replication/ado/plus/e/esttab.hlp @@ -0,0 +1,963 @@ +{smcl} +{* 
10jun2022}{...} +{hi:help esttab}{right:also see: {helpb estout}, {helpb eststo}, {helpb estadd}, {helpb estpost}} +{right: {browse "http://repec.sowi.unibe.ch/stata/estout/"}} +{hline} + +{title:Title} + +{p 4 4 2}{hi:esttab} {hline 2} Display formatted regression table + + +{title:Table of contents} + + {help esttab##syn:Syntax} + {help esttab##des:Description} + {help esttab##opt:Options} + {help esttab##exa:Examples} + {help esttab##aut:Backmatter} + +{marker syn} +{title:Syntax} + +{p 8 15 2} +{cmd:esttab} [ {it:namelist} ] [ {cmd:using} {it:filename} ] [ {cmd:,} +{it:options} ] + + +{p 4 4 2}where {it:namelist} is a name, a list of names, or {cmd:_all}. The +{cmd:*} and {cmd:?} wildcards are allowed in {it:namelist}. A name may also be {cmd:.}, +meaning the current (active) estimates. + + + {it:options}{col 26}description + {hline 70} + {help esttab##main:Main} + {cmd:b(}{it:{help esttab##fmt:fmt}}{cmd:)}{col 26}{...} +specify format for point estimates + {cmd:beta}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}]{col 26}{...} +display beta coefficients instead of point est's + {cmd:main(}{it:name} [{it:{help esttab##fmt:fmt}}]{cmd:)}{col 26}{...} +display contents of {cmd:e(}{it:name}{cmd:)} instead of point e's + {cmd:t(}{it:{help esttab##fmt:fmt}}{cmd:)}{col 26}{...} +specify format for t-statistics + {cmd:abs}{col 26}{...} +use absolute value of t-statistics + {cmd:not}{col 26}{...} +suppress t-statistics + {cmd:z}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}]{col 26}{...} +display z-statistics (affects label only) + {cmd:se}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}]{col 26}{...} +display standard errors instead of t-statistics + {cmd:p}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}]{col 26}{...} +display p-values instead of t-statistics + {cmd:ci}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}]{col 26}{...} +display confidence intervals instead of t-stat's + {cmd:aux(}{it:name} [{it:{help esttab##fmt:fmt}}]{cmd:)}{col 26}{...} +display contents of 
{cmd:e(}{it:name}{cmd:)} instead of t-stat's + [{ul:{cmd:no}}]{cmdab:con:stant}{col 26}{...} +do not/do report the intercept + + {help esttab##stars:Significance stars} + [{cmd:no}]{cmd:star}[{cmd:(}{it:list}{cmd:)}]{col 26}{...} +do not/do report significance stars + {cmd:staraux}{col 26}{...} +attach stars to t-stat's instead of point est's + + {help esttab##stat:Summary statistics} + {cmd:r2}|{cmd:ar2}|{cmd:pr2}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}]{col 26}{...} +display (adjusted, pseudo) R-squared + {cmd:aic}|{cmd:bic}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}]{col 26}{...} +display Akaike's or Schwarz's information crit. + {cmdab:sca:lars:(}{it:list}{cmd:)}{col 26}{...} +display any other scalars contained in {cmd:e()} + {cmd:sfmt(}{it:{help esttab##fmt:fmt}} [{it:...}]{cmd:)}{col 26}{...} +set format(s) for {cmd:scalars()} + {cmd:noobs}{col 26}{...} +do not display the number of observations + {cmd:obslast}{col 26}{...} +place the number of observations last + + {help esttab##layout:Layout} + {cmd:wide}{col 26}{...} +place point est's and t-stat's beside one another + {cmdab:one:cell}{col 26}{...} +combine point est's and t-stat's in a single cell + [{ul:{cmd:no}}]{cmdab:pa:rentheses}{col 26}{...} +do not/do print parentheses around t-statistics + {cmdab:br:ackets}{col 26}{...} +use brackets instead of parentheses + [{ul:{cmd:no}}]{cmdab:gap:s}{col 26}{...} +suppress/add vertical spacing + [{ul:{cmd:no}}]{cmdab:li:nes}{col 26}{...} +suppress/add horizontal lines + {cmdab:noeqli:nes}{col 26}{...} +suppress lines between equations + {cmd:compress}{col 26}{...} +reduce horizontal spacing + {cmd:plain}{col 26}{...} +produce a minimally formatted table + + {help esttab##label:Labeling} + {cmdab:l:abel}{col 26}{...} +make use of variable labels + {cmdab:interact:ion:(}{it:str}{cmd:)}{col 26}{...} +specify interaction operator + {cmdab:ti:tle:(}{it:string}{cmd:)}{col 26}{...} +specify a title for the table + {cmdab:mti:tles}[{cmd:(}{it:list}{cmd:)}]{col 
26}{...} +specify model titles to appear in table header + {cmdab:nomti:tles}{col 26}{...} +disable model titles + [{ul:{cmd:no}}]{cmdab:dep:vars}{col 26}{...} +do not/do use dependent variables as model titles + [{ul:{cmd:no}}]{cmdab:num:bers}{col 26}{...} +do not/do print model numbers in table header + {cmdab:coef:labels:(}{it:list}{cmd:)}{col 26}{...} +specify labels for coefficients + [{ul:{cmd:no}}]{cmdab:note:s}{col 26}{...} +suppress/add notes in the table footer + {cmdab:addn:otes:(}{it:list}{cmd:)}{col 26}{...} +add lines at the end of the table + + {help esttab##format:Document format} + {cmd:smcl} | {cmdab:fix:ed} | {cmd:tab} | {cmd:csv} | {cmdab:sc:sv} | {cmd:rtf} | {cmdab:htm:l} | {cmd:tex} | {cmdab:bookt:abs} | {cmdab:md} + {col 26}{...} +set the document format ({cmd:smcl} is the default) + {cmdab:f:ragment}{col 26}{...} +suppress table opening and closing (LaTeX, HTML) + [{cmd:no}]{cmd:float}{col 26}{...} +whether to use a float environment or not (LaTeX) + {cmd:page}[{cmd:(}{it:packages}{cmd:)}]{col 26}{...} +add page opening and closing (LaTeX, HTML) + {cmdab:stand:alone}[{cmd:(}{it:opts}{cmd:)}]{col 26}{...} +use class {cmd:standalone} rather than {cmd:article} (LaTeX) + {cmdab:align:ment(}{it:string}{cmd:)}{col 26}{...} +set alignment within columns (LaTeX, HTML, RTF) + {cmdab:width(}{it:string}{cmd:)}{col 26}{...} +set width of table (LaTeX, HTML) + {cmdab:long:table}{col 26}{...} +multi-page table (LaTeX) + {cmd:fonttbl(}{it:string}{cmd:)}{col 26}{...} +set custom font table (RTF) + {cmd:nortfencode}{col 26}{...} +do not escape non-ASCII characters (RTF) + + {help esttab##output:Output} + {cmdab:r:eplace}{col 26}{...} +overwrite an existing file + {cmdab:a:ppend}{col 26}{...} +append the output to an existing file + {cmdab:ty:pe}{col 26}{...} +force printing the table in the results window + {cmdab:n:oisily}{col 26}{...} +display the executed {helpb estout} command + + {help esttab##advanced:Advanced} + {cmdab:d:rop:(}{it:list}{cmd:)}{col 
26}{...} +drop individual coefficients + {cmdab:noomit:ted}{col 26}{...} +drop omitted coefficients + {cmdab:nobase:levels}{col 26}{...} +drop base levels of factor variables + {cmdab:k:eep:(}{it:list}{cmd:)}{col 26}{...} +keep individual coefficients + {cmdab:o:rder:(}{it:list}{cmd:)}{col 26}{...} +change order of coefficients + {cmdab:eq:uations:(}{it:list}{cmd:)}{col 26}{...} +match the models' equations + {cmd:eform}{col 26}{...} +report exponentiated coefficients + {cmdab:uns:tack}{col 26}{...} +place multiple equations in separate columns + {it:estout_options}{col 26}{...} +any other {helpb estout} options + {hline 70} + +{marker des} +{title:Description} + +{p 4 4 2} +{cmd:esttab} is a wrapper for {helpb estout}. It produces a +pretty-looking publication-style regression table from stored +estimates without much typing. The compiled table is displayed in the +Stata results window or, optionally, written to a text file specified +by {cmd:using} {it:filename}. If {it:filename} is specified without +suffix, a default suffix is added depending on the specified document +format (".smcl" for {cmd:smcl}, ".txt" for {cmd:fixed} and {cmd:tab}, ".csv" for {cmd:csv} +and {cmd:scsv}, ".rtf" for {cmd:rft}, ".html" for {cmd:html}, +".tex" for {cmd:tex} and {cmd:booktabs}, ".md" for {cmd:md}). + +{p 4 4 2} +{it:namelist} provides the names of the stored estimation sets to be +tabulated. You may use the {cmd:*} and {cmd:?} wildcards in +{it:namelist}. If {it:namelist} is omitted, {cmd:esttab} tabulates the +estimation sets stored by {cmd:eststo} (see help {helpb eststo}) +or, if no such estimates are present, the currently active +estimates (i.e. the model fit last). + +{p 4 4 2} +See help {helpb estimates} for information about storing estimation +results. An alternative to the {cmd:estimates store} command is +provided by {helpb eststo}. 
+ +{p 4 4 2} +{cmd:esttab} can also be used to tabulate a Stata matrix applying syntax +{bind:{cmd:esttab} {cmdab:m:atrix:(}{it:name}{cmd:)}}, where {it:name} +is the name of the matrix. Furthermore, an {cmd:e()}-matrix or {cmd:r()}-matrix +can be tabulated specifying {cmd:esttab e(}{it:name}{cmd:)} or +{cmd:esttab r(}{it:name}{cmd:)}. Most options under the headings +'Main', 'Significance stars', and 'Summary statistics' are irrelevant +in this case. See help {helpb estout} for further details on tabulating matrices. + +{marker opt} +{title:Options} +{marker main} +{dlgtab:Main} + +{p 4 8 2} +{cmd:b(}{it:{help esttab##fmt:fmt}}{cmd:)} sets the numerical display format +for the point estimates. The default format is {cmd:a3}. (See +{help esttab##fmt:Numerical formats} below for details on available +formats.) + +{p 4 8 2} +{cmd:beta}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}] requests that +standardized beta coefficients be displayed in place of the raw point +estimates and, optionally, sets the display format (the default is to +print three decimal places). Note that {cmd:beta} causes the +intercept to be dropped from the table (unless {cmd:constant} is +specified).{p_end} +{marker main} +{p 4 8 2} +{cmd:main(}{it:name} [{it:{help esttab##fmt:fmt}}]{cmd:)} requests that +the statistics stored in {cmd:e(}{it:name}{cmd:)} be displayed in +place of the point estimates and, optionally, sets the display format +(the default is to use the display format for point estimates). For +example, {cmd:e(}{it:name}{cmd:)} may contain statistics added by +{cmd:estadd} (see help {helpb estadd}). + +{p 4 8 2} +{cmd:t(}{it:{help esttab##fmt:fmt}}{cmd:)} sets the display format for +t-statistics. The default is to display two decimal places. + +{p 4 8 2} +{cmd:abs} causes absolute values of t-statistics to be reported. + +{p 4 8 2} +{cmd:not} suppresses the printing of t-statistics. 
+ +{p 4 8 2} +{cmd:z}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}] requests that +z-statistics be displayed. z-statistics are the same as t-statistics. Hence, +specifying {cmd:z} does not change the table contents, it only changes the +label. + +{p 4 8 2} +{cmd:se}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}] requests that +standard errors be displayed in place of t-statistics and, +optionally, sets the display format (the default is to use the +display format for point estimates). + +{p 4 8 2} +{cmd:p}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}] requests that +p-values be displayed in place of t-statistics and, optionally, sets +the display format (the default is to print three decimal places) + +{p 4 8 2} +{cmd:ci}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}] requests that +confidence intervals be displayed in place of t-statistics and, +optionally, sets the display format (the default is to use the +display format for point estimates). {cmd:level(}{it:#}{cmd:)} +assigns the confidence level, in percent. The default is +{cmd:level(95)} or as set by {helpb set level}.{p_end} +{marker aux} +{p 4 8 2} +{cmd:aux(}{it:name} [{it:{help esttab##fmt:fmt}}]{cmd:)} requests that +the statistics stored in {cmd:e(}{it:name}{cmd:)} be displayed in +place of t-statistics and, optionally, sets the display format (the +default is to use the display format for point estimates). For +example, {cmd:e(}{it:name}{cmd:)} may contain statistics added by +{cmd:estadd} (see help {helpb estadd}, if installed). + +{p 4 8 2} +{cmd:noconstant} causes the intercept be dropped from the table. +Specify {cmd:constant} to include the constant in situations where it +is dropped by default. + +{marker stars} +{dlgtab:Significance stars} + +{p 4 8 2} +{cmd:star}[{cmd:(}{it:symbol} {it:level} [{it:...}]{cmd:)}] causes +stars denoting the significance of the coefficients to be printed +next to the point estimates. This is the default. Type {cmd:nostar} +to suppress the stars. 
The default symbols and thresholds are: +{cmd:*} for p<.05, {cmd:**} for p<.01, and {cmd:***} for p<.001. +Alternatively, for example, type {bind:{cmd:star(+ 0.10 * 0.05)}} to +set the following thresholds: {cmd:+} for p<.10 and {cmd:*} for +p<.05. Note that the thresholds must lie in the (0,1] interval and +must be specified in descending order. + +{p 4 8 2} +{cmd:staraux} causes the significance stars be printed next to the +t-statistics (or standard errors, etc.) instead of the point estimates. + +{marker stat} +{dlgtab:Summary statistics} + +{p 4 8 2} +{cmd:r2}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}], +{cmd:ar2}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}], and +{cmd:pr2}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}] +include the R-squared, the adjusted R-squared, and the +pseudo-R-squared in the table footer and, optionally, set the +corresponding display formats (the default is to display three +decimal places). + +{p 4 8 2} +{cmd:aic}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}] and +{cmd:bic}[{cmd:(}{it:{help esttab##fmt:fmt}}{cmd:)}] +include Akaike's and Schwarz's information criterion in the table +footer and, optionally, set the corresponding display formats (the +default is to use the display format for point estimates).{p_end} +{marker scalars} +{p 4 8 2} +{cmd:scalars(}{it:list}{cmd:)} may be used to add other +{cmd:e()}-scalars to the table footer (type {cmd:ereturn list} to +display a list of available {cmd:e()}-scalars after fitting a model; +see help {helpb ereturn}). For example, {cmd:scalars(df_m)} would +report the model degrees of freedom for each model. {it:list} may be +a simple list of names of {cmd:e()}-scalars, e.g. + + {com}. esttab, scalars(ll_0 ll chi2){txt} + +{p 8 8 2} +or, alternatively, a list of quoted name-label pairs, e.g. + + {com}. 
esttab, scalars({bind:"ll Log lik."} {bind:"chi2 Chi-squared"}){txt} + +{p 4 8 2} +{cmd:sfmt(}{it:{help esttab##fmt:fmt}} [{it:...}]{cmd:)} sets the +display format(s) for the statistics specified in {cmd:scalars()} +(the default is to use the display format for point estimates). If +{cmd:sfmt()} contains less elements than {cmd:scalars()}, the last +specified format is used for the remaining scalars. That is, only one +format needs to be specified if the same format be used for all +scalars. + +{p 4 8 2} +{cmd:noobs} suppresses displaying information on the number of +observations. The default is to report the number of observations for +each model in the table footer. + +{p 4 8 2} +{cmd:obslast} displays the number of observations in the last row of +the table footer. The default is to use the first row. + +{marker layout} +{dlgtab:Layout} +{marker wide} +{p 4 8 2} +{cmd:wide} causes point estimates and t-statistics (or standard errors, +etc.) to be printed beside one another instead of beneath one another. +{p_end} +{marker onecell} +{p 4 8 2} +{cmd:onecell} causes point estimates and t-statistics (or standard errors, +etc.) to be combined in a single table cell. This option is useful primarily +in {cmd:rtf} or {cmd:html} mode. In these modes a line break is +inserted between the two statistics. The benefit from using {cmd:onecell} +in {cmd:rtf} or {cmd:html} mode is that long coefficients labels do not +derange the table layout. The default for other modes is to insert +a blank between the statistics. Use {cmd:estout}'s +{helpb estout##incelldel:incelldelimiter()} option to change this. + +{p 4 8 2} +{cmd:parentheses} encloses t-statistics (or standard errors, etc.) in +parentheses. This is the default. Specify {cmd:noparentheses} to +suppress the parentheses. + +{p 4 8 2} +{cmd:brackets} uses square brackets, [], instead of parentheses. Note +that brackets are the default for confidence intervals. 
+ +{p 4 8 2} +{cmd:gaps} adds empty rows (or, more generally, additional vertical +space) between coefficients to increase readability (empty rows are +also inserted between the table's header, body, and footer, unless +{cmd:lines} is activated). This is the default unless {cmd:wide} or +{cmd:not} is specified. Type {cmd:nogaps} to suppress the extra +spacing. + +{p 4 8 2} +{cmd:lines} adds horizontal lines to the table separating the table's +header, body, and footer and, in the case of multiple +equation models, the equations. This is the default. Specify {cmd:nolines} +to suppress the lines. Lines are always suppressed in the {cmd:tab} +and {cmd:csv} modes. + +{p 4 8 2} +{cmd:noeqlines} suppresses the horizontal lines between equations +in the case of multiple equation models.{p_end} +{marker compress} +{p 4 8 2} +{cmd:compress} reduces the amount of horizontal spacing (so that more +models fit on screen without line breaking). The option has no effect +in the {cmd:tab} and {cmd:csv} modes. Furthermore, note that in the +TeX and HTML modes the {cmd:compress} option only changes the +arrangement the table's code, but not the look of the compiled +end-product. In {cmd:rtf}, however, {cmd:compress} changes the look +of the formatted table.{p_end} +{marker plain} +{p 4 8 2} +{cmd:plain} produces a minimally formatted table. It is a shorthand +to specifying {cmd:nostar}, {cmd:nodepvars}, {cmd:nonumbers}, +{cmd:noparentheses}, {cmd:nogaps}, {cmd:nolines} and {cmd:nonotes} +and setting all formats to {cmd:%9.0g}. Note that the disabled +options can be switched on again. For example, type + + {com}. esttab, plain star{txt} + +{p 8 8 2} +to produce a plain table including significance stars. + +{marker label} +{dlgtab:Labeling} + +{p 4 8 2} +{cmd:label} specifies that variable labels be used instead of +variable names (and estimation set titles be used instead of +estimation set names). Furthermore, {cmd:label} prints "Constant" +instead of "_cons". 
+ +{p 4 8 2} +{cmd:interaction(}{it:string}{cmd:)} specifies the string to be used +as delimiter for interaction terms (only relevant in Stata 11 or newer). The +default is {cmd:interaction(" # ")}. For {cmd:tex} and {cmd:booktabs} the +default is {cmd:interaction(" $\times$ ")}. +{p_end} +{marker title} +{p 4 8 2} +{cmd:title(}{it:string}{cmd:)} may be used to provide a title for the +table. If specified, {it:string} is printed at the top of the table. +Note that specifying a title causes the table to be set up as a +floating object in LaTeX mode (unless the {cmd:nofloat} option +is specified). You may want to set a label for +referencing in this case. For example, if you type +{cmd:title(...\label{tab1})}, then "\ref{tab1}" could be used in the +LaTeX document to point to the table. + +{p 4 8 2} +{cmd:mtitles}, without argument, specifies that for each model the title +(or, if empty, the name) of the stored estimation set be printed as the model's +title in the table header. If {cmd:mtitles} is omitted, the default is to +use name or label of the dependent variable as the model's title (see the +{cmd:depvar} option). Alternatively, use {cmd:mtitles(}{it:list}{cmd:)} +to specify a list of model titles. Enclose the titles +in double quotes if they contain spaces, +e.g. {bind:{cmd:mtitles("Model 1" "Model 2")}}. + +{p 4 8 2} +{cmd:nomtitles} suppresses printing of model titles. + +{p 4 8 2} +{cmd:depvars} prints the name (or label) of the (first) dependent +variable of a model as the model's title in the table header. This is +the default. Specify {cmd:nodepvars} to use the names of +the stored estimation sets as titles. + +{p 4 8 2} +{cmd:numbers} includes a row containing consecutive model numbers in +the table header. This is the default. Specify {cmd:nonumbers} to +suppress printing the model numbers. + +{p 4 8 2} +{cmd:coeflabels(}{it:name} {it:label} [...]{cmd:)} specifies labels +for the coefficients.
Specify names and labels in pairs and, if +necessary, enclose labels in double quotes, +e.g. {cmd:coeflabels(mpg Milage rep78 {bind:"Repair Record"})}. + +{p 4 8 2} +{cmd:notes} prints notes at the end of the table explaining the +significance symbols and the type of displayed statistics. This is +the default. Specify {cmd:nonotes} to suppress the notes. + +{p 4 8 2} +{cmd:addnotes(}{it:list}{cmd:)} may be used to add further lines of +text at the bottom of the table. Lines containing blanks must be +enclosed in double quotes, +e.g. {cmd:addnotes({bind:"Line 1"} {bind:"Line 2"})}. + +{marker format} +{dlgtab:Document format} + +{p 4 8 2} +{cmd:smcl}, {cmd:fixed}, {cmd:tab}, {cmd:csv}, {cmd:scsv}, {cmd:rtf}, +{cmd:html}, {cmd:tex}, {cmd:booktabs}, and {cmd:md} choose the table's basic +output format. The default format is {cmd:smcl} unless +{cmd:using} is specified, in which case the default format +depends on the filename's suffix ({cmd:smcl} for ".smcl", {cmd:csv} +for ".csv", {cmd:rtf} for ".rtf", +{cmd:html} for ".htm" or ".html", {cmd:tex} for ".tex", {cmd:md} for ".md" or ".mmd", +and {cmd:fixed} for all other filenames). To override the default behavior, specify one of the +following format options. + +{p 8 8 2} +{cmd:smcl} produces a {help SMCL} formatted table to be displayed in the +Stata results window or the Stata viewer. + +{p 8 8 2} +{cmd:fixed} produces a fixed-format ASCII table. This is suitable, +for example, if the table be displayed in a text editor. + +{p 8 8 2} +{cmd:tab} produces a tab-delimited ASCII table. +{p_end} +{marker csv} +{p 8 8 2} +{cmd:csv} produces a CSV ({ul:C}omma {ul:S}eparated {ul:V}alue +format) table for use with Microsoft Excel. Delimiter is a comma. In +order to prevent Excel from interpreting the contents of the table +cells, they are enclosed double quotes preceded by an equal sign +(i.e. ="..."). 
However, if the {cmd:plain} option is specified, the +table cells are enclosed in double quotes without the leading equal +sign. The first method is appropriate if you want to preserve the +table's formatting. The second method is appropriate if you want to +use the table's contents for further computations in Excel. +{p_end} +{marker scsv} +{p 8 8 2} +{cmd:scsv} is a variant on the CSV format that uses a semicolon as +the delimiter. This is appropriate for some non-English versions of +Excel (e.g. the German version). +{p_end} +{marker rtf} +{p 8 8 2} +{cmd:rtf} produces a Rich Text Format table for use with word +processors. + +{p 8 8 2} +{cmd:html} produces a simple HTML formatted table. + +{p 8 8 2} +{cmd:tex} produces a LaTeX formatted table. +{p_end} +{marker booktabs} +{p 8 8 2} +{cmd:booktabs} produces a LaTeX formatted table for use with LaTeX's +{it:booktabs} package. + +{p 8 8 2} +{cmd:md} produces a Markdown formatted table. Native +{browse "http://daringfireball.net/projects/markdown/":Markdown} has no specific +support for tables, but for example {browse "http://github.github.com/gfm/":GitHub Flavored Markdown} +and {browse "http://fletcherpenney.net/multimarkdown/":MultiMarkdown} do. Option {cmd:mmd} +can be used as a synonym for {cmd:md} (the default file suffix will be ".mmd" in this case). +{p_end} +{marker fragment} +{p 4 8 2} +{cmd:fragment} causes the table's opening and closing specifications +to be suppressed. This is relevant primarily in LaTeX and HTML mode. + +{p 4 8 2} +{cmd:float} causes the table to be set up as a floating object in LaTeX mode +(table environment). Providing a {cmd:title()} implies {cmd:float}. Specify +{cmd:nofloat} to omit the float environment in this case (this is useful, e.g., +for LyX users). + +{p 4 8 2} +{cmd:page}[{cmd:(}{it:packages}{cmd:)}] adds opening and closing code +to define a whole LaTeX or HTML document. 
The default is to produce a +raw table that can then be included into an existing LaTeX or HTML +document. Specifying {it:packages} in parentheses causes +{cmd:\usepackage{c -(}}{it:packages}{cmd:{c )-}} to be added to the +preamble of the LaTeX document (note that the {it:booktabs} package +is automatically loaded if {cmd:booktabs} is specified). + +{p 4 8 2} +{cmd:standalone}[{cmd:(}{it:opts}{cmd:)}] implies {cmd:page} +and uses {cmd:\documentclass[}{it:opts}{cmd:]{c -(}standalone{c )-}} +instead of {cmd:\documentclass{c -(}article{c )-}} in the LaTeX header (see +{browse "http://ctan.org/pkg/standalone"}). The default for {it:opts} +is {cmd:varwidth}; type {cmd:standalone("")} to suppress {cmd:[}{it:opts}{cmd:]}. + +{p 4 8 2} +{cmd:alignment(}{it:string}{cmd:)} may be used to specify the +alignment of the models' columns in LaTeX, HTML, or RTF mode. + +{p 8 8 2} +In LaTeX mode {it:string} should be a LaTeX column specifier. The +default is to center the columns. To produce right-aligned columns, +for example, type {cmd:alignment(r)}. If the table contains multiple +columns per model/equation, the alignment specification should define +all columns. For example, if the {cmd:wide} option is specified, you +could type {cmd:alignment(cr)} to, say, center the point estimates +and right-align the t-statistics. Note that more sophisticated column +definitions are often needed to produce appealing results. In +particular, LaTeX's {it:dcolumn} package proves useful to align +columns on the decimal point. + +{p 8 8 2} +In HTML mode {it:string} should be a HTML alignment specifier. The +default is to omit alignment specification, which results in left +aligned columns. To center the columns in HTML, for example, specify +{cmd:alignment(center)}. Other than in LaTeX mode, the same alignment +is used for all columns if the table contains multiple columns per +model/equation in the HTML mode.
+ +{p 8 8 2} +In RTF mode {it:string} should be one of {cmd:l}, {cmd:c}, {cmd:r}, +and {cmd:j}. The default is to center the columns. To produce +right-aligned columns, for example, type {cmd:alignment(r)}. The same +alignment is used for all columns if the table contains multiple +columns per model/equation in the RTF mode. + +{p 8 8 2} +Note that {cmd:alignment()} does not change the alignment of the +variable names/labels in the left stub of the table. They are always +left-aligned. + +{p 4 8 2} +{cmd:width(}{it:string}{cmd:)} sets the overall width of the table in +LaTeX or HTML. {it:string} should be LaTeX or HTML literal. For +example, specify {cmd:width(\hsize)} in LaTeX or {cmd:width(100%)} in +HTML to span the whole page. The table columns will spread regularly +over the specified width. Note that in RTF mode {helpb estout}'s +{cmd:varwidth()} and {cmd:modelwidth()} options may be used to change +the width of the table columns. + +{p 4 8 2} +{cmdab:longtable} causes the {it:longtable} environment to be used in +LaTeX. Use {cmdab:longtable} for tables that are too +long to fit on a single page. {cmdab:longtable} cannot be combined +with {cmd:width()}. Make sure to load the {it:longtable} package +in the LaTeX document, i.e. include {cmd:\usepackage{c -(}longtable{c )-}} in the +document's preamble. + +{p 4 8 2} +{cmd:fonttbl(}{it:string}{cmd:)} defines a custom font table in RTF. The +default is "{cmd:\f0\fnil Times New Roman;}". For example, typing + + {com}. esttab using example.rtf, ti("\f1 The Auto Data") /// + fonttbl(\f0\fnil Times New Roman;\f1\fnil Arial;){txt} + +{p 8 8 2} +would add a title in Arial. + +{p 4 8 2} +{cmd:nortfencode} prevents the translation of non-ASCII characters in RTF +mode (a translated character is encoded as {cmd:\u}#{cmd:?}, where +# is the base 10 character code). This is only relevant in Stata 14 or newer; +no translation is applied in Stata 13 or older.
+ +{marker output} +{dlgtab:Output} + +{p 4 8 2} +{cmd:replace} permits {cmd:esttab} to overwrite an existing file. + +{p 4 8 2} +{cmd:append} specifies that the output be appended to an existing +file. It may be used even if the file does not yet exist. Specifying +{cmd:append} together with {cmd:page} in TeX or HTML mode causes the +new table to be inserted at the end of the body of an existing +document ({cmd:esttab} seeks a line reading "\end{document}" or +"</html>", respectively, and starts appending from there; +contents after this line will be overwritten). In RTF mode, existing +documents are assumed to end with a line containing a single "}". + +{p 4 8 2} +{cmd:type} specifies that the assembled table be printed in the +results window and the log file. This is the default unless +{cmd:using} is specified. + +{p 4 8 2} +{cmd:noisily} displays the executed {helpb estout} command. + +{marker advanced} +{dlgtab:Advanced} + +{p 4 8 2} +{cmd:drop(}{it:droplist}{cmd:)} identifies the coefficients to be +dropped from the table. A {it:droplist} comprises one or more +specifications, separated by white space. A specification can be +either a parameter name (e.g. {cmd:price}), an equation name followed +by a colon (e.g. {cmd:mean:}), or a full name +(e.g. {cmd:mean:price}). You may use the {cmd:*} and {cmd:?} wildcards +in equation names and parameter names. Be sure to refer to the matched +equation names, and not to the original equation names in the models, +when using the {cmd:equations()} option to match equations. + +{p 4 8 2} +{cmd:noomitted} drops omitted coefficients (only relevant in Stata 11 or +newer). + +{p 4 8 2} +{cmd:nobaselevels} drops base levels of factor variables (only relevant +in Stata 11 or newer). + +{p 4 8 2} +{cmd:keep(}{it:keeplist}{cmd:)} selects the coefficients to be +included in the table. {it:keeplist} is specified analogous to +{it:droplist} in {cmd:drop()} (see above).
+ +{p 4 8 2} +{cmd:order(}{it:orderlist}{cmd:)} changes the order of the +coefficients and equations within the table. {it:orderlist} is +specified analogous to {it:droplist} in {cmd:drop()} (see above). +Coefficients and equations that do not appear in {it:orderlist} are +placed last (in their original order). + +{p 4 8 2} +{cmd:equations(}{it:eqmatchlist}{cmd:)} specifies how the models' +equations are to be matched. This option is passed to the internal +call of {cmd:estimates table}. See help {helpb estimates} on how to +specify this option. The most common usage is {cmd:equations(1)} to +match all the first equations in the models. + +{p 4 8 2} +{cmd:eform} displays the regression table in exponentiated form. The +exponent of a coefficient is displayed in lieu of the untransformed +coefficient; standard errors and confidence intervals are transformed +as well. Note that the intercept is dropped in eform-mode, unless +{cmd:constant} is specified. + +{p 4 8 2} +{cmd:unstack} specifies that the individual equations from +multiple-equation models (e.g. {cmd:mlogit}, {cmd:reg3}, +{cmd:heckman}) be placed in separate columns. The default is to place +the equations below one another in a single column. + +{p 4 8 2} +{it:estout_options} are any other {cmd:estout} options (see help +{helpb estout}). Note that {cmd:estout} options take precedence over +{cmd:esttab} options. For example, + +{p 8 20 2} +{cmd:cells()}{space 5}disables {cmd:b()}, {cmd:beta()}, {cmd:main()}, +{cmd:t()}, {cmd:abs}, {cmd:not}, {cmd:se()}, {cmd:p()}, {cmd:ci()}, +{cmd:aux()}, {cmd:star}, {cmd:staraux}, {cmd:wide}, {cmd:onecell}, +{cmd:parentheses}, and {cmd:brackets}, + +{p 8 20 2} +{cmd:stats()}{space 5}disables {cmd:r2()}, {cmd:ar2()}, {cmd:pr2()}, +{cmd:aic()}, {cmd:bic()}, {cmd:scalars()}, {cmd:sfmt()}, {cmd:noobs}, +and {cmd:obslast}. 
+ +{p 8 8 2} +Other {cmd:estout} options that should be used with care are +{cmd:begin()}, {cmd:delimiter()}, {cmd:end()}, {cmd:prehead()}, +{cmd:posthead()}, {cmd:prefoot()}, {cmd:postfoot()}, {cmd:mlabels()}, +and {cmd:varlabels()}. Furthermore, note that {cmd:estout}'s {cmd:style()} +option does not have much effect because most options that would be affected +by {cmd:style()} are set explicitly by {cmd:esttab}. + +{marker fmt} +{dlgtab:Numerical formats} + +{p 4 4 2} +Numerical display formats may be specified in {cmd:esttab} as follows: + +{p 5 8 2} +1. Official Stata's display formats: You may specify formats, such as +{cmd:%9.0g} or {cmd:%8.2f}. See help {help format} for a list +of available formats. {cmd:%g} or {cmd:g} may be used as a +synonym for {cmd:%9.0g}. + +{p 5 8 2} +2. Fixed format: You may specify an integer value such as {cmd:0}, +{cmd:1}, {cmd:2}, etc. to request a display format with a fixed number +of decimal places. For example, {cmd:t(3)} would display t-statistics +with three decimal places. + +{p 5 8 2} +3. Automatic format: You may specify {cmd:a1}, {cmd:a2}, ..., or +{cmd:a9} to cause {cmd:esttab} to choose a reasonable display format for +each number depending on the number's value. {cmd:a} may be used as a +synonym for {cmd:a3}. The {it:#} in +{cmd:a}{it:#} determines the minimum precision according to the +following rules: + +{p 10 12 2} +o Absolute numbers smaller than 1 are displayed with {it:#} +significant decimal places (i.e. with {it:#} decimal places ignoring +any leading zeros after the decimal point). For example, +{cmd:0.00123456} is displayed as {cmd:0.00123} if the format is +{cmd:a3}. + +{p 10 12 2} +o Absolute numbers greater than 1 are displayed with as many digits +required to retain at least one decimal place and are displayed with +a minimum of ({it:#} + 1) digits. 
For example, if the format is +{cmd:a3}, {cmd:1.23456} is displayed as {cmd:1.235}, {cmd:12.3456} is +displayed as {cmd:12.35}, and {cmd:1234.56} is displayed as +{cmd:1234.6}. + +{p 10 12 2} +o In any case, integers are displayed with zero decimal places, and +very large or very small absolute numbers are displayed in +exponential format. + +{marker exa} +{title:Examples} + +{p 4 4 2} +The following examples are intended to illustrate the basic usage of +{cmd:esttab}. Additional examples can be found at +{browse "http://repec.sowi.unibe.ch/stata/estout/"}. + +{p 4 4 2} The procedure is to first fit and store some models (see {helpb eststo}) and then apply +{cmd:esttab} to these stored estimates: + + {com}. eststo clear + {txt} + {com}. sysuse auto + {txt}(1978 Automobile Data) + + {com}. eststo: quietly regress price weight mpg + {txt}({res}est1{txt} stored) + + {com}. eststo: quietly regress price weight mpg foreign + {txt}({res}est2{txt} stored) + + {com}. esttab, ar2 + {res} + {txt}{hline 44} + {txt} (1) (2) + {txt} price price + {txt}{hline 44} + {txt}weight {res} 1.747** 3.465***{txt} + {res} {ralign 12:{txt:(}2.72{txt:)}} {ralign 12:{txt:(}5.49{txt:)}} {txt} + + {txt}mpg {res} -49.51 21.85 {txt} + {res} {ralign 12:{txt:(}-0.57{txt:)}} {ralign 12:{txt:(}0.29{txt:)}} {txt} + + {txt}foreign {res} 3673.1***{txt} + {res} {ralign 12:{txt:(}5.37{txt:)}} {txt} + + {txt}_cons {res} 1946.1 -5853.7 {txt} + {res} {ralign 12:{txt:(}0.54{txt:)}} {ralign 12:{txt:(}-1.73{txt:)}} {txt} + {txt}{hline 44} + {txt}N {res} 74 74 {txt} + {txt}adj. R-sq {res} 0.273 0.478 {txt} + {txt}{hline 44} + {txt}t statistics in parentheses + {txt}* p<0.05, ** p<0.01, *** p<0.001 + + +{p 4 4 2} +The same table using labels: + + {com}. esttab, ar2 label + {res} + {txt}{hline 52} + {txt} (1) (2) + {txt} Price Price + {txt}{hline 52} + {txt}Weight (lbs.) 
{res} 1.747** 3.465***{txt} + {res} {ralign 12:{txt:(}2.72{txt:)}} {ralign 12:{txt:(}5.49{txt:)}} {txt} + + {txt}Mileage (mpg) {res} -49.51 21.85 {txt} + {res} {ralign 12:{txt:(}-0.57{txt:)}} {ralign 12:{txt:(}0.29{txt:)}} {txt} + + {txt}Car type {res} 3673.1***{txt} + {res} {ralign 12:{txt:(}5.37{txt:)}} {txt} + + {txt}Constant {res} 1946.1 -5853.7 {txt} + {res} {ralign 12:{txt:(}0.54{txt:)}} {ralign 12:{txt:(}-1.73{txt:)}} {txt} + {txt}{hline 52} + {txt}Observations {res} 74 74 {txt} + {txt}Adjusted R-squared {res} 0.273 0.478 {txt} + {txt}{hline 52} + {txt}t statistics in parentheses + {txt}* p<0.05, ** p<0.01, *** p<0.001 + + +{p 4 4 2} +Plain table: + + {com}. esttab, ar2 plain + {res} + {txt} est1 est2 + {txt} b/t b/t + {txt}weight {res} 1.746559 3.464706{txt} + {res} 2.723238 5.493003{txt} + {txt}mpg {res} -49.51222 21.8536{txt} + {res} -.5746808 .2944391{txt} + {txt}foreign {res} 3673.06{txt} + {res} 5.370142{txt} + {txt}_cons {res} 1946.069 -5853.696{txt} + {res} .541018 -1.733408{txt} + {txt}N {res} 74 74{txt} + {txt}adj. R-sq {res} .2734846 .4781119{txt} + + +{p 4 4 2} +Using standard errors in brackets and suppress significance stars: + + {com}. esttab, se nostar brackets + {res} + {txt}{hline 38} + {txt} (1) (2) + {txt} price price + {txt}{hline 38} + {txt}weight {res} 1.747 3.465{txt} + {res} {ralign 12:{txt:[}0.641{txt:]}} {ralign 12:{txt:[}0.631{txt:]}}{txt} + + {txt}mpg {res} -49.51 21.85{txt} + {res} {ralign 12:{txt:[}86.16{txt:]}} {ralign 12:{txt:[}74.22{txt:]}}{txt} + + {txt}foreign {res} 3673.1{txt} + {res} {ralign 12:{txt:[}684.0{txt:]}}{txt} + + {txt}_cons {res} 1946.1 -5853.7{txt} + {res} {ralign 12:{txt:[}3597.0{txt:]}} {ralign 12:{txt:[}3377.0{txt:]}}{txt} + {txt}{hline 38} + {txt}N {res} 74 74{txt} + {txt}{hline 38} + {txt}Standard errors in brackets + + +{p 4 4 2} +Printing beta coefficients: + + {com}. 
esttab, beta + {res} + {txt}{hline 44} + {txt} (1) (2) + {txt} price price + {txt}{hline 44} + {txt}weight {res} 0.460** 0.913***{txt} + {res} {ralign 12:{txt:(}2.72{txt:)}} {ralign 12:{txt:(}5.49{txt:)}} {txt} + + {txt}mpg {res} -0.097 0.043 {txt} + {res} {ralign 12:{txt:(}-0.57{txt:)}} {ralign 12:{txt:(}0.29{txt:)}} {txt} + + {txt}foreign {res} 0.573***{txt} + {res} {ralign 12:{txt:(}5.37{txt:)}} {txt} + {txt}{hline 44} + {txt}N {res} 74 74 {txt} + {txt}{hline 44} + {txt}Standardized beta coefficients; t statistics in parentheses + {txt}* p<0.05, ** p<0.01, *** p<0.001 + +{marker aut} +{title:Author} + +{p 4 4 2} +Ben Jann, Institute of Sociology, University of Bern, jann@soz.unibe.ch + +{marker als} +{title:Also see} + + Manual: {hi:[R] estimates} + +{p 4 13 2}Online: help for + {helpb estimates}, + {help estcom}, + {helpb estout}, + {helpb eststo}, + {helpb estadd}, + {helpb estpost} +{p_end} diff --git a/110/replication_package/replication/ado/plus/f/fcollapse.ado b/110/replication_package/replication/ado/plus/f/fcollapse.ado new file mode 100644 index 0000000000000000000000000000000000000000..7cfc3b2a8d5aa3a53b5cbe1220fc6b2dd7a5ecb9 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fcollapse.ado @@ -0,0 +1,285 @@ +*! 
version 2.49.0 06may2022 +program define fcollapse + cap noi Inner `0' + loc rc = c(rc) + cap mata: mata drop query + cap mata: mata drop fun_dict + cap mata: mata drop F + exit `rc' +end + + +program define Inner + syntax [anything(equalok)] [if] [in] [fw aw pw iw/] , /// + [by(varlist)] /// + [FAST] /// + [cw] /// + [FREQ FREQvar(name)] /// -contract- feature for free + [REGister(namelist local)] /// additional aggregation functions + [POOL(numlist integer missingok max=1 >0 min=1)] /// memory-related + [MERGE] /// adds back collapsed vars into dataset; replaces egen + [APPEND] /// appends collapsed variables at the end of the dataset (useful to add totals to tables) + [SMART] /// allow calls to collapse instead of fcollapse + [METHOD(string)] /// allow choice of internal method (hash0, hash1, etc.) + [noCOMPRESS] /// save variables in the smallest type that preserves information + [Verbose] // debug info + + // Parse + if ("`freq'" != "" & "`freqvar'" == "") local freqvar _freq + if ("`pool'" == "") loc pool . 
// all obs together + if ("`fast'" == "") preserve + if ("`by'" == "") { + tempvar byvar + gen byte `byvar' = 1 + loc by `byvar' + } + loc merge = ("`merge'" != "") + loc append = ("`append'" != "") + loc smart = ("`smart'" != "") & !`merge' & !`append' & ("`freqvar'" == "") & ("`register'" == "") & ("`anything'" != "") & ("`by'" != "") + loc compress = ("`compress'" != "nocompress") + loc verbose = ("`verbose'" != "") + + _assert `merge' + `append' < 2, msg("cannot append and merge at the same time") + + if (`smart') { + gettoken first_by _ : by + loc ok = strpos("`: sortedby'", "`first_by'") == 1 + if (!`ok' & c(N)>1) { + tempvar notok + gen byte `notok' = `first_by' < `first_by'[_n-1] + cou if `notok' in 2/`c(N)' + loc ok = r(N)==0 + drop `notok' + } + if (`ok') { + if (`verbose') di as text "data already sorted; calling -collapse- due to -smart- option" + if ("`weight'" != "") loc eqsign = + collapse `anything' `if' `in' [`weight'`eqsign'`exp'] , by(`by') `fast' `cw' + exit + } + } + + if ("`anything'" == "") { + if ("`freqvar'"=="") { + di as error "need at least a varlist or a freq. 
option" + error 110 + } + } + else { + ParseList `anything', merge(`merge') // modify `targets' `keepvars' + } + + loc valid_stats mean median sum count percent max min /// + iqr first last firstnm lastnm sd nansum + loc invalid_stats : list stats - valid_stats + loc invalid_stats : list invalid_stats - register + foreach stat of local invalid_stats { + if !(regexm("`stat'", "^p[0-9]+$")) { + di as error "Invalid stat: (`stat')" + error 110 + } + } + + // Check dependencies + cap qui mata: mata which mm_quantile() + loc rc = c(rc) + if (`rc') { + di as error "SSC Package Moremata required (to compute quantiles)" + di as smcl "{err}To install: {stata ssc install moremata}" + error `rc' + } + + loc intersection : list targets & by + if ("`intersection'" != "") { + di as error "targets in collapse are also in by(): `intersection'" + error 110 + } + loc intersection : list targets & freqvar + if ("`intersection'" != "") { + di as error "targets in collapse are also in freq(): `intersection'" + error 110 + } + + * Trim data + loc need_touse = ("`if'`in'"!="" | "`cw'"!="" | "`exp'" != "") + if (`need_touse') { + marksample touse, strok novarlist + + * Raise error with [iw] and negative weights (other weight types already do so) + if ("`weight'"=="iweight") { + _assert (!`touse') | (`exp' >= 0), msg("negative weights encountered") rc(402) + } + + if ("`cw'" != "") { + markout `touse' `keepvars', strok + } + + if (!`merge' & !`append') { + qui keep if `touse' + drop `touse' + loc touse + } + } + + // Raise error if no obs. + if (!c(N)) { + error 2000 + } + + if (`append') loc offset = c(N) + 1 + + // Create factor structure + mata: F = factor("`by'", "`touse'", `verbose', "`method'") + + // Trim again + // (saves memory but is slow for big datasets) + if (!`merge' & !`append' & `pool' < .) 
keep `keepvars' `exp' + + // Get list of aggregating functions + mata: fun_dict = aggregate_get_funs() + if ("`register'" != "") { + foreach fun of local register { + mata: asarray(fun_dict, "`fun'", &aggregate_`fun'()) + } + } + + // Main loop: collapses data + if ("`anything'" != "") { + mata: f_collapse(F, fun_dict, query, "`keepvars'", `merge', `append', `pool', "`exp'", "`weight'", `compress') + } + else { + clear + mata: F.store_keys(1) + } + + // Add frequencies (already stored in -F-) + if ("`freqvar'" != "") { + mata: st_local("maxfreq", strofreal(max(F.counts))) + loc freqtype long + if (`maxfreq' <= 32740) loc freqtype int + if (`maxfreq' <= 100) loc freqtype byte + if (`merge') { + mata: st_store(., st_addvar("`freqtype'", "`freqvar'", 1), F.counts[F.levels]) + } + else if (`append') { + mata: st_store((`offset'::`=c(N)'), st_addvar("`freqtype'", "`freqvar'", 1), F.counts) + } + else { + mata: st_store(., st_addvar("`freqtype'", "`freqvar'", 1), F.counts) + } + la var `freqvar' "Frequency" + } + + if (!`merge' & !`append') order `by' `targets' + if ("`fast'" == "") restore, not +end + + +program define ParseList + syntax [anything(equalok)] , MERGE(integer) + TrimSpaces 0 : `anything' + + loc stat mean // default + mata: query = asarray_create("string") // query[var] -> [stat, target] + mata: asarray_notfound(query, J(0, 3, "")) + + while ("`0'" != "") { + GetStat stat 0 : `0' + GetTarget target 0 : `0' + gettoken vars 0 : 0 + unab vars : `vars' + foreach var of local vars { + if ("`target'" == "") { + if (`merge') { + loc target `stat'_`var' + } + else { + loc target `var' + } + } + + loc raw = strpos("`stat'", "raw") == 1 + if (`raw') { + loc stat = substr("`stat'", 4, .) 
+ } + + loc targets `targets' `target' + loc keepvars `keepvars' `var' + loc stats `stats' `stat' + mata: asarray(query, "`var'", asarray(query, "`var'") \ ("`target'", "`stat'", "`raw'")) + loc target + } + } + + // Check that targets don't repeat + loc dups : list dups targets + if ("`dups'" != "") { + cap mata: mata drop query + di as error "repeated targets in collapse: `dups'" + error 110 + } + + loc keepvars : list uniq keepvars + loc stats : list uniq stats + c_local targets `targets' + c_local stats `stats' + c_local keepvars `keepvars' +end + + +program define TrimSpaces + _on_colon_parse `0' + loc lhs `s(before)' + loc rest `s(after)' + + * Trim spaces around equal signs ("= ", " =", " = ", etc) + loc old_n .b + loc n .a + while (`n' < `old_n') { + loc rest : subinstr loc rest " " " ", all + loc old_n `n' + loc n : length local rest + } + loc rest : subinstr loc rest " =" "=", all + loc rest : subinstr loc rest "= " "=", all + c_local `lhs' `rest' +end + + +program define GetStat + _on_colon_parse `0' + loc before `s(before)' + gettoken lhs rhs : before + loc rest `s(after)' + + gettoken stat rest : rest , match(parens) + if ("`parens'" != "") { + c_local `lhs' `stat' + c_local `rhs' `rest' + } +end + + +program define GetTarget + _on_colon_parse `0' + loc before `s(before)' + gettoken lhs rhs : before + loc rest `s(after)' + + loc rest : subinstr loc rest "=" "= ", all + gettoken target rest : rest, parse("= ") + gettoken eqsign rest : rest + if ("`eqsign'" == "=") { + c_local `lhs' `target' + c_local `rhs' `rest' + } +end + + +findfile "ftools.mata" +include "`r(fn)'" + +findfile "fcollapse_main.mata" +include "`r(fn)'" + +exit diff --git a/110/replication_package/replication/ado/plus/f/fcollapse.sthlp b/110/replication_package/replication/ado/plus/f/fcollapse.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..cc90a1612d896937a95c14c7c9998337e7212c9c --- /dev/null +++ 
b/110/replication_package/replication/ado/plus/f/fcollapse.sthlp @@ -0,0 +1,242 @@ +2.49.0 06may2022{smcl} +{* *! version 2.49.0 06may2022}{...} +{vieweralsosee "ftools" "help ftools"}{...} +{vieweralsosee "[R] collapse" "help collapse"}{...} +{vieweralsosee "[R] contract" "help contract"}{...} +{viewerjumpto "Syntax" "fcollapse##syntax"}{...} +{viewerjumpto "Description" "fcollapse##description"}{...} +{viewerjumpto "Options" "fcollapse##options"}{...} +{title:Title} + +{p2colset 5 18 23 2}{...} +{p2col :{cmd:fcollapse} {hline 2}}Efficiently +make dataset of summary statistics{p_end} +{p2colreset}{...} + +{marker syntax}{...} +{title:Syntax} + +{p 8 17 2} +{cmd:fcollapse} +{it:clist} +{ifin} +[{cmd:,} {it:{help fcollapse##table_options:options}}] + +{pstd}where {it:clist} is either + +{p 8 17 2} +[{opt (stat)}] +{varlist} +[ [{opt (stat)}] {it:...} ]{p_end} + +{p 8 17 2} +[{opt (stat)}] {it:target_var}{cmd:=}{varname} + [{it:target_var}{cmd:=}{varname} {it:...}] + [ [{opt (stat)}] {it:...}] + +{p 4 4 2}or any combination of the {it:varlist} or {it:target_var} forms, and +{it:stat} is one of{p_end} + +{p2colset 9 22 24 2}{...} +{p2col :{opt mean}}means (default){p_end} +{p2col :{opt median}}medians{p_end} +{p2col :{opt p1}}1st percentile{p_end} +{p2col :{opt p2}}2nd percentile{p_end} +{p2col :{it:...}}3rd{hline 1}49th percentiles{p_end} +{p2col :{opt p50}}50th percentile (same as {cmd:median}){p_end} +{p2col :{it:...}}51st{hline 1}97th percentiles{p_end} +{p2col :{opt p98}}98th percentile{p_end} +{p2col :{opt p99}}99th percentile{p_end} +{p2col :{opt sum}}sums{p_end} +{p2col :{opt count}}number of nonmissing observations{p_end} +{p2col :{opt percent}}percentage of nonmissing observations{p_end} +{p2col :{opt max}}maximums{p_end} +{p2col :{opt min}}minimums{p_end} +{p2col :{opt iqr}}interquartile range{p_end} +{p2col :{opt first}}first value{p_end} +{p2col :{opt last}}last value{p_end} +{p2col :{opt firstnm}}first nonmissing value{p_end} +{p2col :{opt lastnm}}last 
nonmissing value{p_end} +{p2col :{opt nansum}}same as sum, but if all obs. in the group are missing it will also be missing (instead of zero){p_end} +{p2col :{opt raw}{inp:{bf:{it:stat}}}}compute stats while ignoring weights (a generalization of {it:rawsum}){p_end} +{p2colreset}{...} + +{pstd} +If {it:stat} is not specified, {opt mean} is assumed. + +{pstd} +Technical limitation: Both normal stats and {it:raw} stats will ignore zero weights + +{synoptset 15 tabbed}{...} +{marker table_options}{...} +{synopthdr} +{synoptline} +{syntab :Options} +{synopt :{opth by(varlist)}}groups over which {it:stat} is to be calculated +{p_end} +{synopt :{opt merge}}merge collapsed dataset back into the original one; +if the dataset is unsorted or sorted by something different than {opt by()}, +it is much more efficient than {cmd:egen} and that combining {cmd:collapse} with {cmd:merge} +{p_end} +{synopt :{opt append}}append collapsed dataset at the end of the original one; +this is useful to create rows of totals +{p_end} +{synopt :{opt cw}}casewise deletion instead of all possible observations +{p_end} +{synopt :{opt fast}}do not preserve and restore the original dataset; +saves speed but leaves the data in an unusable state shall the +user press {hi:Break} +{p_end} +{synopt :{opt smart}}invoke {cmd:collapse} if the data is already sorted (in which case {cmd:collapse} might be faster) +{p_end} +{synopt :{cmd:freq}[{cmd:(}{newvar}{cmd:)}]}store +the raw observation count (similar to {help contract}). +If not indicated, the name of the new variable will be {it:_freq} +{p_end} +{synopt :{opt reg:ister(keys)}}add new stat functions. +For each key, a corresponding Mata function should exist. 
+See example at the end +{p_end} +{synopt :{opt pool(#)}}load the data into stata in blocks of # variables +Default is {it:pool(.)}, select a low value ({it:pool(5)}) +or very low value ({it:pool(1)}) to save memory at the cost of speed +{p_end} +{synopt :{opt nocompress}}{it:compress} chooses the most compact variable type, at a small speed cost +(on by default) +{p_end} +{synopt :{opt v:erbose}}display misc. debug messages +{p_end} + +{synoptline} +{p2colreset}{...} +{p 4 6 2} + + +{marker description}{...} +{title:Description} + +{pstd} +{opt fcollapse} converts the dataset in memory into a dataset of means, sums, +medians, etc. {it:clist} can refer to numeric and string variables +although string variables are only supported by a few functions +(first, last, firstnm, lastnm). + +{pstd} +Weights are only partially supported. + +{pstd} +You can implement your own Mata functions to easily extend the fcollapse command. + + +{marker options}{...} +{title:Options} + +{dlgtab:Options} + +{phang} +{opth by(varlist)} specifies the groups over which the means, etc., are to be +calculated. If this option is not specified, the resulting dataset will +contain 1 observation. If it is specified, {it:varlist} may refer to either +string or numeric variables. + +{phang} +{opt merge} works similarly to {cmd:egen}. +It will collapse the data in Mata and then add it back to the original dataset. +If the dataset is not sorted by the groups set in {opt by()}, this is much faster than {cmd:egen} and {cmd:collapse} followed by {cmd:merge}. + +{phang} +{opt cw} specifies casewise deletion. If {opt cw} is not specified, all +possible observations are used for each calculated statistic. + +{phang} +{opt fast} specifies that {opt fcollapse} not restore the original dataset +should the user press {hi:Break}. + +{phang} +{opt freq} stores frequencies on a new variable {it:_freq}. 
+To choose the name of the variable, use {opth freq(newvar)} + +{phang} +{opt reg:ister(fun1 ...)} registers Mata functions {it:fun1}, etc. so +to extend {cmd fcollapse}; see example below. + +{phang} +{opt pool(#)} load the data into Stata in blocks of # variables Default is pool(.), +select a low value (pool(5)) or very low value (pool(1)) to save memory at the cost of speed. + +{phang} +{opt compress} will fit variables into more compact types, such as {it:byte}, +{it:int}, and {it:long}, without losing information when compared to more accurate types +such as {it:double}. +The cost is a slight reduction in speed, due to the extra checks involved. + +{marker example}{...} +{title:Example: Adding your own aggregation functions} + +The following code adds the stat. {it:variance}: + + +{inp} sysuse auto, clear + + cap mata: mata drop aggregate_variance() + + mata: + mata set matastrict on + transmorphic colvector aggregate_variance( + class Factor F, + transmorphic colvector data, + real colvector weights) + { + real scalar i + transmorphic colvector results + results = J(F.num_levels, 1, missingof(data)) + for (i = 1; i <= F.num_levels; i++) { + results[i] = quadvariance(panelsubmatrix(data, i, F.info)) + } + return(results) + } + end + + fcollapse (mean) price (variance) weight foreign, by(turn) register(variance) freq + li +{text} + +Note that the to create a new stat {it:variance} we created a Mata function +called {it:aggregate_variance}. To avoid overlap with other Mata functions, +your function must start with {it:aggregate_}. + + +{marker author}{...} +{title:Author} + +{pstd}Sergio Correia{break} +Board of Governors of the Federal Reserve System, USA{break} +{browse "mailto:sergio.correia@gmail.com":sergio.correia@gmail.com}{break} +{p_end} + + +{marker project}{...} +{title:More Information} + +{pstd}{break} +To report bugs, contribute, ask for help, etc. 
please see the project URL in Github:{break} +{browse "https://github.com/sergiocorreia/ftools"}{break} +{p_end} + + +{marker acknowledgment}{...} +{title:Acknowledgment} + +{pstd} +This help file was based on StataCorp's own help file +for {it:collapse}. +{p_end} + +{pstd} +This project was largely inspired by the works of +{browse "http://wesmckinney.com/blog/nycpython-1102012-a-look-inside-pandas-design-and-development/":Wes McKinney}, +{browse "http://www.stata.com/meeting/uk15/abstracts/":Andrew Maurer} +and +{browse "https://ideas.repec.org/c/boc/bocode/s455001.html":Benn Jann}. +{p_end} + diff --git a/110/replication_package/replication/ado/plus/f/fcollapse_functions.mata b/110/replication_package/replication/ado/plus/f/fcollapse_functions.mata new file mode 100644 index 0000000000000000000000000000000000000000..047ba0ff2c4084dc9ded74189a203e99298f6df5 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fcollapse_functions.mata @@ -0,0 +1,293 @@ +// FCOLLAPSE - Aggregate Functions +// -data- vector must be already sorted by F: F.sort(data) +// Same for -weights- vector + +mata: +mata set matastrict on + +`Dict' aggregate_get_funs() +{ + `Dict' funs + funs = asarray_create("string", 1) + asarray_notfound(funs, NULL) + asarray(funs, "count", &aggregate_count()) + asarray(funs, "mean", &aggregate_mean()) + asarray(funs, "sum", &aggregate_sum()) + asarray(funs, "min", &aggregate_min()) + asarray(funs, "max", &aggregate_max()) + asarray(funs, "first", &aggregate_first()) + asarray(funs, "last", &aggregate_last()) + asarray(funs, "firstnm", &aggregate_firstnm()) + asarray(funs, "lastnm", &aggregate_lastnm()) + asarray(funs, "percent", &aggregate_percent()) + asarray(funs, "quantile", &aggregate_quantile()) + asarray(funs, "iqr", &aggregate_iqr()) + asarray(funs, "sd", &aggregate_sd()) + asarray(funs, "nansum", &aggregate_nansum()) + // ... 
+ return(funs) +} + + +`Matrix' select_nm_num(`Vector' data) { + // Return matrix in case the answer is 0x0 + return(select(data, data :< .)) +} + + +`StringMatrix' select_nm_str(`StringVector' data) { + return(select(data, data :!= "")) +} + + +`DataCol' aggregate_count(`Factor' F, `DataCol' data, `Vector' weights, `String' wtype) +{ + if (wtype == "" | wtype == "aweight") { + return( `panelsum'(data :<., 1, F.info) ) + } + else { + return( `panelsum'(data :<., weights, F.info) ) + } + // Older: + //`Integer' i + //`DataCol' results + //results = J(F.num_levels, 1, missingof(data)) + //for (i = 1; i <= F.num_levels; i++) { + // results[i] = nonmissing(panelsubmatrix(data, i, F.info)) + //} + //return(results) +} + + +`Vector' aggregate_sum(`Factor' F, `Vector' data, `Vector' weights, `String' wtype) +{ + if (wtype == "") { + return( `panelsum'(editmissing(data, 0), 1, F.info) ) + } + else if (wtype == "aweight") { + `Vector' sum_weights + // normalize weights so they add up to number of obs. in the subgroup + sum_weights = `panelsum'(weights :* (data :< .), F.info) :/ `panelsum'(data :< ., F.info) + return( `panelsum'(editmissing(data, 0), weights, F.info) :/ sum_weights ) + } + else { + return( `panelsum'(editmissing(data, 0), weights, F.info) ) + } +} + + +`Vector' aggregate_nansum(`Factor' F, `Vector' data, `Vector' weights, `String' wtype) +{ + assert(wtype == "") + return( `panelsum'(editmissing(data, 0), 1, F.info) :/ (`panelsum'(data :<., 1, F.info) :> 0) ) +} + + +`Vector' aggregate_mean(`Factor' F, `Vector' data, `Vector' weights, `String' wtype) +{ + if (wtype == "") { + return( aggregate_sum(F, data, 1, "") :/ aggregate_count(F, data, 1, "") ) + } + else { + // http://www.statalist.org/forums/forum/general-stata-discussion/general/289901-collapse-and-weights + return( aggregate_sum(F, data, weights, "iweight") :/ aggregate_count(F, data, weights, "iweight") ) + } + + // Older: + //`Integer' i + //`Vector' results + //results = J(F.num_levels, 1, .) 
+ //for (i = 1; i <= F.num_levels; i++) { + // results[i] = mean(panelsubmatrix(data, i, F.info), weights) + //} + //return(results) +} + + +`Vector' aggregate_min(`Factor' F, `Vector' data, `Vector' weights, `String' wtype) +{ + `Integer' i + `Vector' results + results = J(F.num_levels, 1, .) + for (i = 1; i <= F.num_levels; i++) { + results[i] = colmin(panelsubmatrix(data, i, F.info)) + } + return(results) +} + + +`Vector' aggregate_max(`Factor' F, `Vector' data, `Vector' weights, `String' wtype) +{ + `Integer' i + `Vector' results + results = J(F.num_levels, 1, .) + for (i = 1; i <= F.num_levels; i++) { + results[i] = colmax(panelsubmatrix(data, i, F.info)) + } + return(results) +} + + +`DataCol' aggregate_first(`Factor' F, `DataCol' data, `Vector' weights, `String' wtype) +{ + `Integer' i + `DataCol' results + results = J(F.num_levels, 1, missingof(data)) + for (i = 1; i <= F.num_levels; i++) { + results[i] = data[F.info[i, 1]] + } + return(results) +} + + +`DataCol' aggregate_last(`Factor' F, `DataCol' data, `Vector' weights, `String' wtype) +{ + `Integer' i + `DataCol' results + results = J(F.num_levels, 1, missingof(data)) + for (i = 1; i <= F.num_levels; i++) { + results[i] = data[F.info[i, 2]] + } + return(results) +} + + +`DataCol' aggregate_firstnm(`Factor' F, `DataCol' data, `Vector' weights, `String' wtype) +{ + `Integer' i + `DataCol' results, tmp + pointer(`Vector') fp + results = J(F.num_levels, 1, missingof(data)) + fp = isstring(data) ? &select_nm_str() : &select_nm_num() + for (i = 1; i <= F.num_levels; i++) { + tmp = (*fp)(panelsubmatrix(data, i, F.info)) + if (rows(tmp) == 0) continue + results[i] = tmp[1] + } + return(results) +} + + +`DataCol' aggregate_lastnm(`Factor' F, `DataCol' data, `Vector' weights, `String' wtype) +{ + `Integer' i + `DataCol' results, tmp + pointer(`Vector') fp + results = J(F.num_levels, 1, missingof(data)) + fp = isstring(data) ? 
&select_nm_str() : &select_nm_num() + for (i = 1; i <= F.num_levels; i++) { + tmp = (*fp)(panelsubmatrix(data, i, F.info)) + if (rows(tmp) == 0) continue + results[i] = tmp[rows(tmp)] + } + return(results) +} + + +`Vector' aggregate_percent(`Factor' F, `DataCol' data, `Vector' weights, `String' wtype) +{ + `Vector' results + results = aggregate_count(F, data, weights, wtype) + return(results :/ (quadsum(results) / 100)) +} + + +`Vector' aggregate_quantile(`Factor' F, `Vector' data, `Vector' weights, `String' wtype, + `Integer' P) +{ + `Integer' i + `Vector' results, tmp_data, tmp_weights + `Boolean' has_fweight + + results = J(F.num_levels, 1, .) + + if (wtype == "") { + for (i = 1; i <= F.num_levels; i++) { + // SYNTAX: mm_quantile(data, | weights, P, def, fw?, ..) + tmp_data = panelsubmatrix(data, i, F.info) + tmp_data = select(tmp_data, tmp_data :< .) + if (rows(tmp_data) == 0) continue + results[i] = mm_quantile(tmp_data, 1, P, 2) + } + } + else { + has_fweight = wtype == "fweight" + for (i = 1; i <= F.num_levels; i++) { + tmp_data = panelsubmatrix(data, i, F.info) + tmp_weights = panelsubmatrix(weights, i, F.info) + tmp_weights = select(tmp_weights, tmp_data :< .) + tmp_data = select(tmp_data, tmp_data :< .) + if (rows(tmp_data) == 0) continue + results[i] = mm_quantile(tmp_data, tmp_weights, P, 2, has_fweight) + } + } + + return(results) +} + + +`Vector' aggregate_iqr(`Factor' F, `Vector' data, `Vector' weights, `String' wtype) +{ + `Integer' i + `Vector' results, tmp_data, tmp_weights, P + `RowVector' tmp_iqr + `Boolean' has_fweight + + results = J(F.num_levels, 1, .) + P = (0.25\0.75) + + if (wtype == "") { + for (i = 1; i <= F.num_levels; i++) { + // SYNTAX: mm_iqrange(X [, w, def, fw, wd]) + tmp_data = panelsubmatrix(data, i, F.info) + tmp_data = select(tmp_data, tmp_data :< .) 
+ if (rows(tmp_data) == 1) results[i] = 0 + if (rows(tmp_data) <= 1) continue + results[i] = mm_iqrange(tmp_data, 1, 2) + } + } + else { + has_fweight = wtype == "fweight" + for (i = 1; i <= F.num_levels; i++) { + tmp_data = panelsubmatrix(data, i, F.info) + tmp_weights = panelsubmatrix(weights, i, F.info) + tmp_weights = select(tmp_weights, tmp_data :< .) + tmp_data = select(tmp_data, tmp_data :< .) + if (rows(tmp_data) == 1) results[i] = 0 + if (rows(tmp_data) <= 1) continue + results[i] = mm_iqrange(tmp_data, tmp_weights, 2, has_fweight) + } + } + + return(results) +} + + +`Vector' aggregate_sd(`Factor' F, `Vector' data, `Vector' weights, `String' wtype) +{ + `Integer' i + `Vector' results, adjustment, tmp_weights + if (wtype == "pweight") { + _error("sd not allowed with pweights") + } + results = J(F.num_levels, 1, .) + + if (wtype == "") { + for (i = 1; i <= F.num_levels; i++) { + results[i] = sqrt(quadvariance(panelsubmatrix(data, i, F.info))) + } + } + else { + printf("{err}warning: option sd has not been properly tested with weights!!!!") + for (i = 1; i <= F.num_levels; i++) { + tmp_weights = panelsubmatrix(weights, i, F.info) + tmp_weights = tmp_weights :/ quadsum(tmp_weights) * 1000000000 // why? 
bugbug + results[i] = sqrt(quadvariance(panelsubmatrix(data, i, F.info), tmp_weights)) + } + adjustment = aggregate_count(F, data, 1, "") + adjustment = sqrt(adjustment :/ (adjustment :- 1)) + results = results :* adjustment + } + return(results) +} +end diff --git a/110/replication_package/replication/ado/plus/f/fcollapse_main.mata b/110/replication_package/replication/ado/plus/f/fcollapse_main.mata new file mode 100644 index 0000000000000000000000000000000000000000..859b2bde02257d198680622103a052ab0cca6a9c --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fcollapse_main.mata @@ -0,0 +1,323 @@ +// FCOLLAPSE - Main routine +mata: +mata set matastrict on + +void f_collapse(`Factor' F, + `Dict' fun_dict, + `Dict' query, + `String' vars, + `Boolean' merge, + `Boolean' append, + `Integer' pool, + | `Varname' wvar, + `String' wtype, + `Boolean' compress) +{ + `Integer' num_vars, num_targets, num_obs, niceness + `Integer' i, i_next, j, i_cstore, j_cstore, i_target + `Real' q + `StringRowVector' var_formats, var_types + `StringRowVector' targets, target_labels, target_types, target_formats + `RowVector' var_is_str, target_is_str + `String' var + `Vector' weights + `Dict' data_cstore, results_cstore + `Dict' var_positions // varname -> (column, start) + `RowVector' var_pos + `Vector' box + `StringMatrix' target_stat_raw + `String' target, stat + `DataCol' data + `Boolean' raw + `Boolean' nofill + `Vector' idx // used by APPEND to index the new obs. + pointer(`DataCol') scalar fp + + if (args() < 6) wvar = "" + if (args() < 7) wtype = "" + + assert(anyof(("", "aweight", "iweight", "fweight", "pweight"), wtype)) + + + // Variable information + vars = tokens(vars) + assert(cols(vars) == cols(asarray_keys(query)')) + num_vars = length(vars) + var_formats = var_types = J(1, num_vars, "") + var_is_str = J(1, num_vars, .) 
+ num_targets = 0 + for (i = 1; i <= num_vars; i++) { + var = vars[i] + var_formats[i] = st_varformat(var) + var_types[i] = st_vartype(var) + var_is_str[i] = st_isstrvar(var) + num_targets = num_targets + rows(asarray(query, var)) + } + + // Compute permutation vector so we can sort the data + F.panelsetup() + if (!merge) { + F.levels = . // save memory + } + + // Weights + if (wvar != "") { + weights = F.sort(st_data(., wvar, F.touse)) + } + else { + weights = 1 + } + + // Load variables + niceness = st_numscalar("c(niceness)") // requires stata 13+ + if (length(niceness) == 0) niceness = . + stata("cap set niceness 10") // requires stata 13+ + data_cstore = asarray_create("real", 1) + var_positions = asarray_create("string", 1) + num_obs = F.num_obs + if (!merge & !append) assert(num_obs == st_nobs()) + + // i, i_next, j -> index variables + // i_cstore -> index vectors in the cstore + i_next = . // to avoid warning + + for (i = i_cstore = 1; i <= num_vars; i = i_next + 1) { + i_next = min((i + pool - 1, num_vars)) + + // Can't load strings and numbers together + for (j = i; j <= i_next; j++) { + if (var_is_str[j] != var_is_str[i]) { + i_next = j - 1 + break + } + } + + // Load data + if (var_is_str[i]) { + asarray(data_cstore, i_cstore, st_sdata(., vars[i..i_next], F.touse)) + } + else { + asarray(data_cstore, i_cstore, st_data(., vars[i..i_next], F.touse)) + } + + // Keep pending vars + if (!merge & !append) { + if (i_next == num_vars) { + stata("clear") + } + else { + st_keepvar(vars[i_next+1..num_vars]) + } + } + + // Store collated and vectorized data + // cstore[i_cstore] = vec(sort(cstore[i_cstore])) + asarray(data_cstore, i_cstore, + vec(F.sort(asarray(data_cstore, i_cstore)))) + + // Store the position of each variable in the cstore + for (j = i; j <= i_next; j++) { + var = vars[j] + j_cstore = 1 + (j - i) * num_obs + var_pos = (i_cstore, j_cstore) + asarray(var_positions, var, var_pos) + } + i_cstore++ + } + + results_cstore = asarray_create("string", 
1) + targets = target_labels = target_types = target_formats = J(1, num_targets, "") + target_is_str = J(1, num_targets, .) + + // Apply aggregations + for (i = i_target = 1; i <= num_vars; i++) { + var = vars[i] + target_stat_raw = asarray(query, var) + var_pos = asarray(var_positions, var) + + for (j = 1; j <= rows(target_stat_raw); j++) { + + i_cstore = var_pos[1] + j_cstore = var_pos[2] + box = j_cstore \ j_cstore + num_obs - 1 + data = asarray(data_cstore, i_cstore)[|box|] + + target = target_stat_raw[j, 1] + stat = target_stat_raw[j, 2] + raw = strtoreal(target_stat_raw[j, 3]) + fp = asarray(fun_dict, stat) + targets[i_target] = target + target_labels[i_target] = sprintf("(%s) %s", stat, var) + target_types[i_target] = infer_type(var_types[i], var_is_str[i], stat, data) + target_formats[i_target] = stat=="count" ? "%8.0g" : var_formats[i] + target_is_str[i_target] = var_is_str[i] + + if (stat == "median") { + stat = "p50" + } + if (regexm(stat, "^p[0-9]+$")) { + q = strtoreal(substr(stat, 2, .)) / 100 + fp = asarray(fun_dict, "quantile") + asarray(results_cstore, target, (*fp)(F, data, weights, raw ? "" : wtype, q)) + } + else { + asarray(results_cstore, target, (*fp)(F, data, weights, raw ? "" : wtype)) + } + ++i_target + } + // Clear vector if done with it + if (box[2] == rows(asarray(data_cstore, i_cstore))) { + asarray(data_cstore, i_cstore, .) 
+ } + } + + if (append) { + // 1) Add obs + idx = ( st_nobs()) + 1 :: (st_nobs() + F.num_levels ) + st_addobs(F.num_levels) + // 2) Fill out -by- variables + if (substr(F.vartypes[1], 1, 3) == "str") { + st_sstore(idx, F.varlist, F.keys) + } + else { + st_store(idx, F.varlist, F.keys) + } + + // Add data to bottom rows, adding variables or recasting if necessary + for (i = 1; i <= length(targets); i++) { + target = targets[i] + data = asarray(results_cstore, target) + + if (target_is_str[i]) { + if (missing(_st_varindex(target))) { + (void) st_addvar(target_types[i], target) + } + st_sstore(idx, target, data) + } + else { + if (compress) { + target_types[i] = compress_type(target_types[i], data) + } + + if (missing(_st_varindex(target))) { + (void) st_addvar(target_types[i], target) + } + else if (st_vartype(target) != target_types[i]) { + // Note that the recast attempt might fail if we ran this command with -if- + // This is b/c observations not loaded into Mata might be outside the valid range + stata(sprintf("qui recast %s %s", target_types[i], target)) + } + + // (sp. tricky with -merge-, but not so much otherwise, as touse will be always 1) + st_store(idx, target, data) + } + asarray(results_cstore, target, .) + } + } // APPEND CASE + else { + + // Store results + if (!merge) { + F.store_keys(1) // sort=1 will 'sort' by keys (faster now than later) + assert(F.touse == "") + } + + nofill = (merge == 0) + + for (i = 1; i <= length(targets); i++) { + target = targets[i] + data = asarray(results_cstore, target) + if (merge) { + data = rows(data) == 1 ? data[F.levels, .] : data[F.levels] + } + + if (target_is_str[i]) { + st_sstore(., st_addvar(target_types[i], target, nofill), F.touse, data) + } + else { + if (compress) { + target_types[i] = compress_type(target_types[i], data) + } + + // note: we can't do -nofill- with addvar because that sets the values to 0 instead of missing + // (sp. 
tricky with -merge-, but not so much otherwise, as touse will be always 1) + st_store(., st_addvar(target_types[i], target, nofill), F.touse, data) + } + asarray(results_cstore, target, .) + } + + // Label and format vars + for (i = 1; i <= cols(targets); i++) { + st_varlabel(targets[i], target_labels[i]) + st_varformat(targets[i], target_formats[i]) + } + + } // NOT APPEND + + stata(sprintf("cap set niceness %s", strofreal(niceness))) +} + + +// Try to pick a more compact type after the data has been created +`String' compress_type(`String' target_type, + `DataCol' data) +{ + `RowVector' _ + `Integer' min, max + + // We can't improve on byte + if (target_type == "byte") { + return(target_type) + } + + // We shouldn't lose accuracy + if (any( target_type :== ("float", "double") )) { + if (trunc(data) != data) { + return(target_type) + } + } + + _ = minmax(data) + min = _[1] + max = _[2] + + if (-127 <= min & max <= 100) { + return("byte") + } + else if (-32767 <= min & max <= 32740) { + return("int") + } + else if (-2147483647 <= min & max <= 2147483620) { + return("long") + } + else { + return(target_type) + } +} + + +// Infer type required for new variables after collapse +`String' infer_type(`String' var_type, + `Boolean' var_is_str, + `String' stat, + `DataCol' data) +{ + `String' ans + `StringRowVector' fixed_stats + + fixed_stats = ("min", "max", "first", "last", "firstnm", "lastnm") + + if ( var_is_str | any(fixed_stats :== stat) ) { + ans = var_type + } + else if (stat == "count") { + ans = "long" + } + else { + ans = "double" + } + + return(ans) +} + +end diff --git a/110/replication_package/replication/ado/plus/f/fegen.ado b/110/replication_package/replication/ado/plus/f/fegen.ado new file mode 100644 index 0000000000000000000000000000000000000000..bfb8a8a00b48f99d90f3d48160939921a8bf6802 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fegen.ado @@ -0,0 +1,76 @@ +*! 
version 2.9.0 28mar2017 +* This is just a modified version of Statacorp's -egen- +program define fegen, byable(onecall) // sortpreserve +version 10 + Parse `0' + local byopt "by(`_byvars')" + cap noi `filename' `if' `in', type(`type') name(`name') args(`args') `byopt' `options' + if (_rc) { + cap drop `name' + exit _rc + } + qui count if missing(`name') + if (`r(N)') { + local val = plural(r(N), "value") + di as text "(" r(N) " missing `val' generated)" + } + + * MINIMUM SYNTAX OF A "fegen_ABC" FILE: + * syntax [if] [in], type(string) name(string) args(string) [by(varlist)] +end + +program define Parse + * SYNTAX: fegen [type] newvar = fcn(args) [if] [in] [, options] + + * [type] newvar + gettoken type 0 : 0, parse(" =(") + gettoken name 0 : 0, parse(" =(") + if `"`name'"'=="=" { + local name `"`type'"' + local type : set type + } + else { + gettoken eqsign 0 : 0, parse(" =(") + if `"`eqsign'"' != "=" { + error 198 + } + } + confirm new variable `name' + + * fcn(args) + gettoken fcn 0 : 0, parse(" =(") + gettoken args 0 : 0, parse(" ,") match(par) + local filename fegen_`fcn' + capture qui findfile `filename'.ado + if (`"`r(fn)'"' == "") { + di as error "unknown ado file `fcn'()" + exit 133 + } + if `"`par'"' != "(" { + exit 198 + } + if `"`args'"' == "_all" | `"`args'"' == "*" { + unab args : _all + local args : list args - _sortindex + } + + * [if] [in] [, options] + syntax [if] [in] [, *] + + * Return results + local params type name fcn filename args in if options + foreach x of local params { + * di as text "[`x'] = [" as result "``x''" as text "]" + c_local `x' "``x''" + } +end + + +/* + Note, the utility routine should not present unnecessary messages + and must return nonzero if there is a problem. The new variable + can be left created or not; it will be automatically dropped. + If the return code is zero, a count of missing values is automatically + presented. 
+*/ + diff --git a/110/replication_package/replication/ado/plus/f/fegen.sthlp b/110/replication_package/replication/ado/plus/f/fegen.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..315876307bcf815ac26e100d6ed34803cce3ea6a --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fegen.sthlp @@ -0,0 +1,80 @@ +{smcl} +{* *! version 1.9.0 10jan2017}{...} +{vieweralsosee "ftools" "help ftools"}{...} +{vieweralsosee "[R] egen" "help egen"}{...} +{vieweralsosee "" "--"}{...} +{vieweralsosee "egenmore" "help egenmore"}{...} +{viewerjumpto "Syntax" "sort##syntax"}{...} +{title:Title} + +{p2colset 5 13 20 2}{...} +{p2col :{cmd:fegen} {hline 2}}Alternative to egen that optimizes speed{p_end} +{p2colreset}{...} + +{marker syntax}{...} +{title:Syntax} + +{p 8 16 2} +{cmd:fegen} +{dtype} +{newvar} +{cmd:=} +{it:function}{cmd:(}{it:arguments}{cmd:)} +{ifin} +[{cmd:,} {it:options}] + +{title:Included functions} + +{synoptset 32 tabbed}{...} +{synopt:{opth max(exp)}}{p_end} +{synopt:{opth group(varlist)}}note: {varlist} cannot have both string and numeric variables{p_end} +{p2colreset}{...} + +{marker group}{...} +{title:Options for {cmd:group()}} + +{synoptset 22 tabbed}{...} +{synopthdr} +{synoptline} +{synopt : {opt missing}}treat missing values in varlist (either . or "") as any other value when assigning groups, +instead of being assigned to the group missing{p_end} +{synopt : {opt method(str)}}the underlying method used; either {cmd:mata} +(default) or {cmd:stata} (often faster if the inputs are string or nonintegers){p_end} +{synopt : {opt nosort}}speeds up the {cmd:mata} method, but the new IDs will not respect the sort order of the inputs{p_end} +{synopt : {opt ratio(#)}}the same as the {it:hash_ratio} option from {help ftools}{p_end} +{synopt : {opt verbose}}see some internal details{p_end} + + + +{title:How to add a new function} + +First, create a file named _gf{it:NAME}.ado where {it:NAME} is the name of the function. 
+Then, inside the file, use the following scaffolding: + +{tab}{input}{bf}program define _gf{sf:{it:NAME}} +{tab}syntax [if] [in], type(string) name(string) args(string) [by(varlist)] {it:...} +{tab}{tab}tempvar touse +{tab}{tab}qui { +{tab}{tab}{tab}{it:...} +{tab}{tab}{tab}gen `type' `name' = {it:...} if `touse'==1 +{tab}{tab}{tab}{it:...} +{tab}{tab}} +{tab}end{text}{sf} + + +{marker author}{...} +{title:Author} + +{pstd}Sergio Correia{break} +Board of Governors of the Federal Reserve System, USA{break} +{browse "mailto:sergio.correia@gmail.com":sergio.correia@gmail.com}{break} +{p_end} + + +{marker project}{...} +{title:More Information} + +{pstd}{break} +To report bugs, contribute, ask for help, etc. please see the project URL in Github:{break} +{browse "https://github.com/sergiocorreia/ftools"}{break} +{p_end} diff --git a/110/replication_package/replication/ado/plus/f/fegen_group.ado b/110/replication_package/replication/ado/plus/f/fegen_group.ado new file mode 100644 index 0000000000000000000000000000000000000000..221e29cc0c576a05b76a55af5917201c7462faf9 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fegen_group.ado @@ -0,0 +1,129 @@ +*! version 2.23.2 10nov2017 +program define fegen_group + syntax [if] [in] , [by(varlist) type(string)] /// -by- is ignored + name(string) args(string) /// + [Missing Label LName(name) Truncate(numlist max=1 int >= 1) /// + Ratio(string) Verbose METhod(string) noSORT] + + * TODO: support label lname truncate + + loc verbose = ("`verbose'" != "") + loc sort = ("`sort'" != "nosort") + _assert inlist("`method'", "", "stata", "mata", "hash0", "hash1", "gtools") + _assert ("`by'" == ""), msg("by() not supported") + if ("`ratio'"=="") loc ratio . 
+ + local 0 `args' `if' `in' + syntax varlist [if] [in] + + loc is_sorted = ("`: sortedby'" == "`varlist'") | (strpos("`: sortedby'", "`varlist' ")==1) + * Note: we need the space after `varlist' to prevent "id10" being matched with "id1" + + if ("`missing'" == "" & !`is_sorted') { + marksample touse, strok + } + else if ("`if'`in'" != "") { + marksample touse, strok novarlist + } + else if (`is_sorted' & inlist("`method'", "", "stata")) { + * Shortcut if already sorted + loc method stata_sorted + } + + * Choose method if not provided + if ("`method'" == "") { + loc usemata = (c(N) > 5e5) | (c(k) * c(N) > 5e6) | ("`touse'" != "") + loc method = cond(`usemata', "mata", "stata") + } + + // ---------------- + + * If varlist mixes strings and integers, use alternative strategy + loc n1 0 + loc n2 0 + + foreach var of local varlist { + loc type : type `var' + if (substr("`type'", 1, 3) == "str") { + loc ++n1 + } + else { + loc ++n2 + } + } + + // ---------------- + + loc problem = (`n1' > 0) & (`n2' > 0) + if (`problem') { + loc method stata + } + + // ---------------- + + if ("`method'" == "stata") { + Group_FirstPrinciples `varlist' , id(`name') /// + touse(`touse') verbose(`verbose') + } + else if ("`method'" == "stata_sorted") { + Group_FirstPrinciplesSorted `varlist' , id(`name') /// + missing("`missing'") verbose(`verbose') + } + else { + cap noi { + mata: F = factor("`varlist'", "`touse'", `verbose', "`method'", `sort', 0, `ratio', 0) + mata: F.store_levels("`name'") + } + loc rc = c(rc) + cap mata: mata drop F + error `rc' + } + la var `name' "group(`varlist')" +end + + +program define Group_FirstPrinciples, sortpreserve + syntax varlist, id(name) [touse(string) Verbose(integer 0)] + if (`verbose') { + di as smcl "{txt}(method: {res}stata{txt})" + } + + if ("`touse'" == "") { + bys `varlist': gen long `id' = (_n == 1) + qui replace `id' = sum(`id') + } + else { + qui bys `touse' `varlist': gen long `id' = (_n == 1) if `touse' + qui replace `id' = sum(`id') + qui 
replace `id' = . if (`touse' != 1) + } + qui compress `id' +end + + +program define Group_FirstPrinciplesSorted + syntax varlist, id(name) [missing(string) Verbose(integer 0)] + if (`verbose') { + di as smcl "{txt}(method: {res}stata_sorted{txt})" + } + + if ("`missing'" == "") { + by `varlist': gen long `id' = (_n == 1) + qui replace `id' = sum(`id') + } + else { + mata: st_local("exp", invtokens("mi(" :+ tokens("`varlist'") :+ ")", " | ")) + tempvar hasmv + gen byte `hasmv' = `exp' + + qui bys `touse' `varlist': gen long `id' = (_n == 1) if !`hasmv' + qui replace `id' = sum(`id') + qui replace `id' = . if `hasmv' + } + qui compress `id' +end + + +findfile "ftools.mata" +include "`r(fn)'" +exit diff --git a/110/replication_package/replication/ado/plus/f/fisid.ado b/110/replication_package/replication/ado/plus/f/fisid.ado new file mode 100644 index 0000000000000000000000000000000000000000..b4941cec63957b0b4a5c6830b7388620b5c10c72 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fisid.ado @@ -0,0 +1,52 @@ +*! 
version 2.9.0 28mar2017 +program define fisid + syntax varlist [if] [in], [Missok Show] + loc show = ("`show'" != "") + loc missok = ("`missok'" != "") + + marksample touse, novar + + if (!`missok') { + qui cou if `touse' + loc N = r(N) + markout `touse' `varlist', strok + qui cou if `touse' + if r(N) < `N' { + local n : word count `varlist' + local var = cond(`n'==1, "variable", "variables") + di as err "`var' `varlist' should never be missing" + exit 459 + } + } + + mata: fisid("`varlist'", "`touse'", `missok') + + if (!`ok') { + loc n : word count `varlist' + loc var = cond(`n'==1, "variable", "variables") + loc does = cond(`n'==1, "does", "do") + loc msg `var' `varlist' `does' not /// + uniquely identify the observations + di as err "`msg'" + exit 459 + } +end + +mata: +void fisid(string rowvector varnames, + | string scalar touse, + real scalar show) +{ + class Factor scalar F + real scalar ok + + F = factor(varnames, touse, 0, "", 0, 1, ., 0) + ok = F.is_id() + st_local("ok", strofreal(ok)) +} +end + + +findfile "ftools.mata" +include "`r(fn)'" +exit diff --git a/110/replication_package/replication/ado/plus/f/fisid.sthlp b/110/replication_package/replication/ado/plus/f/fisid.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..fc53e1f69e2c78c9dbbc05b56aebd6c8262054a9 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fisid.sthlp @@ -0,0 +1,45 @@ +{smcl} +{* *! 
version 1.9.0 10jan2017}{...} +{vieweralsosee "ftools" "help ftools"}{...} +{vieweralsosee "[R] isid" "help isid"}{...} +{viewerjumpto "Syntax" "fisid##syntax"}{...} +{title:Title} + +{p2colset 5 14 20 2}{...} +{p2col :{cmd:fisid} {hline 2}}Check for unique identifiers{p_end} +{p2colreset}{...} + +{marker syntax}{...} +{title:Syntax} + +{p 8 13 2} +{cmd:fisid} +{varlist} +{ifin} +[{cmd:,} +{opt m:issok}] + +{marker description}{...} +{title:Description} + +{pstd} +{opt fisid} is an alternative to {help isid} +(which checks whether {it:varlist} uniquely identifies the observations.) + + +{marker author}{...} +{title:Author} + +{pstd}Sergio Correia{break} +Board of Governors of the Federal Reserve System, USA{break} +{browse "mailto:sergio.correia@gmail.com":sergio.correia@gmail.com}{break} +{p_end} + + +{marker project}{...} +{title:More Information} + +{pstd}{break} +To report bugs, contribute, ask for help, etc. please see the project URL in Github:{break} +{browse "https://github.com/sergiocorreia/ftools"}{break} +{p_end} diff --git a/110/replication_package/replication/ado/plus/f/flevelsof.ado b/110/replication_package/replication/ado/plus/f/flevelsof.ado new file mode 100644 index 0000000000000000000000000000000000000000..a601c4c54595963b343bbd34534b899237c10b52 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/flevelsof.ado @@ -0,0 +1,131 @@ +*! 
version 2.20.0 10oct2017 +program define flevelsof, rclass + syntax varname [if] [in] , [ /// + /// 1) Options inherited from levelsof + Clean /// returns instead of <`"IBM"' `"Doe Run"'> + LOCal(str) /// also stores result in a given local + MIssing /// include missing values of varname + Separate(str) /// token separator (default is space) + /// 2) Options specific to ftools + FORCEmata /// + Verbose /// + METHOD(string) /// + ] + + * Use -levelsof- for small datasets + if (c(N)<1e6) & ("`forcemata'"=="") { + // TODO: replace this with a call to -tab `varlist',nol m- but only for numeric values + levelsof `varlist' `if' `in', separate(`separate') `missing' local(vals) `clean' + } + else { + _assert (c(N)), msg("no observations") rc(2000) + + * Only create `touse' if needed + if (`"`if'`in'"' != "") { + marksample touse, novarlist + // faster alternative to -count if `touse'- + if (`touse'[1]==0) { + timer on 11 + su `touse', mean + timer off 11 + if (!`r(max)') { + di as txt "(no observations)" + return local levels = "" + return scalar num_levels = 0 + if ("`local'" != "") c_local `local' `"`vals'"' + exit + } + } + } + + /* + if (`"`if'`in'"' != "" | "`missing'" == "") { + if ("`missing'" != "") loc novarlist "novarlist" + timer on 10 + marksample touse, strok `novarlist' + timer off 10 + // faster alternative to -count if `touse'- + if (`touse'[1]==0) { + timer on 11 + su touse, mean + timer off 11 + if (`r(max)') { + di as txt "(no observations)" + exit + } + } + } + */ + + loc clean = ("`clean'"!="") + loc verbose = ("`verbose'" != "") + loc keepmissing = ("`missing'" != "") + if ("`separate'" == "") loc separate " " + loc isnum = strpos("`: type `varlist''", "str")==0 + + mata: flevelsof("`varlist'", "`touse'", `verbose', "`method'", /// + `keepmissing', `isnum', `clean', "`separate'", /// + `c(max_macrolen)') + return add + di as txt `"`vals'"' + return local levels `"`vals'"' + } + + if ("`local'" != "") { + c_local `local' `"`vals'"' + } +end + +findfile 
"ftools.mata" +include "`r(fn)'" + +mata: +mata set matastrict on + +void flevelsof(`String' varlist, + `String' touse, + `Boolean' verbose, + `String' method, + `Boolean' keepmissing, + `Boolean' isnum, + `Boolean' clean, + `String' sep, + `Integer' maxlen) +{ + `Factor' F + `DataRow' keys + `String' ans + + F = factor(varlist, touse, verbose, method, 1, 0, ., 1) + keys = keepmissing ? F.keys' : filter_missing(F.keys)' + st_numscalar("r(num_levels)", cols(keys)) + + if (!cols(keys)) exit() + + if (isnum) { + keys = strofreal(keys, "%40.10g") + } + else if (!clean) { + keys = (char(96) + char(34)) :+ keys :+ (char(34) + char(39)) + } + + ans = invtokens(keys, sep) + if (strlen(ans)>maxlen) { + printf("{err}macro length exceeded\n") + exit(1000) + } + st_local("vals", ans) +} + + +`DataFrame' filter_missing(`DataCol' x) +{ + `Vector' v + assert(cols(x)==1) + v = eltype(x)=="string" ? (x :!= missingof(x)) : rownonmissing(x) + return(select(x, v)) +} + +end + +exit diff --git a/110/replication_package/replication/ado/plus/f/flevelsof.sthlp b/110/replication_package/replication/ado/plus/f/flevelsof.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..5453caa1641bd5b3809e3b0a644b522596839a7a --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/flevelsof.sthlp @@ -0,0 +1,151 @@ +{smcl} +{* *! 
version 2.11.0 08jun2017}{...} +{vieweralsosee "ftools" "help ftools"}{...} +{vieweralsosee "[P] levelsof" "mansection P levelsof"}{...} +{viewerjumpto "Syntax" "flevelsof##syntax"}{...} +{viewerjumpto "Description" "flevelsof##description"}{...} +{viewerjumpto "Options" "flevelsof##options"}{...} +{viewerjumpto "Remarks" "flevelsof##remarks"}{...} +{viewerjumpto "Examples" "flevelsof##examples"}{...} +{viewerjumpto "Stored results" "flevelsof##results"}{...} +{viewerjumpto "References" "flevelsof##references"}{...} +{title:Title} + +{p2colset 5 21 23 2}{...} +{p2col :{cmd:flevelsof} {hline 2}}Levels of variable{p_end} +{p2colreset}{...} + +{marker syntax}{...} +{title:Syntax} + +{p 8 17 2} +{cmd:flevelsof} +{varname} +{ifin} +[{cmd:,} {it:options}] + +{synoptset 21}{...} +{synopthdr} +{synoptline} +{synopt:{opt c:lean}}display string values without compound double quotes{p_end} +{synopt:{opt l:ocal(macname)}}insert the list of values in the local macro {it:macname}{p_end} +{synopt:{opt mi:ssing}}include missing values of {varname} in calculation{p_end} +{synopt:{opt s:eparate(separator)}}separator to serve as punctuation for the values of returned list; default is a space{p_end} +{synopt:{opt force:mata}}prevents calling {help levelsof} for datasets with less than 1mm obs.{p_end} +{synopt:{opt v:erbose}}display debugging information{p_end} +{synoptline} +{p2colreset}{...} + + +{marker description}{...} +{title:Description} + +{pstd} +{cmd:flevelsof} displays a sorted list of the distinct values of {varname}. + + +{marker options}{...} +{title:Options} + +{phang} +{cmd:clean} displays string values without compound double quotes. +By default, each distinct string value is displayed within compound double +quotes, as these are the most general delimiters. If you know that the +string values in {varname} do not include embedded spaces or embedded +quotes, this is an appropriate option. {cmd:clean} +does not affect the display of values from numeric variables. 
+ +{phang} +{cmdab:loc:al(}{it:macname}{cmd:)} inserts the list of values in +local macro {it:macname} within the calling program's space. Hence, +that macro will be accessible after {cmd:flevelsof} has finished. +This is helpful for subsequent use, especially with {helpb foreach}. + +{phang} +{cmdab:mi:ssing} specifies that missing values of {varname} +should be included in the calculation. The default is to exclude them. + +{phang} +{cmdab:s:eparate(}{it:separator}{cmd:)} specifies a separator +to serve as punctuation for the values of the returned list. +The default is a space. A useful alternative is a comma. + + +{marker remarks}{...} +{title:Remarks} + +{pstd} +{cmd:flevelsof} serves two different functions. First, it gives a +compact display of the distinct values of {it:varname}. More commonly, it is +useful when you desire to cycle through the distinct values of +{it:varname} with (say) {cmd:foreach}; see {helpb foreach:[P] foreach}. +{cmd:flevelsof} leaves behind a list in {cmd:r(levels)} that may be used in a +subsequent command. + +{pstd} +{cmd:flevelsof} may hit the {help limits} imposed by your Stata. However, +it is typically used when the number of distinct values of +{it:varname} is modest. + +{pstd} +The terminology of levels of a factor has long been standard in +experimental design. See +{help flevelsof##CC1957:Cochran and Cox (1957, 148)}, +{help flevelsof##F1942:Fisher (1942)}, or +{help flevelsof##Y1937:Yates (1937, 5)}. + + +{marker examples}{...} +{title:Examples} + +{phang}{cmd:. sysuse auto} + +{phang}{cmd:. flevelsof rep78}{p_end} +{phang}{cmd:. display "`r(levels)'"} + +{phang}{cmd:. flevelsof rep78, miss local(mylevs)}{p_end} +{phang}{cmd:. display "`mylevs'"} + +{phang}{cmd:. flevelsof rep78, sep(,)}{p_end} +{phang}{cmd:. display "`r(levels)'"} + +{pstd}Showing value labels when defined:{p_end} +{pstd}{cmd:. flevelsof factor, local(levels)}{break} +{cmd:. 
foreach l of local levels {c -(}}{break} +{cmd:.{space 8}di "-> factor = `: label (factor) `l''"}{break} +{cmd:.}{space 8}{it:whatever}{cmd: if factor == `l'}{break} +{cmd:. {c )-}} + + +{marker results}{...} +{title:Stored results} + +{pstd} +{cmd:flevelsof} stores the following in {cmd:r()}: + +{synoptset 15 tabbed}{...} +{p2col 5 15 19 2: Macros}{p_end} +{synopt:{cmd:r(levels)}}list of distinct values{p_end} +{p2colreset}{...} + + +{marker references}{...} +{title:References} + +{marker CC1957}{...} +{phang} +Cochran, W. G., and G. M. Cox. 1957. {it:Experimental Designs}. 2nd ed. +New York: Wiley. + +{marker F1942}{...} +{phang} +Fisher, R. A. 1942. The theory of confounding in factorial experiments in +relation to the theory of groups. +{it:Annals of Eugenics} 11: 341-353. + +{marker Y1937}{...} +{phang} +Yates, F. 1937. {it:The Design and Analysis of Factorial Experiments}. +Harpenden, England: Technical Communication 35, Imperial Bureau of +Soil Science. +{p_end} diff --git a/110/replication_package/replication/ado/plus/f/fmerge.ado b/110/replication_package/replication/ado/plus/f/fmerge.ado new file mode 100644 index 0000000000000000000000000000000000000000..4346a0b6b3fc4333db21ca7221033a5b53fc2917 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fmerge.ado @@ -0,0 +1,60 @@ +*! version 2.10.0 3apr2017 +* wrapper for join.ado, parsing code based on merge.ado + +program define fmerge + gettoken mtype 0 : 0, parse(" ,") + + _assert inlist("`mtype'", "1:1", "1:m", "m:1", "m:m"), msg("invalid merge type: `mtype'") + _assert "`mtype'"!="m:m", msg("have you read the disclaimer about m:m merges? 
(this merge type is dangerous and not supported)") + _assert "`mtype'"!="1:m", msg("1:m merges not supported, use join with the into() option") + + gettoken token : 0, parse(" ,") + _assert ("`token'"!="_n"), msg("_n not supported as a merge key") + + syntax [varlist(default=none)] using/ [, /// + ASSERT(string) /// + GENerate(passthru) /// + FORCE /// + KEEP(string) /// + KEEPUSing(string) /// + noLabel /// + NOGENerate /// + noNOTEs /// + REPLACE /// + DEBUG /// + noREPort /// + SORTED /// + UPDATE /// + Verbose /// + ] + + if ("`debug'" != "") di as error "warning: -debug- option does nothing" + if ("`force'" != "") di as error "warning: -force- option does nothing" + if ("`sorted'" != "") di as error "warning: -sorted- option does nothing" + _assert ("`replace'" == ""), msg("-replace- option not allowed") + _assert ("`update'" == ""), msg("-update- option not allowed") + + loc uniquemaster = cond("`mtype'" == "1:1", "uniquemaster", "") + + loc check = "`keepusing'"!="" + loc keepusing : list keepusing - varlist + if ("`keepusing'"=="" & `check') { + loc keepnone keepnone // don't keep any variable from using + } + + loc cmd join `keepusing', /// + from(`"`using'"') /// + by(`varlist') /// + keep(`keep') /// + assert(`assert') /// + `keepnone' /// + `generate' /// + `nogenerate' /// + `uniquemaster' /// + `label' /// + `notes' /// + `report' + + if ("`verbose'" != "") di as text `"{bf:[cmd]} {res}`cmd'{txt}"' + `cmd' +end diff --git a/110/replication_package/replication/ado/plus/f/fmerge.sthlp b/110/replication_package/replication/ado/plus/f/fmerge.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..44a0d49eab92cbe37a8ff12c692cb9920b64ac88 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fmerge.sthlp @@ -0,0 +1,106 @@ +{smcl} +{* *! 
version 2.10.0 3apr2017}{...} +{vieweralsosee "ftools" "help ftools"}{...} +{vieweralsosee "join" "help join"}{...} +{vieweralsosee "[R] merge" "help merge"}{...} +{viewerjumpto "Syntax" "fmerge##syntax"}{...} +{title:Title} + +{p2colset 5 15 20 2}{...} +{p2col :{cmd:fmerge} {hline 2}}Merge datasets{p_end} +{p2colreset}{...} + +{pstd} +{cmd:fmerge} is a wrapper for {help join}, +supporting {it:m:1} and {it:1:1} joins. + +{pstd} +The syntax is identical to {help merge}, except for the extra option +{cmd:verbose}, that will show debug information and the underlying {cmd:join} +command. +{p_end} + +{marker syntax}{...} +{title:Syntax} + +{pstd} +One-to-one merge on specified key variables + +{p 8 15 2} +{opt fmer:ge} {cmd:1:1} {varlist} +{cmd:using} {it:{help filename}} [{cmd:,} {it:options}] + + +{pstd} +Many-to-one merge on specified key variables + +{p 8 15 2} +{opt fmer:ge} {cmd:m:1} {varlist} +{cmd:using} {it:{help filename}} [{cmd:,} {it:options}] + + +{synoptset 20 tabbed}{...} +{synopthdr} +{synoptline} +{syntab :Options} +{synopt :{opth keepus:ing(varlist)}}variables to keep from using data; + default is all +{p_end} +{...} +{synopt :{opth gen:erate(newvar)}}name of new variable to mark merge + results; default is {cmd:_merge} +{p_end} +{...} +{synopt :{opt nogen:erate}}do not create {cmd:_merge} variable +{p_end} +{...} +{synopt :{opt nol:abel}}do not copy value-label definitions from using{p_end} +{...} +{synopt :{opt nonote:s}}do not copy notes from using{p_end} +{...} +{synopt :{opt update}}update missing values of same-named variables in master + with values from using {it:(not allowed)} +{p_end} +{...} +{synopt :{opt replace}}replace all values of same-named variables in master + with nonmissing values from using (requires {cmd:update}) {it:(not allowed)} +{p_end} +{...} +{synopt :{opt norep:ort}}do not display match result summary table +{p_end} +{synopt :{opt force}}allow string/numeric variable type mismatch without error {it:(ignored)} +{p_end} 
+{synopt :{opt verbose}}show debug information and the {cmd:join} command used {it:(new)} +{p_end} + +{syntab: Results} +{synopt :{cmd:assert(}{help merge##results:{it:results}}{cmd:)}}specify required match results +{p_end} +{...} +{synopt :{cmd:keep(}{help merge##results:{it:results}}{cmd:)}}specify which match results to keep +{p_end} +{...} + +{synopt :{opt sorted}}do not sort; datasets already sorted {it:(ignored)} +{p_end} +{...} +{synoptline} +{p2colreset}{...} +{p 4 6 2} + + +{marker about}{...} +{title:Author} + +{pstd}Sergio Correia{break} +Board of Governors of the Federal Reserve System, USA{break} +{browse "mailto:sergio.correia@gmail.com":sergio.correia@gmail.com}{break} +{p_end} + + +{title:More Information} + +{pstd}{break} +To report bugs, contribute, ask for help, etc. please see the project URL in Github:{break} +{browse "https://github.com/sergiocorreia/ftools"}{break} +{p_end} diff --git a/110/replication_package/replication/ado/plus/f/fsort.ado b/110/replication_package/replication/ado/plus/f/fsort.ado new file mode 100644 index 0000000000000000000000000000000000000000..1e62646b6c9239c60bc46cda1dee376773f92f68 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fsort.ado @@ -0,0 +1,61 @@ +*! 
version 2.9.0 28mar2017 + +* Possible improvements: allow in, if, reverse sort (like gsort) +* This uses Andrew Maurer's trick to clear the sort order: +* http://www.statalist.org/forums/forum/general-stata-discussion/mata/172131-big-data-recalling-previous-sort-orders + + +program define fsort + syntax varlist, [Verbose] + + loc sortvar : sortedby + + if ("`sortvar'" == "`varlist'") { + exit + } + else if ("`sortvar'" != "") { + * Andrew Maurer's trick to clear `: sortedby' + loc sortvar : word 1 of `sortvar' + loc sortvar_type : type `sortvar' + loc sortvar_is_str = strpos("`sortvar_type'", "str") == 1 + loc val = `sortvar'[1] + + if (`sortvar_is_str') { + qui replace `sortvar' = cond(mi(`"`val'"'), ".", "") in 1 + qui replace `sortvar' = `"`val'"' in 1 + } + else { + qui replace `sortvar' = cond(mi(`val'), 0, .) in 1 + qui replace `sortvar' = `val' in 1 + } + assert "`: sortedby'" == "" + } + + fsort_inner `varlist', `verbose' + sort `varlist' // dataset already sorted by `varlist' but flag `: sortedby' not set +end + + +program define fsort_inner, sortpreserve + syntax varlist, [Verbose] + loc verbose = ("`verbose'" != "") + mata: fsort_inner("`varlist'", "`_sortindex'", `verbose') +end + + +mata: +void fsort_inner(string scalar vars, string scalar sortindex, real scalar verbose) +{ + class Factor scalar F + F = factor(vars, "", verbose, "", ., ., ., 0) + if (!F.is_sorted) { + F.panelsetup() + st_store(., sortindex, invorder(F.p)) + } +} +end + + +findfile "ftools.mata" +include "`r(fn)'" +exit diff --git a/110/replication_package/replication/ado/plus/f/fsort.sthlp b/110/replication_package/replication/ado/plus/f/fsort.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..62b8bed03fa93bb935b0f66d4139c4582c1b5586 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/fsort.sthlp @@ -0,0 +1,63 @@ +{smcl} +{* *! 
version 2.9.0 28mar2017}{...} +{vieweralsosee "ftools" "help ftools"}{...} +{vieweralsosee "[R] sort" "help sort"}{...} +{vieweralsosee "[R] gsort" "help gsort"}{...} +{viewerjumpto "Syntax" "fsort##syntax"}{...} +{title:Title} + +{p2colset 5 14 20 2}{...} +{p2col :{cmd:fsort} {hline 2}}Sort by categorical variables{p_end} +{p2colreset}{...} + +{marker syntax}{...} +{title:Syntax} + +{p 8 13 2} +{cmd:fsort} +{varlist} +[{cmd:,} {opt v:erbose}] + +{p 8 13 2}not implemented: +{cmd:fsort} +{varlist} +{ifin} + +{p 8 14 2}not implemented: +{cmd:fsort} +[{cmd:+}|{cmd:-}] +{varname} +[[{cmd:+}|{cmd:-}] +{varname} {it:...}] + +{marker description}{...} +{title:Description} + +{pstd} +{opt fsort} is an alternative to {help sort} and {help gsort}, with some differences: + +{synoptset 3 tabbed}{...} +{synopt:1)}It expects the variables to represent categories (it would be quite slow to use it to sort a normal random variable){p_end} +{synopt:2)}{varlist} cannot have both string and numeric variables{p_end} +{synopt:3)}The sort is always stable{p_end} +{synopt:3)}It is is faster than {cmd:sort} only with large datasets (above 200k obs.){p_end} +{synopt:4)}(wip) It allows {it:if} and {it:in} options{p_end} +{p2colreset}{...} + + +{marker author}{...} +{title:Author} + +{pstd}Sergio Correia{break} +Board of Governors of the Federal Reserve System, USA{break} +{browse "mailto:sergio.correia@gmail.com":sergio.correia@gmail.com}{break} +{p_end} + + +{marker project}{...} +{title:More Information} + +{pstd}{break} +To report bugs, contribute, ask for help, etc. 
please see the project URL in Github:{break} +{browse "https://github.com/sergiocorreia/ftools"}{break} +{p_end} diff --git a/110/replication_package/replication/ado/plus/f/ftab.ado b/110/replication_package/replication/ado/plus/f/ftab.ado new file mode 100644 index 0000000000000000000000000000000000000000..decfb023426480e4227f8de53781becbaaeca40b --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/ftab.ado @@ -0,0 +1,181 @@ +// This is just a prototype + +program define ftab + syntax varlist [if] [in] , /// + [SELect(str) Order(str)] /// + [Missing] /// + [Verbose] + + loc verbose = ("`verbose'" != "") + + // Trim data + if ("`missing'" == "") { + marksample touse, strok + } + else if ("`if'`in'" != "") { + marksample touse, strok novarlist + } + + //tempname table + mata: ftab("`varlist'", "`touse'", "`table'", `verbose') + ParseSelect, select(`select') order(`order') + di as error "`select'-`Select'-`Selectint'-`order'" + //Display, variable(`varlist') table(`table') +end + + +program ParseSelect +* Taken from groups.ado from njc (!!!) + // select option + syntax, [select(str) order(str)] + if "`select'" != "" { + if real("`select'") < . 
{ + capture confirm integer number `select' + if _rc { + di as inp "`select' " /// + as err "invalid argument for " /// + as inp "select()" + exit 198 + } + local Selectint 1 + } + else { + tokenize "`select'", parse(" ><=") + local w "`1'" + local W : subinstr local select "`w'" "" + + if lower(substr("`w'",1,1)) == "r" { + local w = lower("`w'") + } + + local OK 0 + foreach s in freq percent Freq Percent rfreq /// + rpercent vpercent Vpercent rvpercent { + if "`w'" == substr("`s'",1,length("`w'")) { + local OK 1 + local Select "``s''" + continue, break + } + } + + // selection should specify an equality or inequality + qui count if 1 `W' + if _rc | !`OK' { + di as inp "`select' " /// + as err "invalid argument for " /// + as inp "select()" + exit 198 + } + + local Selectint 0 + } + } + c_local Select `Select' + c_local Selectint `Selectint' + + // order option + if "`order'" != "" { + if `: word count `order'' > 1 { + di as err "invalid " as inp "order()" as err "option" + exit 198 + } + + local orderlist "h hi hig high l lo low" + if !`: list order in orderlist' { + di as inp "`order' " /// + as err "invalid argument for " /// + as inp "order()" + exit 198 + } + local order = substr("`order'",1,1) + } + c_local order `order' +end + + +program define Display + syntax, variable(name) table(name) + tempname mytab + + + loc label : var label `variable' + loc cols : rownames `table' , quoted + loc rows : colnames `table' , quoted + + // Raw + matrix list `table', title(`label') format(%8.2g) + + // More detailed + // TODO: WRAP HEADER + di + .`mytab' = ._tab.new, col(4) lmargin(2) comma + .`mytab'.width 13 | 12 12 12 + .`mytab'.pad . 2 2 2 + .`mytab'.numfmt . 
%8.0g %8.0g %4.2f + .`mytab'.titlefmt %12s %8s %8s %8s + .`mytab'.sep, top + .`mytab'.titles "`label'" `rows' + // .`mytab'.row "" `table'[1, 1] `table'[1, 2] `table'[1, 3] + +end + + +findfile "ftools.mata" +include "`r(fn)'" + +mata: +mata set matastrict on + +void ftab(`Varname' var, + `String' touse, + `String' mat_name, + `Boolean' verbose) +{ + `Factor' F + `Vector' perc, smpl + `Matrix' ans + `StringMatrix' keys + //`StringMatrix' rowstripe, colstripe + + F = factor(var, touse, verbose) + smpl = F.counts :> 50 + perc = F.counts :/ colsum(F.counts) :* 100 + ans = F.counts, perc, runningsum(perc) + //sums = runningsum(F.counts) + //perc = sums :/ sums[rows(sums)] :* 100 + + ans = select(ans, smpl) + keys = select(isreal(F.keys) ? strofreal(F.keys) : F.keys, smpl) + + // Sort; recycle smpl vector + smpl = order(ans, -1) + ans = ans[smpl, .] + keys = keys[smpl] + + mm_matlist(ans \ (colsum(ans[., 1..2]), 100), + ("%g", "%6.2f", "%6.2f"), + 3, + keys \ "Total", + ("Freq.", "Percent", "Cum."), F.varlist) // we could use F.varlabels + + //st_matrix(mat_name, (F.counts, sums, perc)) + //rowstripe = J(rows(F.keys), 1, ""), (isreal(F.keys) ? strofreal(F.keys) : F.keys) + //colstripe = ("", "", "" \ "Freq.", "Percent", "Cum.")' + //st_matrixcolstripe(mat_name, colstripe) + //st_matrixrowstripe(mat_name, rowstripe) +} +end + +exit + +* Tests +cap ado uninstall ftools +net install ftools, from("C:/git/ftools/src") + +clear all +sysuse auto +//la var turn "this is a very very VERY long label" +tab turn +ftab turn + +* Benchmark diff --git a/110/replication_package/replication/ado/plus/f/ftools.ado b/110/replication_package/replication/ado/plus/f/ftools.ado new file mode 100644 index 0000000000000000000000000000000000000000..be0e3ecf20ec9413ad689a667ebba2b01df18578 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/ftools.ado @@ -0,0 +1,44 @@ +*! 
version 2.49.0 06may2022 +* This file is just used to compile ftools.mlib + +program define ftools + syntax, [*] + + if ("`options'" == "") loc options "check" + + if inlist("`options'", "check", "compile") { + if ("`options'"=="compile") loc force "force" + ms_get_version ftools // included in this package + // maybe just add all fns explicitly? + loc functions Factor*() factor*() _factor*() join_factors() /// + __fload_data() __fstore_data() ftools*() __factor*() /// + assert_msg() assert_in() assert_boolean() _assert_abort() /// bin_order() + aggregate_*() select_nm_*() rowproduct() /// + create_mask() update_mask() is_rowvector() clip() inrange() /// + varlist_is_hybrid() varlist_is_integers() /// + unlink_folder() + ms_compile_mata, package(ftools) version(`package_version') `force' fun(`functions') verbose // debug + } + else if "`options'"=="version" { + which ftools + di as text _n "Required packages installed?" + loc reqs moremata + if (c(version)<13) loc reqs `reqs' boottest + foreach req of local reqs { + loc fn `req'.ado + if ("`req'"=="moremata") loc fn `req'.hlp + cap findfile `fn' + if (_rc) { + di as text "{lalign 20:- `req'}" as error "not" _c + di as text " {stata ssc install `req':install from SSC}" + } + else { + di as text "{lalign 20:- `req'}" as text "yes" + } + } + } + else { + di as error "Wrong option for ftools: `options'" + error 999 + } +end diff --git a/110/replication_package/replication/ado/plus/f/ftools.mata b/110/replication_package/replication/ado/plus/f/ftools.mata new file mode 100644 index 0000000000000000000000000000000000000000..5626b9e99dc8a78af36329f129b6ff546aaa060d --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/ftools.mata @@ -0,0 +1,61 @@ +// -------------------------------------------------------------------------- +// Mata Code: Efficiently compute levels of variables (factors/categories) +// -------------------------------------------------------------------------- +// Project URL: 
https://github.com/sergiocorreia/ftools + + +// Miscellanea -------------------------------------------------------------- + loc debug 0 + loc debug_on = cond(`debug', "on", "off") + loc debug_off = cond(`debug', "off", "on") + + set matadebug `debug_on' + mata: mata set matastrict `debug_off' + mata: mata set mataoptimize on + mata: mata set matadebug `debug_on' + mata: mata set matalnum `debug_on' + + +// Versioning --------------------------------------------------------------- + ms_get_version ftools // part of this package + assert("`package_version'" != "") + mata: string scalar ftools_version() return("`package_version'") + mata: string scalar ftools_stata_version() return("`c(stata_version)'") + mata: string scalar ftools_joint_version() return("`package_version'|`c(stata_version)'") + + +// Includes ----------------------------------------------------------------- + findfile "ftools_type_aliases.mata" + include "`r(fn)'" + + findfile "ftools_common.mata" + include "`r(fn)'" + + findfile "ftools_main.mata" + include "`r(fn)'" + + * We have different functions depending on whether cols(data)==1 or >1 + findfile "ftools_hash1.mata" + loc is_vector 1 + include "`r(fn)'" + loc is_vector 0 + include "`r(fn)'" + + * Experimental dependency on gtools (with method(gtools)) + findfile "ftools_plugin.mata" + include "`r(fn)'" + + //findfile "ftools_experimental.mata" + //include "`r(fn)'" + + findfile "fcollapse_functions.mata" + include "`r(fn)'" + + + +// Possible Improvements +// ---------------------- +// 1) Do this in a C plugin; perhaps using khash (MIT-lic) like Pandas +// 2) Use a faster hash function like SpookyHash or CityHash (both MIT-lic) +// 3) Use double hashing instead of linear/quadratic probing +// 4) Compute the hashes in parallel diff --git a/110/replication_package/replication/ado/plus/f/ftools.sthlp b/110/replication_package/replication/ado/plus/f/ftools.sthlp new file mode 100644 index 
0000000000000000000000000000000000000000..b00e6cd98d73219fb8ef0aaee9781f8cbae7227b --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/ftools.sthlp @@ -0,0 +1,584 @@ +{smcl} +{* *! version 2.49.0 06may2022}{...} +{vieweralsosee "fegen" "help fegen"}{...} +{vieweralsosee "fcollapse" "help fcollapse"}{...} +{vieweralsosee "join" "help join"}{...} +{vieweralsosee "fmerge" "help fmerge"}{...} +{vieweralsosee "flevelsof" "help flevelsof"}{...} +{vieweralsosee "fisid" "help fisid"}{...} +{vieweralsosee "fsort" "help fsort"}{...} +{vieweralsosee "" "--"}{...} +{vieweralsosee "[R] egen" "help egen"}{...} +{vieweralsosee "[R] collapse" "help collapse"}{...} +{vieweralsosee "[R] contract" "help contract"}{...} +{vieweralsosee "[R] merge" "help merge"}{...} +{vieweralsosee "[R] levelsof" "help levelsof"}{...} +{vieweralsosee "[R] sort" "help sort"}{...} +{vieweralsosee "" "--"}{...} +{vieweralsosee "moremata" "help moremata"}{...} +{vieweralsosee "reghdfe" "help reghdfe"}{...} +{viewerjumpto "Syntax" "ftools##syntax"}{...} +{viewerjumpto "Creation" "ftools##creation"}{...} +{viewerjumpto "Properties and methods" "ftools##properties"}{...} +{viewerjumpto "Description" "ftools##description"}{...} +{viewerjumpto "Usage" "ftools##usage"}{...} +{viewerjumpto "Example" "ftools##example"}{...} +{viewerjumpto "Remarks" "ftools##remarks"}{...} +{viewerjumpto "Using functions from collapse" "ftools##collapse"}{...} +{viewerjumpto "Experimental/advanced" "ftools##experimental"}{...} +{viewerjumpto "Source code" "ftools##source"}{...} +{viewerjumpto "Author" "ftools##contact"}{...} + +{title:Title} + +{p2colset 5 15 20 2}{...} +{p2col :{cmd:FTOOLS} {hline 2}}Mata commands for factor variables{p_end} +{p2colreset}{...} + +{marker syntax}{...} +{title:Syntax} + +{p 8 16 2} +{it:class Factor scalar} +{bind: }{cmd:factor(}{space 3}{it:varnames} [{space 1} +{cmd:,} +{it:touse}{cmd:,} +{it:verbose}{cmd:,} +{it:method}{cmd:,} +{it:sort_levels}{cmd:,} +{it:count_levels}{cmd:,} 
+{it:hash_ratio}{cmd:,} +{it:save_keys}]{cmd:)} + +{p 8 16 2} +{it:class Factor scalar} +{bind: }{cmd:_factor(}{it:data} [{cmd:,} +{it:integers_only}{cmd:,} +{it:verbose}{cmd:,} +{it:method}{cmd:,} +{it:sort_levels}{cmd:,} +{it:count_levels}{cmd:,} +{it:hash_ratio}{cmd:,} +{it:save_keys}]{cmd:)} + +{p 8 16 2} +{it:class Factor scalar} +{bind: }{cmd:join_factors(}{it:F1}{cmd:,} +{it:F2} [{cmd:,} +{it:count_levels}{cmd:,} +{it:save_keys}{cmd:,} +{it:levels_as_keys}]{cmd:)} + + +{marker arguments}{...} +{synoptset 38 tabbed}{...} +{synopthdr} +{synoptline} +{p2coldent:* {it:string} varnames}names of variables that identify the factors{p_end} +{synopt:{it:string} touse}name of dummy {help mark:touse} variable{p_end} +{p2coldent:}{bf:note:} you can also pass a vector with the obs. index (i.e. the first argument of {cmd:st_data()}){p_end} +{synopt:{it:string} data}transmorphic matrix with the group identifiers{p_end} + +{synopt:{bf:Advanced options:}}{p_end} +{synopt:{it:real} verbose}1 to display debug information{p_end} +{synopt:{it:string} method}hashing method: mata, hash0, hash1, hash2; default is {it:mata} (auto-choose){p_end} +{synopt:{it:real} sort_levels}set to 0 under {it:hash1} to increase speed, but the new levels will not match the order of the varlist{p_end} +{synopt:{it:real} count_levels}set to 0 under {it:hash0} to increase speed, but the {it:F.counts} vector will not be generated +so F{cmd:.panelsetup()}, F{cmd:.drop_obs()}, and related methods will not be available{p_end} +{synopt:{it:real} hash_ratio}size of the hash vector compared to the maximum number of keys (often num. 
obs.){p_end} +{synopt:{it:real} save_keys}set to 0 to increase speed and save memory, +but the matrix {it:F.keys} with the original values of the factors +won't be created{p_end} +{synopt:{it:string} integers_only}whether {it:data} is numeric and takes only {it:integers} or not (unless you are sure of the former, set it to 0){p_end} +{synopt:{it:real} levels_as_keys}if set to 1, +{cmd:join_factors()} will use the levels of F1 and F2 +as the keys (as the data) when creating F12{p_end} +{p2colreset}{...} + + +{marker creation}{...} +{title:Creating factor objects} + +{pstd}(optional) First, you can declare the Factor object: + +{p 8 8 2} +{cmd:class Factor scalar}{it: F}{break} + +{pstd}Then, you can create a factor from one or more categorical variables: + +{p 8 8 2} +{it:F }{cmd:=}{bind: }{cmd:factor(}{it:varnames}{cmd:)} + +{pstd} +If the categories are already in Mata +({cmd:data = st_data(., varnames)}), you can do: + +{p 8 8 2} +{it:F }{cmd:=}{bind: }{cmd:_factor(}{it:data}{cmd:)} + +{pstd} +You can also combine two factors ({it:F1} and {it:F2}): + +{p 8 8 2} +{it:F }{cmd:=}{bind: }{cmd:join_factors(}{it:F1}{cmd:,} {it:F2}{cmd:)} + +{pstd} +Note that the above is exactly equivalent (but faster) than: + +{p 8 8 2} +{it: varnames} {cmd:= invtokens((}{it:F1.varnames}{cmd:,} {it:F2.varnames}{cmd:))}{break} +{it:F} {cmd:=} {cmd:factor(}{it:varnames}{cmd:)} + +{pstd} +If {it:levels_as_keys==1}, it is equivalent to: + +{p 8 8 2} +{it:F }{cmd:=}{bind: }{cmd:_factor((}{it:F1.levels}{cmd:,} {it:F2.levels}{cmd:))} + + +{marker properties}{...} +{title:Properties and Methods} + +{marker arguments}{...} +{synoptset 38 tabbed}{...} + +{synopthdr:properties} +{synoptline} +{synopt:{it:real} F{cmd:.num_levels}}number of levels (distinct values) of the factor{p_end} +{synopt:{it:real} F{cmd:.num_obs}}number of observations of the sample used to create the factor ({cmd:c(N)} if touse was empty){p_end} +{synopt:{it:real colvector} F{cmd:.levels}}levels of the factor; dimension 
{cmd:F.num_obs x 1}; range: {cmd:{1, ..., F.num_levels}}{p_end} +{synopt:{it:transmorphic matrix} F{cmd:.keys}}values of the input varlist that correspond to the factor levels; +dimension {cmd:F.num_levels x 1}; not created if save_keys==0; unordered if sort_levels==0{p_end} +{synopt:{it:real vector} F{cmd:.counts}}frequencies of each level (in the sample set by touse); +dimension {cmd:F.num_levels x 1}; will be empty if count_levels==0{p_end} + +{synopt:{it:string rowvector} F{cmd:.varlist}}name of variables used to create the factor{p_end} +{synopt:{it:string rowvector} F{cmd:.varformats}}formats of the input variables{p_end} +{synopt:{it:string rowvector} F{cmd:.varlabels}}labels of the input variables{p_end} +{synopt:{it:string rowvector} F{cmd:.varvaluelabels}}value labels attached to the input variables{p_end} +{synopt:{it:string rowvector} F{cmd:.vartypes}}types of the input variables{p_end} +{synopt:{it:string rowvector} F{cmd:.vl}}value label definitions used by the input variables{p_end} +{synopt:{it:string} F{cmd:.touse}}name of touse variable{p_end} +{synopt:{it:string} F{cmd:.is_sorted}}1 if the dataset is sorted by F{cmd:.varlist}{p_end} + + +{synopthdr:main methods} +{synoptline} +{synopt:{it:void} F{cmd:.store_levels(}{newvar}{cmd:)}}save +the levels back into the dataset (using the same {it:touse}){p_end} +{synopt:{it:void} F{cmd:.store_keys(}[{it:sort}]{cmd:)}}save +the original key variables into a reduced dataset, including formatting and labels. If {it:sort} is 1, Stata will report the dataset as sorted{p_end} +{synopt:{it:void} F{cmd:.panelsetup()}}compute auxiliary vectors {it:F.info} +and {it:F.p} (see below); used in panel computations{p_end} + + +{synopthdr:ancilliary methods} +{synoptline} +{synopt:{it:real scalar} F{cmd:.equals(}F2{cmd:)}}1 +if {it:F} represents the same data as {it:F2} +(i.e. 
if .num_obs .num_levels .levels .keys and .counts are equal) +{p_end} +{synopt:{it:real scalar} F{opt .nested_within(vec)}}1 +if the factor {it:F} is +{browse "http://scorreia.com/software/reghdfe/faq.html#what-does-fixed-effect-nested-within-cluster-means":nested within} +the column vector {it:vec} +(i.e. if any two obs. with the same factor level also have the same value of {it:vec}). +For instance, it is true if the factor {it:F} represents counties and {it:vec} represents states. +{p_end} +{synopt:{it:void} F{cmd:.drop_obs(}{it:idx}{cmd:)}}update +{it:F} to reflect a change in the underlying dataset, where +the observations listed in the column vector {it:idx} are dropped +(see example below) +{p_end} +{synopt:{it:void} F{cmd:.keep_obs(}{it:idx}{cmd:)}}equivalent +to keeping only the obs. enumerated by {it:idx} and recreating {it:F}; +uses {cmd:.drop_obs()} +{p_end} +{synopt:{it:void} F{cmd:.drop_if(}{it:vec}{cmd:)}}equivalent +to dropping the obs. where {it:vec==0} and recreating {it:F}; +uses {cmd:.drop_obs()} +{p_end} +{synopt:{it:void} F{cmd:.keep_if(}{it:vec}{cmd:)}}equivalent +to keeping the obs. where {it:vec!=0} and recreating {it:F}; +uses {cmd:.drop_obs()} +{p_end} +{synopt:{it:real colvector} F{cmd:.drop_singletons()}}equivalent +to dropping the levels that only appear once, +and their corresponding observations. +The colvector returned contains the observations that need to be excluded +(note: see the source code for some advanced optional arguments). +{p_end} +{synopt:{it:real scalar} F{opt .is_id()}}1 +if {it:F.counts} is always 1 +(i.e. if {it:F.levels} has no duplicates) +{p_end} +{synopt:{it:real vector} F{cmd:.intersect(}{it:vec}{cmd:)}}return +a mask vector equal to 1 if the row of {it:vec} is also on F.keys. 
+Also accepts the integers_only and verbose options: {it:mask = F.intersect(y, 1, 1)} +{p_end} + + +{synopthdr:available after F.panelsetup()} +{synoptline} +{synopt:{it:transmorphic matrix} F{cmd:.sort(}{it:data}{cmd:)}}equivalent to +{cmd:data[F.p, .]} +but calls {cmd:F.panelsetup()} if required; {it:data} is a {it:transmorphic matrix}{p_end} +{synopt:{it:transmorphic matrix} F{cmd:.invsort(}{it:data}{cmd:)}}equivalent to +{cmd:data[invorder(F.p), .]}, so it undoes a previous sort operation. Note that {cmd:F.invsort(F.sort(x))==x}. Also, after used it fills the vector {cmd:F.inv_p = invorder(F.p)} so the operation can be repeated easily. +{p_end} +{synopt:{it:void} F{cmd:._sort(}{it:data}{cmd:)}}in-place version of +{cmd:.sort()}; +slower but uses less memory, as it's based on {cmd:_collate()}{p_end} +{synopt:{it:real vector} F{cmd:.info}}equivalent to {help mf_panelsetup:panelsetup()} +(returns a {it:(num_levels X 2)} matrix with start and end positions of each level/panel).{p_end} +{p2coldent:}{bf:note:} instead of using {cmd:F.info} directly, use panelsubmatrix(): +{cmd:x = panelsubmatrix(X, i, F.info)} and {cmd:panelsum()}(see example at the end){p_end} +{synopt:{it:real vector} F{cmd:.p}}equivalent to {cmd:order(F.levels)} +but implemented with a counting sort that is asymptotically +faster ({it:O(N)} instead of {it:O(N log N)}.{p_end} +{p2coldent:}{bf:note:} do not use {cmd:F.p} directly, as it will be missing if the data is already sorted by the varnames.{p_end} +{p2colreset}{...} + + +{pstd}Notes: + +{synoptset 3 tabbed}{...} +{synopt:- }If you just downloaded the package and want to use the Mata functions directly (instead of the Stata commands), run {stata ftools} once to, which creates the Mata library if needed.{p_end} +{synopt:- }To force compilation of the Mata library, type {stata ftools, compile}{p_end} +{synopt:- }{cmd:F.extra} is an undocumented {help mf_asarray:asarray} +that can be used to store additional information: {cmd:asarray(f.extra, 
"lorem", "ipsum")}; +and retrieve it: {cmd:ipsum = asarray(f.extra, "lorem")}{p_end} +{synopt:- }{cmd:join_factors()} is particularly fast if the dataset is sorted in the same order as the factors{p_end} +{synopt:- }{cmd:factor()} will call {cmd:join_factors()} if appropriate +(2+ integer variables; 10,000+ obs; and method=hash1) +{p_end} + + +{marker description}{...} +{title:Description} + +{pstd} +The {it:Factor} object is a key component of several commands that +manipulate data without having to sort it beforehand: + +{pmore}- {help fcollapse} (alternative to collapse, contract, collapse+merge and some egen functions){p_end} +{pmore}- {help fegen:fegen group}{p_end} +{pmore}- {help fisid}{p_end} +{pmore}- {help join} and {help fmerge} (alternative to m:1 and 1:1 merges){p_end} +{pmore}- {help flevelsof} plug-in alternative to {help levelsof}{p_end} +{pmore}- {help fsort} (note: this is O(N) but with a high constant term){p_end} +{pmore}- freshape{p_end} + +Ancilliary commands include: + +{pmore}- {help local_inlist} return local {it:inlist} based on a variable and a list of values or labels{p_end} + +{pstd} +It rearranges one or more categorical variables into a new variable that takes values from 1 to F.num_levels. You can then efficiently sort any other variable by this, in order to compute groups statistics and other manipulations. + +{pstd} +For technical information, see +{browse "http://stackoverflow.com/questions/8991709/why-are-pandas-merges-in-python-faster-than-data-table-merges-in-r/8992714#8992714":[1]} +{browse "http://wesmckinney.com/blog/nycpython-1102012-a-look-inside-pandas-design-and-development/":[2]}, +and to a lesser degree +{browse "https://my.vertica.com/docs/7.1.x/HTML/Content/Authoring/AnalyzingData/Optimizations/AvoidingGROUPBYHASHWithProjectionDesign.htm":[3]}. 
+ + +{marker usage}{...} +{title:Usage} + +{pstd} +If you only want to create identifiers based on one or more variables, +run something like: + +{inp} + {hline 60} + sysuse auto, clear + mata: F = factor("foreign turn") + mata: F.store_levels("id") + mata: mata drop F + {hline 60} +{txt} + +{pstd} +More complex scenarios would involve some of the following: + +{inp} + {hline 60} + sysuse auto, clear + + * Create factors for foreign data only + mata: F = factor("turn", "foreign") + + * Report number of levels, obs. in sample, and keys + mata: F.num_levels + mata: F.num_obs + mata: F.keys, F.counts + + * View new levels + mata: F.levels[1::10] + + * Store back new levels (on the same sample) + mata: F.store_levels("id") + + * Verify that the results are correct + sort id + li turn foreign id in 1/10 + {hline 60} +{txt} + + +{marker example}{...} +{title:Example: operating on levels of each factor} + +{pstd} +This example shows how to process data for each level of the factor (like {help bysort}). It does so by combining {cmd:F.sort()} with {help mf_panelsetup:panelsubmatrix()}. +{p_end} + +{pstd} +In particular, this code runs a regression for each category of {it:turn}: +{p_end} + +{inp} + {hline 60} + clear all + mata: + real matrix reg_by_group(string depvar, string indepvars, string byvar) + { + class Factor scalar F + real scalar i + real matrix X, Y, x, y, betas + + F = factor(byvar) + Y = F.sort(st_data(., depvar)) + X = F.sort(st_data(., tokens(indepvars))) + betas = J(F.num_levels, 1 + cols(X), .) + + for (i = 1; i <= F.num_levels; i++) { + y = panelsubmatrix(Y, i, F.info) + x = panelsubmatrix(X, i, F.info) , J(rows(y), 1, 1) + betas[i, .] 
= qrsolve(x, y)' + } + return(betas) + } + end + sysuse auto + mata: reg_by_group("price", "weight length", "foreign") + {hline 60} +{text} + + +{marker example2}{...} +{title:Example: Factors nested within another variable} + +{pstd} +You might be interested in knowing if a categorical variable is nested within another, more coarser, variable. +For instance, a variable containing months ("Jan2017") is nested within another containing years ("2017")), +a variable containing counties ("Durham County, NC") is nested within another containing states ("North Carolina"), and so on. +{p_end} + +{pstd} +To check for this, you can follow this example: +{p_end} + +{inp} + {hline 60} + sysuse auto + gen turn10 = int(turn/10) + + mata: + F = factor("turn") + F.nested_within(st_data(., "trunk")) // False + F.nested_within(st_data(., "turn")) // Trivially true + F.nested_within(st_data(., "turn10")) // True + end + {hline 60} +{txt} + +{pstd} +You can also compare two factors directly: +{p_end} + +{inp} + {hline 60} + mata: + F1 = factor("turn") + F2 = factor("turn10") + F1.nested_within(F2.levels) // True + end + {hline 60} +{txt} + + +{marker example3}{...} +{title:Example: Updating a factor after dropping variables} + +{pstd} +If you change the underlying dataset you have to recreate the factor, which is costly. 
As an alternative, you can use {cmd:.keep_obs()} and related methods: +{p_end} + +{inp} + {hline 60} + * Benchmark + sysuse auto, clear + drop if price > 4500 + mata: F1 = factor("turn") + // Quickly inspect results + mata: F1.num_obs, F1.num_levels, hash1(F1.levels) + + * Using F.drop_obs() + sysuse auto, clear + mata + price = st_data(., "price") + F2 = factor("turn") + idx = selectindex(price :> 4500) + mata: F2.num_obs, F2.num_levels, hash1(F2.levels) + F2.drop_obs(idx) + mata: F2.num_obs, F2.num_levels, hash1(F2.levels) + assert(F1.equals(F2)) + end + + * Using the other methods + mata + F2 = factor("turn") + idx = selectindex(price :<= 4500) + F2.keep_obs(idx) + assert(F1.equals(F2)) + + F2 = factor("turn") + F2.drop_if(price :> 4500) + assert(F1.equals(F2)) + + F2 = factor("turn") + F2.keep_if(price :<= 4500) + assert(F1.equals(F2)) + end + {hline 60} +{txt} + + +{marker remarks}{...} +{title:Remarks} + +{pstd} +All-numeric and all-string varlists are allowed, but +hybrid varlists (where some but not all variables are strings) are not possible +due to Mata limitations. +As a workaround, first convert the string variables to numeric (e.g. using {cmd:fegen group()}) and then run your intended command. + +{pstd} +You can pass as {varlist} a string like "turn trunk" +or a tokenized string like ("turn", "trunk"). + +{pstd} +To generate a group identifier, most commands first sort the data by a list of keys (such as {it:gvkey, year}) and then ask if the keys differ from one observation to the other. +Instead, {cmd:ftools} exploits the insights that sorting the data is not required to create an identifier, +and that once an identifier is created, we can then use a {it:counting sort} to sort the data in {it:O(N)} time instead of {it:O log(N)}. + +{pstd} +To create an identifier (that takes a value in {1, {it:#keys}}) we first match each key (composed by one or more numbers and strings) into a unique integer. 
+ For instance, the key {it:gvkey=123, year=2010} is assigned the integer {it:4268248869} with the Mata function {cmd:hash1}. + This identifier can then be used as an index when accessing vectors, bypassing the need for sorts. + +{pstd} +The program tries to pick the hash function that best matches the dataset and input variables. +For instance, if the input variables have a small range of possible values (e.g. if they are of {it:byte} type), we select the {it:hash0} method, which uses a (non-minimal) perfect hashing but might consume a lot of memory. +Alternatively, {it:hash1} is used, which adds {browse "https://www.wikiwand.com/en/Open_addressing":open addressing} to Mata's +{help mf_hash1:hash1} function to create a form of open addressing (that is more efficient than Mata's {help mf_asarray:asarray}). + + +{marker collapse}{...} +{title:Using the functions from {it:fcollapse}} + +{pstd} +You can access the {cmd:aggregate_*()} functions so you can collapse information without resorting to Stata. 
Example: + +{inp} + {hline 60} + sysuse auto, clear + mata: F = factor("turn") + mata: F.panelsetup() + mata: y = st_data(., "price") + mata: sum_y = aggregate_sum(F, F.sort(y), ., "") + mata: F.keys, F.counts, sum_y + + * Benchmark + collapse (sum) price, by(turn) + list + {hline 60} +{txt} + +Functions start with {cmd:aggregate_*()}, and are listed {view fcollapse_functions.mata, adopath asis:here} + + +{marker experimental}{...} +{title:Experimental/advanced functions} + +{p 8 16 2} +{it:real scalar} +{bind: }{cmd:init_zigzag(}{it:F1}{cmd:,} +{it:F2}{cmd:,} +{it:F12}{cmd:,} +{it:F12_1}{cmd:,} +{it:F12_2}{cmd:,} +{it:queue}{cmd:,} +{it:stack}{cmd:,} +{it:subgraph_id}{cmd:,} +{it:verbose}{cmd:)} + +{pstd}Notes: + +{synoptset 3 tabbed}{...} +{synopt:- }Given the bipartite graph formed by F1 and F2, +the function returns the number of disjoin subgraphs (mobility groups){p_end} +{synopt:- }F12 must be set with levels_as_keys==1{p_end} +{synopt:- }For F12_1 and F12_2, you can set save_keys==0{p_end} +{synopt:- }The function fills three useful vectors: queue, stack and subgraph_id{p_end} +{synopt:- }If subgraph_id==0, it the id vector will not be created{p_end} + + +{marker source}{...} +{title:Source code} + +{pstd} +{view ftools.mata, adopath asis:ftools.mata}; +{view ftools_type_aliases.mata, adopath asis:ftools_type_aliases.mata}; +{view ftools_main.mata, adopath asis:ftools_main.mata}; +{view ftools_bipartite.mata, adopath asis:ftools_bipartite.mata} +{view fcollapse_functions.mata, adopath asis:fcollapse_functions.mata} +{p_end} + +{pstd} +Also, the latest version is available online: {browse "https://github.com/sergiocorreia/ftools/source"} + + +{marker author}{...} +{title:Author} + +{pstd}Sergio Correia{break} +{break} +{browse "http://scorreia.com"}{break} +{browse "mailto:sergio.correia@gmail.com":sergio.correia@gmail.com}{break} +{p_end} + + +{marker project}{...} +{title:More Information} + +{pstd}{break} +To report bugs, contribute, ask for help, etc. 
please see the project URL in Github:{break} +{browse "https://github.com/sergiocorreia/ftools"}{break} +{p_end} + + +{marker acknowledgment}{...} +{title:Acknowledgment} + +{pstd} +This project was largely inspired by the works of +{browse "http://wesmckinney.com/blog/nycpython-1102012-a-look-inside-pandas-design-and-development/":Wes McKinney}, +{browse "http://www.stata.com/meeting/uk15/abstracts/":Andrew Maurer} +and +{browse "https://ideas.repec.org/c/boc/bocode/s455001.html":Benn Jann}. +{p_end} + diff --git a/110/replication_package/replication/ado/plus/f/ftools_common.mata b/110/replication_package/replication/ado/plus/f/ftools_common.mata new file mode 100644 index 0000000000000000000000000000000000000000..4bb5c464418ee75e43230b9f4c41f8f279e3215d --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/ftools_common.mata @@ -0,0 +1,226 @@ +// Helper functions ---------------------------------------------------------- +mata: + +`Void' _assert_abort(`Integer' rc, `String' msg, `Boolean' traceback) { + if (traceback) { + _error(rc, msg) + } + else { + printf("{err}%s\n", msg) + exit(rc) // exit(error(rc)) + } +} + + +`Void' assert_msg(`Boolean' t, | `String' msg, `Integer' rc, `Boolean' traceback) +{ + if (args()<2 | msg=="") msg = "assertion is false" + if (args()<3 | rc==.) rc = 3498 + if (args()<4 | rc==.) 
traceback = 1 + if (t==0) _assert_abort(rc, msg, traceback) +} + + +`Void' assert_in(`DataCell' value, `DataRow' valid_values, | string scalar msg) +{ + if (args()<2 | msg=="") msg = "assertion is false; value not in list" + // "anyof(valid_values, value)" <==> "value in valid_values" [Python] + if (!anyof(valid_values, value)) _error(msg) +} + + +`Void' assert_boolean(`DataCell' value, | string scalar msg) +{ + if (args()<2 | msg=="") msg = "assertion is false; value not boolean" + assert_in(value, (0,1), msg) +} + + +// mask: a dummy variable indicating selection (like -touse-) +// Example usage: +// mata: idx = (1,2,5,9)' +// mata: m = create_mask(12, 0, idx, 1) +// mata: update_mask(m, idx, 2) +// mata: update_mask(m, (1,3)', 10) + +`Void' update_mask(`Variable' mask, `Vector' index, `Real' value) +{ + if (!length(index)) return + + // Allow for vector and rowvector masks + if (is_rowvector(index)) { + mask[index] = J(1, cols(index), value) + } + else { + mask[index] = J(rows(index), 1, value) + } +} + + +`Variable' create_mask(`Integer' obs, `Real' default_value, `Vector' index, `Real' value) +{ + `Variable' mask + // Allow for vector and rowvector masks + if (is_rowvector(index)) { + mask = J(1, obs, default_value) + } + else { + mask = J(obs, 1, default_value) + } + update_mask(mask, index, value) + return(mask) +} + + +`Real' clip(`Real' x, `Real' min_x, `Real' max_x) { + return(x < min_x ? min_x : (x > max_x ? 
max_x : x)) +} + + +`Matrix' inrange(`Matrix' x, `Matrix' lb, `Matrix' ub) +{ + return(lb :<= x :& x :<= ub) +} + + +`Boolean' is_rowvector(`DataFrame' x) { + return(orgtype(x) == "rowvector") +} + + +// Return 1 if all the variables are integers +`Boolean' varlist_is_integers(`Varlist' varlist, `DataFrame' data) +{ + `Integer' i + `Integer' num_vars + `String' type + + if (eltype(data) == "string") { + return(0) + } + + num_vars = cols(varlist) + for (i = 1; i <= num_vars; i++) { + type = st_vartype(varlist[i]) + if (anyof(("byte", "int", "long"), type)) { + continue + } + if (round(data[., i])==data[., i]) { + continue + } + return(0) + } + return(1) +} + + +// Return 1 if the varlist has string and numeric types +`Boolean' varlist_is_hybrid(`Varlist' varlist) +{ + `Boolean' first_is_num + `Integer' i + `Integer' num_vars + + num_vars = cols(varlist) + first_is_num = st_isnumvar(varlist[1]) + for (i = 2; i <= num_vars; i++) { + if (first_is_num != st_isnumvar(varlist[i])) { + return(1) + //_error(999, "variables must be all numeric or all strings") + } + } + return(0) +} + + +`DataFrame' __fload_data(`Varlist' varlist, + | `DataCol' touse, + `Boolean' touse_is_selectvar) +{ + `Integer' num_vars + `Boolean' is_num + `Integer' i + `DataFrame' data + + if (args()<2) touse = . + if (args()<3) touse_is_selectvar = 1 // can be selectvar (a 0/1 mask) or an index vector + + varlist = tokens(invtokens(varlist)) // accept both types + assert_msg(!varlist_is_hybrid(varlist), "variables must be all numeric or all strings", 999) + is_num = st_isnumvar(varlist[1]) + + // idx = touse_is_selectvar ? . : touse + // selectvar = touse_is_selectvar ? touse : . + if (is_num) { + data = st_data(touse_is_selectvar ? . : touse , varlist, touse_is_selectvar ? touse : .) + } + else { + data = st_sdata(touse_is_selectvar ? . : touse , varlist, touse_is_selectvar ? touse : .) 
+ } + return(data) +} + + +`Void' __fstore_data(`DataFrame' data, + `Varname' newvar, + `String' type, + | `String' touse) +{ + `RowVector' idx + idx = st_addvar(type, newvar) + if (substr(type, 1, 3) == "str") { + if (touse == "") st_sstore(., idx, data) + else st_sstore(., idx, touse, data) + } + else { + if (touse == "") st_store(., idx, data) + else st_store(., idx, touse, data) + } +} + + +// Based on Nick Cox's example +// https://www.statalist.org/forums/forum/general-stata-discussion/general/1330558-product-of-row-elements?p=1330561#post1330561 +`Matrix' rowproduct(`Matrix' X) +{ + `Integer' i, k + `Matrix' prod + k = cols(X) + if (k==1) return(X) + prod = X[,1] + for(i = 2; i<=k; i++) { + prod = prod :* X[,i] + } + return(prod) +} + + + +`Void' unlink_folder(`String' path, `Boolean' verbose) +{ + // We are SUPER careful in only removing certain files... so if there are other files this function will fail + `StringVector' fns, patterns + `Integer' i, j, num_dropped + + if (!direxists(path)) exit() + if (verbose) printf("{txt}Removing folder and its contents: {res}%s{txt}\n", path) + + num_dropped = 0 + patterns = ("*.tmp" \ "*.log" \ "parallel_code.do") + + for (j=1; j<=rows(patterns); j++){ + fns = dir(path, "files", patterns[j], 1) + for (i=1; i<=rows(fns); i++) { + unlink(fns[i]) + ++num_dropped + } + } + + if (verbose) printf("{txt} - %f files removed\n", num_dropped) + + rmdir(path) + if (verbose) printf("{txt} - Folder removed\n") +} + + +end diff --git a/110/replication_package/replication/ado/plus/f/ftools_hash1.mata b/110/replication_package/replication/ado/plus/f/ftools_hash1.mata new file mode 100644 index 0000000000000000000000000000000000000000..8700686b0d2e93543acc90139f6cdf1fb31ad81c --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/ftools_hash1.mata @@ -0,0 +1,137 @@ + // Hash tables + // 1) hash0 - Perfect hashing (use the value as the hash) + // 2) hash1 - Use hash1() with open addressing (linear probing) + +// Compilation 
options ------- +assert inlist(`is_vector', 0, 1) +if (`is_vector') { + loc suffix +} +else { + loc suffix ", ." +} +// --------------------------- + +mata: + +// Open addressing hash function (linear probing) +// Use this for non-integers (2.5, "Bank A") and big ints (e.g. 2014124233573) + +`Factor' __factor_hash1_`is_vector'( + `DataFrame' data, + `Boolean' verbose, + `Integer' dict_size, + `Boolean' sort_levels, + `Integer' max_numkeys, + `Boolean' save_keys) +{ + `Factor' F + `Integer' h, num_collisions, j, val + `Integer' obs, start_obs, num_obs, num_vars + `Vector' dict + `Vector' levels // new levels + `Vector' counts + `Vector' p + `DataFrame' keys + `DataRow' key, last_key + `String' msg + + + num_obs = rows(data) + num_vars = cols(data) + assert(dict_size > 0 & dict_size < .) + assert ((num_vars > 1) + (`is_vector') == 1) // XOR + dict = J(dict_size, 1, 0) + levels = J(num_obs, 1, 0) + keys = J(max_numkeys, num_vars, missingof(data)) + counts = J(max_numkeys, 1, 1) // keys are at least present once! + + j = 0 // counts the number of levels; at the end j == num_levels + val = J(0, 0, .) + num_collisions = 0 + last_key = J(0, 0, missingof(data)) + + for (obs = 1; obs <= num_obs; obs++) { + key = data[obs`suffix'] + + // (optional) Speedup when dataset is already sorted + // (at a ~10% cost for when it's not) + if (last_key == key) { + start_obs = obs + do { + obs++ + } while (obs <= num_obs ? 
data[obs`suffix'] == last_key : 0 ) + levels[|start_obs \ obs - 1|] = J(obs - start_obs, 1, val) + counts[val] = counts[val] + obs - start_obs + if (obs > num_obs) break + key = data[obs`suffix'] + } + + // Compute hash and retrieve the level the key is assigned to + h = hash1(key, dict_size) + val = dict[h] + + // (new key) The key has not been assigned to a level yet + if (val == 0) { + val = dict[h] = ++j + keys[val`suffix'] = key + } + else if (key == keys[val`suffix']) { + counts[val] = counts[val] + 1 + } + // (collision) Another key already points to the same dict slot + else { + // Look up for an empty slot in the dict + + // Linear probing, not very sophisticate... + do { + ++num_collisions + ++h + if (h > dict_size) h = 1 + val = dict[h] + + if (val == 0) { + dict[h] = val = ++j + keys[val`suffix'] = key + break + } + if (key == keys[val`suffix']) { + counts[val] = counts[val] + 1 + break + } + } while (1) + } + + levels[obs] = val + last_key = key + } // end for >>> + + dict = . // save memory + + if (save_keys | sort_levels) keys = keys[| 1 , 1 \ j , . |] + counts = counts[| 1 \ j |] + + if (sort_levels & j > 1) { + // bugbug: replace with binsort? + p = order(keys, 1..num_vars) // this is O(K log K) !!! + if (save_keys) keys = keys[p, .] // _collate(keys, p) + counts = counts[p] // _collate(counts, p) + levels = rows(levels) > 1 ? invorder(p)[levels] : 1 + } + p = . 
// save memory + + + if (verbose) { + msg = "{txt}(%s hash collisions - %4.2f{txt}%%)\n" + printf(msg, strofreal(num_collisions), num_collisions / num_obs * 100) + } + + F = Factor() + F.num_levels = j + if (save_keys) swap(F.keys, keys) + swap(F.levels, levels) + swap(F.counts, counts) + return(F) +} + +end diff --git a/110/replication_package/replication/ado/plus/f/ftools_main.mata b/110/replication_package/replication/ado/plus/f/ftools_main.mata new file mode 100644 index 0000000000000000000000000000000000000000..10e002a65e2b0e484c2624bec55ad374fbecb862 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/ftools_main.mata @@ -0,0 +1,976 @@ +// Main class --------------------------------------------------------------- +mata: + +class Factor +{ + `Integer' num_levels // Number of levels + `Integer' num_obs // Number of levels + `Varname' touse // Name of touse variable + `Varlist' varlist // Variable names of keys + `Varlist' varformats, varlabels, varvaluelabels, vartypes + `Dict' vl + `Vector' levels // levels that match the keys + `DataRow' keys // Set of keys found + `Vector' counts // Count of the levels/keys + `Matrix' info + `Vector' p + `Vector' inv_p // inv_p = invorder(p) + `String' method // Hash fn used + //`Vector' sorted_levels + `Boolean' is_sorted // Is varlist==sorted(varlist)? + `StringRowVector' sortedby // undocumented; save sort order of dataset + `Boolean' panel_is_setup + `Boolean' levels_as_keys // when running F3=join_factors(F1, F2), use the levels of F1/F2 as keys for F3 (useful when F1.keys is missing) + + `Void' new() + `Void' swap() + virtual `Void' panelsetup() // aux. 
vectors + `Void' store_levels() // Store levels in the dta + `Void' store_keys() // Store keys & format/lbls + `DataFrame' sort() // Initialize panel view + `Void' _sort() // as above but in-place + `DataFrame' invsort() // F.invsort(F.sort(x))==x + + `Boolean' nested_within() // True if nested within a var + `Boolean' equals() // True if F1 == F2 + + `Void' __inner_drop() // Adjust to dropping obs. + virtual `Vector' drop_singletons() // Adjust to dropping obs. + virtual `Void' drop_obs() // Adjust to dropping obs. + `Void' keep_obs() // Adjust to dropping obs. + `Void' drop_if() // Adjust to dropping obs. + `Void' keep_if() // Adjust to dropping obs. + `Boolean' is_id() // 1 if all(F.counts:==1) + + `Vector' intersect() // 1 if Y intersects with F.keys + virtual `Void' cleanup_before_saving() // set .vl and .extra to missing + + `Dict' extra // keep for compatibility with reghdfe v5 +} + + +`Void' Factor::new() +{ + keys = J(0, 1, .) + varlist = J(1, 0, "") + info = J(0, 2, .) + counts = J(0, 1, .) + p = J(0, 1, .) + inv_p = J(0, 1, .) 
+ touse = "" + panel_is_setup = 0 + is_sorted = 0 + extra = asarray_create("string", 1, 20) // keep for compatibility with reghdfe v5 +} + + +`Void' Factor::swap(`Factor' other) +{ + ::swap(this.num_levels, other.num_levels) + ::swap(this.num_obs, other.num_obs) + ::swap(this.touse, other.touse) + ::swap(this.varlist, other.varlist) + ::swap(this.varformats, other.varformats) + ::swap(this.varlabels, other.varlabels) + ::swap(this.varvaluelabels, other.varvaluelabels) + ::swap(this.vartypes, other.vartypes) + ::swap(this.vl, other.vl) + ::swap(this.levels, other.levels) + ::swap(this.keys, other.keys) + ::swap(this.counts, other.counts) + ::swap(this.info, other.info) + ::swap(this.p, other.p) + ::swap(this.inv_p, other.inv_p) + ::swap(this.method, other.method) + ::swap(this.is_sorted, other.is_sorted) + ::swap(this.sortedby, other.sortedby) + ::swap(this.panel_is_setup, other.panel_is_setup) +} + + +`Void' Factor::panelsetup() +{ + // Fill out F.info and F.p + `Integer' level + `Integer' obs + `Vector' index + + if (panel_is_setup) return + + assert(is_sorted==0 | is_sorted==1) + + if (counts == J(0, 1, .)) { + _error(123, "panelsetup() requires the -counts- vector") + } + + if (num_levels == 1) { + info = 1, num_obs + p = 1::num_obs + panel_is_setup = 1 + return + } + + // Equivalent to -panelsetup()- but faster (doesn't require a prev sort) + info = runningsum(counts) + index = 0 \ info[|1 \ num_levels - 1|] + info = index :+ 1 , info + + assert_msg(rows(info) == num_levels & cols(info) == 2, "invalid dim") + assert_msg(rows(index) == num_levels & cols(index) == 1, "invalid dim") + + if (!is_sorted) { + // Compute permutations. Notes: + // - Uses a counting sort to achieve O(N) instead of O(N log N) + // See https://www.wikiwand.com/en/Counting_sort + // - A better implementation can make this parallel for num_levels small + + p = J(num_obs, 1, .) 
+		// Counting-sort scatter: place each observation in its level's slot.
+		// After this loop, p is a permutation such that data[p] groups equal levels.
+		for (obs = 1; obs <= num_obs; obs++) {
+			level = levels[obs]
+			p[index[level] = index[level] + 1] = obs
+		}
+	}
+	panel_is_setup = 1
+}
+
+
+// Return -data- reordered so observations of the same level are contiguous
+// (level 1 first, then level 2, ...). Requires rows(data) == num_obs.
+`DataFrame' Factor::sort(`DataFrame' data)
+{
+	assert_msg(rows(data) == num_obs, "invalid data rows")
+	if (is_sorted) return(data)
+	panelsetup()
+
+	// For some reason, this is much faster that doing it in-place with collate
+	return(cols(data)==1 ? data[p] : data[p, .])
+}
+
+
+// In-place variant of sort(): overwrites -data- via _collate().
+// NOTE(review): declared `Void' yet the early exit uses return(data);
+// presumably callers ignore the value -- confirm Mata accepts a valued
+// return here, otherwise change to a bare -return-.
+`Void' Factor::_sort(`DataFrame' data)
+{
+	if (is_sorted) return(data)
+	panelsetup()
+	assert_msg(rows(data) == num_obs, "invalid data rows")
+	_collate(data, p)
+}
+
+
+// Inverse of sort(): F.invsort(F.sort(x)) == x. Caches inv_p = invorder(p)
+// on first use so repeated calls avoid recomputing the inverse permutation.
+`DataFrame' Factor::invsort(`DataFrame' data)
+{
+	assert_msg(rows(data) == num_obs, "invalid data rows")
+	if (is_sorted) return(data)
+	panelsetup()
+	if (inv_p == J(0, 1, .)) inv_p = invorder(p)
+
+	// For some reason, this is much faster that doing it in-place with collate
+	return(cols(data)==1 ? data[inv_p] : data[inv_p, .])
+}
+
+
+// Save F.levels into the dataset as new variable -newvar-, picking the
+// smallest integer storage type that can hold num_levels.
+`Void' Factor::store_levels(`Varname' newvar)
+{
+	`String' type
+	type = (num_levels<=100 ? "byte" : (num_levels <= 32740 ? "int" : "long"))
+	__fstore_data(levels, newvar, type, touse)
+}
+
+
+// Store the keys as Stata variables (one observation per level), restoring
+// saved formats/labels/value labels; optionally sort the dataset by the keys.
+`Void' Factor::store_keys(| `Integer' sort_by_keys)
+{
+	`String' lbl
+	`Integer' i
+	`StringRowVector' lbls
+	`Vector' vl_keys
+	`StringVector' vl_values
+	if (sort_by_keys == .)
sort_by_keys = 0 + if (st_nobs() != 0 & st_nobs() != num_levels) { + _error(198, "cannot save keys in the original dataset") + } + if (st_nobs() == 0) { + st_addobs(num_levels) + } + assert(st_nobs() == num_levels) + + // Add label definitions + lbls = asarray_keys(vl) + for (i = 1; i <= length(lbls); i++) { + lbl = lbls[i] + vl_keys = asarray(asarray(vl, lbl), "keys") + vl_values = asarray(asarray(vl, lbl), "values") + st_vlmodify(lbl, vl_keys, vl_values) + } + + // Add variables + if (substr(vartypes[1], 1, 3) == "str") { + st_sstore(., st_addvar(vartypes, varlist, 1), keys) + } + else { + st_store(., st_addvar(vartypes, varlist, 1), keys) + } + + // Add formats, var labels, value labels + for (i = 1; i <= length(varlist); i++) { + st_varformat(varlist[i], varformats[i]) + st_varlabel(varlist[i], varlabels[i]) + if (st_isnumvar(varlist[i])) { + st_varvaluelabel(varlist[i], varvaluelabels[i]) + } + } + + // Sort + if (sort_by_keys) { + stata(sprintf("sort %s", invtokens(varlist))) + } +} + + +`Boolean' Factor::nested_within(`DataCol' x) +{ + `Integer' i, j + `DataCell' val, prev_val, mv + `DataCol' y + + mv = missingof(x) + y = J(num_levels, 1, mv) + assert(rows(x) == num_obs) + + assert(!anyof(x, mv)) + //assert(eltype(x)=="string" | eltype(x)=="real") + //if (eltype(x)=="string") { + // assert_msg(!anyof(x, ""), "string vector has missing values") + //} + //else { + // assert_msg(!hasmissing(x), "real vector has missing values") + //} + + for (i = 1; i <= num_obs; i++) { + j = levels[i] // level of the factor associated with obs. 
i + prev_val = y[j] // value of x the last time this level appeared + if (prev_val == mv) { + y[j] = x[i] + } + else if (prev_val != x[i]) { + return(0) + } + } + return(1) +} + + +`Boolean' Factor::equals(`Factor' F) +{ + if (num_obs != F.num_obs) return(0) + if (num_levels != F.num_levels) return(0) + if (keys != F.keys) return(0) + if (counts != F.counts) return(0) + if (levels != F.levels) return(0) + return(1) +} + + +`Void' Factor::keep_if(`Vector' mask) +{ + drop_obs(`selectindex'(!mask)) +} + + +`Void' Factor::drop_if(`Vector' mask) +{ + drop_obs(`selectindex'(mask)) +} + + +`Void' Factor::keep_obs(`Vector' idx) +{ + `Vector' tmp + tmp = J(num_obs, 1, 1) + tmp[idx] = J(rows(idx), 1, 0) + drop_obs(`selectindex'(tmp)) +} + + +`Void' Factor::drop_obs(`Vector' idx) +{ + `Integer' i, j, num_dropped_obs + `Vector' offset + + // assert(all(idx :>0)) + // assert(all(idx :<=num_obs)) + + if (counts == J(0, 1, .)) { + _error(123, "drop_obs() requires the -counts- vector") + } + + num_dropped_obs = rows(idx) + if (num_dropped_obs==0) return + + // Decrement F.counts to reflect dropped observations + offset = levels[idx] // warning: variable will be reused later + assert(rows(offset)==num_dropped_obs) + for (i = 1; i <= num_dropped_obs; i++) { + j = offset[i] + counts[j] = counts[j] - 1 + } + // assert(all(counts :>= 0)) + + // Update contents of F based on just idx and the updated F.counts + __inner_drop(idx) +} + + +// This is an internal method that updates F based on +// i) the list of dropped obs, ii) the *already updated* F.counts +`Void' Factor::__inner_drop(`Vector' idx) +{ + `Vector' dropped_levels, offset + `Integer' num_dropped_obs, num_dropped_levels + + num_dropped_obs = rows(idx) + + // Levels that have a count of 0 are now dropped + dropped_levels = `selectindex'(!counts) // select i where counts[i] == 0 + // if we use rows() instead of length(), dropped_levels would be J(1,0,.) instead of J(0,1,.) 
+ // and we get num_dropped_levels=1 instead of num_dropped_levels=0 + num_dropped_levels = length(dropped_levels) + + // Need to decrement F.levels to reflect that we have fewer levels + // (This is the trickiest part) + offset = J(num_levels, 1, 0) + if (offset != 0) { + offset[dropped_levels] = J(num_dropped_levels, 1, 1) + offset = runningsum(offset) + levels = levels - offset[levels] + } + + // Remove the obs of F.levels that were dropped + levels[idx] = J(num_dropped_obs, 1, .) + levels = select(levels, levels :!= .) + + // Update the remaining properties + num_obs = num_obs - num_dropped_obs + num_levels = num_levels - num_dropped_levels + if (keys != J(0, 1, .)) keys = select(keys, counts) + counts = select(counts, counts) // must be at the end! + + // Clear these out to prevent mistakes + p = J(0, 1, .) + inv_p = J(0, 1, .) + info = J(0, 2, .) + panel_is_setup = 0 +} + + +// KEPT ONLY FOR BACKWARDS COMPAT +`Vector' Factor::drop_singletons(| `Vector' fweight, + `Boolean' zero_threshold) +{ + `Integer' num_singletons + `Vector' mask, idx + `Boolean' has_fweight + `Vector' weighted_counts + + // - By default, this drops all singletons (obs where F.counts==1) + // - If fweights are provided, we'll only drop those singletons with fweight of 1 + // - As a hack, if zero_threshold==1, we'll drop singletons AND all obs where + // "weighted_counts" (actually depvar) is zero + // Also, we multiply by counts so we can track how many actual obs were dropped + + if (zero_threshold == .) zero_threshold = 0 + + if (counts == J(0, 1, .)) { + _error(123, "drop_singletons() requires the -counts- vector") + } + + has_fweight = (args()>=1 & fweight != .) 
+ + if (has_fweight) { + assert(rows(fweight)==num_obs) + this.panelsetup() + weighted_counts = `panelsum'(this.sort(fweight), this.info) + if (zero_threshold) { + mask = (!weighted_counts :| (counts :== 1)) :* counts + } + else { + mask = weighted_counts :== 1 + } + } + else { + mask = (counts :== 1) + } + + num_singletons = sum(mask) + if (num_singletons == 0) return(J(0, 1, .)) + counts = counts - mask + idx = `selectindex'(mask[levels, .]) + + // Update and overwrite fweight + if (has_fweight) { + fweight = num_singletons == num_obs ? J(0, 1, .) : select(fweight, (!mask)[levels]) + } + + // Update contents of F based on just idx and the updated F.counts + __inner_drop(idx) + return(idx) +} + + +`Boolean' Factor::is_id() +{ + if (counts == J(0, 1, .)) { + _error(123, "is_id() requires the -counts- vector") + } + return(allof(counts, 1)) +} + + +`Vector' Factor::intersect(`Vector' y, + | `Boolean' integers_only, + `Boolean' verbose) +{ + `Factor' F + `Vector' index, mask + + if (integers_only == .) integers_only = 0 + if (verbose == .) verbose = 0 + + assert_msg(keys != J(0, 1, .), "must have set save_keys==1") + F = _factor(keys \ y, integers_only, verbose, "", 0, 0, ., 0) + // The code above does the same as _factor(keys\y) but faster + + // Create a mask equal to 1 where the value of Y is in F.keys + mask = J(F.num_levels, 1, 0) + index = F.levels[| 1 \ rows(keys) |] // levels to exclude + mask[index] = J(rows(keys), 1, 1) + + index = F.levels[| rows(keys)+1 \ . |] + mask = mask[index] // expand mask + return(mask) +} + + +`Void' Factor::cleanup_before_saving() +{ + this.vl = this.extra = . 
+} + + +// Main functions ------------------------------------------------------------- + +`Factor' factor(`Varlist' varnames, + | `DataCol' touse, // either string varname or a numeric index + `Boolean' verbose, + `String' method, + `Boolean' sort_levels, + `Boolean' count_levels, + `Integer' hash_ratio, + `Boolean' save_keys) +{ + `Factor' F + `Varlist' vars + `DataFrame' data + `Integer' i, k + `Boolean' integers_only + `Boolean' touse_is_selectvar + `String' var, lbl + `Dict' map + `Vector' keys + `StringVector' values + + if (args()<2 | touse == "") touse = . + + if (strlen(invtokens(varnames))==0) { + printf("{err}factor() requires a variable name: %s") + exit(102) + } + + vars = tokens(invtokens(varnames)) + k = cols(vars) + + // touse is a string with the -touse- variable (a 0/1 mask), unless + // we use an undocumented feature where it is an observation index + if (eltype(touse) == "string") { + assert_msg(orgtype(touse) == "scalar", "touse must be a scalar string") + assert_msg(st_isnumvar(touse), "touse " + touse + " must be a numeric variable") + touse_is_selectvar = 1 + } + else { + touse_is_selectvar = 0 + } + + if (method=="gtools") { + // Warning: touse can't be a vector + if (eltype(touse)=="real") { + assert_msg(touse == ., "touse must be a variable name") + touse = "" + } + F = __factor_gtools(vars, touse, verbose, + sort_levels, count_levels, save_keys) + } + else { + data = __fload_data(vars, touse, touse_is_selectvar) + integers_only = varlist_is_integers(vars, data) // Are the variables integers (so maybe we can use the fast hash)? 
+ F = _factor(data, integers_only, verbose, method, + sort_levels, count_levels, hash_ratio, + save_keys, + vars, touse) + } + + F.sortedby = tokens(st_macroexpand("`" + ": sortedby" + "'")) + + if (!F.is_sorted & cols(F.sortedby)) { + i = min((k, cols(F.sortedby))) + F.is_sorted = vars == F.sortedby[1..i] + } + + if (!F.is_sorted & integers_only & cols(data)==1 & rows(data)>1) { + F.is_sorted = all( data :<= (data[| 2, 1 \ rows(data), 1 |] \ .) ) + } + F.varlist = vars + if (touse_is_selectvar & touse!=.) F.touse = touse + F.varformats = F.varlabels = F.varvaluelabels = F.vartypes = J(1, cols(vars), "") + F.vl = asarray_create("string", 1) + + for (i = 1; i <= k; i++) { + var = vars[i] + F.varformats[i] = st_varformat(var) + F.varlabels[i] = st_varlabel(var) + F.vartypes[i] = st_vartype(var) + F.varvaluelabels[i] = lbl = st_varvaluelabel(var) + if (lbl != "") { + if (st_vlexists(lbl)) { + pragma unset keys + pragma unset values + st_vlload(lbl, keys, values) + map = asarray_create("string", 1) + asarray(map, "keys", keys) + asarray(map, "values", values) + asarray(F.vl, lbl, map) + } + } + } + return(F) +} + + +`Factor' _factor(`DataFrame' data, + | `Boolean' integers_only, + `Boolean' verbose, + `String' method, + `Boolean' sort_levels, + `Boolean' count_levels, + `Integer' hash_ratio, + `Boolean' save_keys, + `Varlist' vars, // hack + `DataCol' touse) // hack +{ + `Factor' F + `Integer' num_obs, num_vars + `Integer' i + `Integer' limit0 + `Integer' size0, size1, dict_size, max_numkeys1 + `Matrix' min_max + `RowVector' delta + `String' msg, base_method + + if (integers_only == .) integers_only = 0 + if (verbose == .) verbose = 0 + if (method == "") method = "mata" + if (sort_levels == .) sort_levels = 1 + if (count_levels == .) count_levels = 1 + if (save_keys == .) 
save_keys = 1 + + // Note: Pick a sensible hash ratio; smaller means more collisions + // but faster lookups and less memory usage + + base_method = method + msg = "invalid method: " + method + assert_msg(anyof(("mata", "hash0", "hash1"), method), msg) + + num_obs = rows(data) + num_vars = cols(data) + assert_msg(num_obs > 0, "no observations") + assert_msg(num_vars > 0, "no variables") + assert_msg(count_levels == 0 | count_levels == 1, "count_levels") + assert_msg(save_keys == 0 | save_keys == 1, "save_keys") + + // Compute upper bound for number of levels + size0 = . + if (integers_only) { + // We must nest the conditions; else they will fail with strings + if (all(data:<=.)) { + min_max = colminmax(data) + delta = 1 :+ min_max[2, .] - min_max[1, .] + (colmissing(data) :> 0) + for (i = size0 = 1; i <= num_vars; i++) { + size0 = size0 * delta[i] + } + } + } + + max_numkeys1 = min((size0, num_obs)) + if (hash_ratio == .) { + if (size0 < 2 ^ 16) hash_ratio = 5.0 + else if (size0 < 2 ^ 20) hash_ratio = 3.0 + else hash_ratio = 1.3 // Standard hash table load factor + } + msg = sprintf("invalid hash ratio %5.1f", hash_ratio) + assert_msg(hash_ratio > 1.0, msg) + size1 = ceil(hash_ratio * max_numkeys1) + size1 = max((size1, 2 ^ 10)) // at least + + if (size0 == .) { + if (method == "hash0") { + printf("{txt}method hash0 cannot be applied, using hash1\n") + } + method = "hash1" + } + else if (method == "mata") { + limit0 = 2 ^ 26 // 2 ^ 28 is 1GB; be careful with memory!!! + // Pick hash0 if it uses less space than hash1 + // (b/c it has no collisions and is sorted at no extra cost) + method = (size0 < limit0) | (size0 < size1) ? "hash0" : "hash1" + } + + dict_size = (method == "hash0") ? size0 : size1 + // Mata hard coded limit! 
(2,147,483,647 rows)
+	// Fix: Mata matrices are capped at 2^31 - 1 rows (2,147,483,647, as the
+	// comment above states), so dict_size == 2^31 must also be rejected;
+	// the previous check (dict_size <= 2 ^ 31) was off by one.
+	assert_msg(dict_size <= 2 ^ 31 - 1, "dict size exceeds Mata limits")
+
+	// Hack: alternative approach
+	// all(delta :< num_obs) --> otherwise we should just run hash1
+	if (base_method == "mata" & method == "hash1" & integers_only & num_vars > 1 & cols(vars)==num_vars & num_obs > 1e5 & all(delta :< num_obs)) {
+		// Faster path for several integer vars: factor the first var, factor
+		// the rest, then join the two factors
+		F = _factor_alt(vars[1], vars[2..num_vars], touse, verbose, sort_levels, count_levels, save_keys)
+		method = "join"
+	}
+	else if (method == "hash0") {
+		// Perfect hash over the integer range: no collisions, sorted keys for free
+		F = __factor_hash0(data, verbose, dict_size, count_levels, min_max, save_keys)
+	}
+	else if (method == "hash1"){
+		// Open-addressing hash: handles strings, non-integers, and huge ranges
+		F = __factor_hash1(data, verbose, dict_size, sort_levels, max_numkeys1, save_keys)
+		if (!count_levels) F.counts = J(0, 1, .)
+	}
+	else {
+		assert(0)
+	}
+
+	F.method = method
+
+	// Sanity-check the dimensions of the factor we just built
+	F.num_obs = num_obs
+	assert_msg(rows(F.levels) == F.num_obs & cols(F.levels) == 1, "levels")
+	if (save_keys==1) assert_msg(rows(F.keys) == F.num_levels, "keys")
+	if (count_levels) {
+		assert_msg(rows(F.counts)==F.num_levels & cols(F.counts)==1, "counts")
+	}
+	if (verbose) {
+		msg = "{txt}(obs: {res}%s{txt}; levels: {res}%s{txt};"
+		printf(msg, strofreal(num_obs, "%12.0gc"), strofreal(F.num_levels, "%12.0gc"))
+		msg = "{txt} method: {res}%s{txt}; dict size: {res}%s{txt})\n"
+		printf(msg, method, method == "join" ?
"n/a" : strofreal(dict_size, "%12.0gc")) + } + F.is_sorted = F.num_levels == 1 // if there is only one level it is already sorted + return(F) +} + + +`Factor' _factor_alt(`Varname' first_var, + `Varlist' other_vars, + `DataCol' touse, + `Boolean' verbose, + `Boolean' sort_levels, + `Boolean' count_levels, + `Boolean' save_keys) +{ + `Factor' F, F1, F2 + F1 = factor(first_var, touse, verbose, "mata", sort_levels, 1, ., save_keys) + F2 = factor(other_vars, touse, verbose, "mata", sort_levels, count_levels, ., save_keys) + F = join_factors(F1, F2, count_levels, save_keys) + return(F) +} + + +`Factor' join_factors(`Factor' F1, + `Factor' F2, + | `Boolean' count_levels, + `Boolean' save_keys, + `Boolean' levels_as_keys) +{ + `Factor' F + `Varlist' vars + `Boolean' is_sorted // is sorted by (F1.varlist F2.varlist) + `Integer' num_levels, old_num_levels, N, M, i, j + `Integer' levels_start, levels_end + `Integer' v, last_v, c + `Integer' num_keys1, num_keys2 + `RowVector' key_idx + `Vector' Y, p, y, levels, counts, idx + `DataFrame' keys + + if (save_keys == .) save_keys = 1 + if (count_levels == .) count_levels = 1 + if (levels_as_keys == .) levels_as_keys = 0 + + if (save_keys & !levels_as_keys & !( rows(F1.keys) & rows(F2.keys)) ) { + _error(123, "join_factors() with save_keys==1 requires the -keys- vector") + } + + is_sorted = 0 + if (F1.sortedby == F2.sortedby & cols(F1.sortedby) > 0) { + vars = F1.varlist, F2.varlist + i = min(( cols(vars) , cols(F1.sortedby) )) + is_sorted = vars == F1.sortedby[1..i] + } + + F1.panelsetup() + Y = F1.sort(F2.levels) + levels = J(F1.num_obs, 1, 0) + if (count_levels | save_keys) counts = J(F1.num_obs, 1, 1) + + if (save_keys) { + if (levels_as_keys) { + keys = J(F1.num_obs, 2, .) 
+ } + else { + num_keys1 = cols(F1.keys) + num_keys2 = cols(F2.keys) + key_idx = (num_keys1 + 1)..(num_keys1 + num_keys2) + keys = J(F1.num_obs, num_keys1 + num_keys2, missingof(F1.keys)) + } + } + N = F1.num_levels + levels_end = num_levels = 0 + + for (i = 1; i <= N; i++) { + y = panelsubmatrix(Y, i, F1.info) + M = rows(y) + old_num_levels = num_levels + + if (M == 1) { + // Case where i matched with only one key of F2 + + levels[++levels_end] = ++num_levels + if (save_keys) { + if (levels_as_keys) { + keys[num_levels, .] = (i, y) + } + else { + keys[num_levels, .] = F1.keys[i, .] , F2.keys[y, .] + } + } + // no need to update counts as it's ==1 + } + else { + // Case where i matched with more than one key of F2 + + // Compute F.levels + if (!is_sorted) { + p = order(y, 1) + y = y[p] + } + idx = runningsum(1 \ (y[2::M] :!= y[1::M-1])) + levels_start = levels_end + 1 + levels_end = levels_end + M + if (!is_sorted) { + levels[|levels_start \ levels_end |] = num_levels :+ idx[invorder(p)] + } + else { + levels[|levels_start \ levels_end |] = num_levels :+ idx + } + + // Compute F.counts + if (count_levels | save_keys) { + last_v = y[1] + c = 1 + for (j=2; j<=M; j++) { + v = y[j] + if (v==last_v) { + c++ + } + else { + counts[++num_levels] = c + c = 1 + + if (save_keys) { + if (levels_as_keys) { + keys[num_levels, .] = (i, last_v) + } + else { + keys[num_levels , key_idx] = F2.keys[last_v, .] + } + } + } + last_v = v // swap? + } + if (c) { + counts[++num_levels] = c + + if (save_keys) { + if (levels_as_keys) { + keys[num_levels, .] = (i, y[M]) + } + else { + keys[num_levels , key_idx] = F2.keys[y[M], .] 
+ } + } + + } + } + else { + num_levels = num_levels + idx[M] + } + + // F.keys: compute the keys for the first factor + if (save_keys & !levels_as_keys) { + keys[| old_num_levels + 1 , 1 \ num_levels , num_keys1 |] = J(idx[M], 1, F1.keys[i, .]) + } + } // end case where M>1 + } // end for + + F = Factor() + F.num_obs = F1.num_obs + F.num_levels = num_levels + F.method = "join" + F.sortedby = F1.sortedby + F.varlist = vars + F.levels_as_keys = levels_as_keys + + if (!is_sorted) levels = F1.invsort(levels) + if (count_levels) counts = counts[| 1 \ num_levels |] + swap(F.levels, levels) + if (save_keys) { + keys = keys[| 1 , 1 \ num_levels , . |] + swap(F.keys, keys) + } + swap(F.counts, counts) + + // Extra stuff (labels, etc) + F.is_sorted = is_sorted + return(F) +} + + +`Factor' __factor_hash0( + `Matrix' data, + `Boolean' verbose, + `Integer' dict_size, + `Boolean' count_levels, + `Matrix' min_max, + `Boolean' save_keys) +{ + `Factor' F + `Integer' K, i, num_levels, num_obs, j + `Vector' hashes, dict, levels + `RowVector' min_val, max_val, offsets, has_mv + `Matrix' keys + `Vector' counts + + // assert(all(data:<=.)) // no .a .b ... + + K = cols(data) + num_obs = rows(data) + has_mv = (colmissing(data) :> 0) + min_val = min_max[1, .] + max_val = min_max[2, .] 
+ has_mv + + // Build the hash: + // Example with K=2: + // hash = (col1 - min(col1)) * (max_col2 - min_col2 + 1) + (col2 - min_col2) + + offsets = J(1, K, 1) + // 2x speedup when K = 1 wrt the formula with [., K] + if (K == 1) { + hashes = editmissing(data, max_val) :- (min_val - 1) + } + else { + hashes = editmissing(data[., K], max_val[K]) :- (min_val[K] - 1) + for (i = K - 1; i >= 1; i--) { + offsets[i] = offsets[i+1] * (max_val[i+1] - min_val[i+1] + 1) + hashes = hashes + (editmissing(data[., i], max_val[i]) :- min_val[i]) :* offsets[i] + } + } + assert(offsets[1] * (max_val[1] - min_val[1] + 1) == dict_size) + + + // Once we have the -hashes- vector, these are the steps: + // 1) Create a -dict- vector with more obs. than unique values (our hash table) + // 2) Mark the slots of dict that map to a hash "dict[hashes] = J..." + // 3) Get the obs. of those slots "levels = selectindex(dict)" + // Note that "num_levels = rows(levels)" + // Also, at this point -levels- is just the sorted unique values of -hashes- + // 4) We can get the keys based on levels by undoing the hash + // 5) To create new IDs, do this trick: + // dict[levels] = 1::num_levels + // levels = dict[hashes] + + // Build the new keys + dict = J(dict_size, 1, 0) + // It's faster to do dict[hashes] than dict[hashes, .], + // but that fails if dict is 1x1 + if (length(dict) > 1) { + dict[hashes] = J(num_obs, 1, 1) + } + else { + dict = 1 + } + + levels = `selectindex'(dict) + + num_levels = rows(levels) + dict[levels] = 1::num_levels + + if (save_keys) { + if (K == 1) { + keys = levels :+ (min_val - 1) + if (has_mv) keys[num_levels] = . + } + else { + keys = J(num_levels, K, .) + levels = levels :- 1 + for (i = 1; i <= K; i++) { + keys[., i] = floor(levels :/ offsets[i]) + levels = levels - keys[., i] :* offsets[i] + if (has_mv[i]) keys[., i] = editvalue(keys[., i], max_val[i] - min_val[i], .) 
+ } + keys = keys :+ min_val + } + } + + // faster than "levels = dict[hashes, .]" + levels = rows(dict) > 1 ? dict[hashes] : hashes + + hashes = dict = . // Save memory + + if (count_levels) { + // We need a builtin function that does: increment(counts, levels) + // Using decrement+while saves us 10% time wrt increment+for + counts = J(num_levels, 1, 0) + i = num_obs + 1 + while (--i) { + j = levels[i] + counts[j] = counts[j] + 1 + } + // maybe replace this with a permutation of levels plus counts[j] = i-last_i + } + + F = Factor() + F.num_levels = num_levels + if (save_keys) swap(F.keys, keys) + swap(F.levels, levels) + swap(F.counts, counts) + return(F) +} + + +`Factor' __factor_hash1( + `DataFrame' data, + `Boolean' verbose, + `Integer' dict_size, + `Boolean' count_levels, + `Integer' max_numkeys1, + `Boolean' save_keys) +{ + if (cols(data)==1) { + return(__factor_hash1_1(data, verbose, dict_size, count_levels, max_numkeys1, save_keys)) + } + else { + return(__factor_hash1_0(data, verbose, dict_size, count_levels, max_numkeys1, save_keys)) + } +} + +end diff --git a/110/replication_package/replication/ado/plus/f/ftools_plugin.mata b/110/replication_package/replication/ado/plus/f/ftools_plugin.mata new file mode 100644 index 0000000000000000000000000000000000000000..28383a3e107136966dd839779fe2fd3ac956a319 --- /dev/null +++ b/110/replication_package/replication/ado/plus/f/ftools_plugin.mata @@ -0,0 +1,74 @@ +mata: + +// Call gtools plugin +`Factor' __factor_gtools( + `Varlist' vars, + `String' touse, + `Boolean' verbose, + `Boolean' sort_levels, + `Boolean' count_levels, + `Boolean' save_keys) +{ + `Factor' F + `Integer' num_vars, num_levels, num_obs + `String' levels_var, tag_var, counts_var, cmd, if_cmd, counts_cmd + `Vector' levels, counts, idx + `Matrix' keys + + // Options + if (verbose == .) verbose = 0 + if (sort_levels == .) sort_levels = 1 + if (count_levels == .) count_levels = 1 + if (save_keys == .) 
save_keys = 1
+
+	assert_msg(count_levels == 0 | count_levels == 1, "count_levels")
+	assert_msg(save_keys == 0 | save_keys == 1, "save_keys")
+
+	// Load data, based on output from -gegen group-
+	levels_var = st_tempname()
+	counts_var = st_tempname()
+
+	// Run gegen group() from Stata
+	// fill(data) makes gegen store the per-level counts in the first r(J)
+	// observations of counts_var, which is how they are read back below
+	if_cmd = touse == "" ? "" : " if " + touse
+	counts_cmd = count_levels ? sprintf("counts(%s) fill(data)", counts_var) : ""
+	cmd = "gegen long %s = group(%s)%s, missing %s"
+	cmd = sprintf(cmd, levels_var, invtokens(vars), if_cmd, counts_cmd)
+	if (verbose) printf(cmd + "\n")
+	stata(cmd)
+
+	// gegen group() returns r(J) = number of levels, r(N) = obs used
+	num_levels = st_numscalar("r(J)")
+	num_obs = st_numscalar("r(N)")
+	num_vars = cols(vars)
+	levels = st_data(., levels_var, touse)
+
+
+	if (count_levels) {
+		// Per-level counts live in the first num_levels obs (see fill(data) above)
+		counts = st_data( (1,num_levels) , counts_var, .)
+	}
+
+	if (save_keys) {
+		// For each level, record one representative observation; with duplicate
+		// subscripts the later assignment wins, so idx[g] is the last obs of
+		// level g -- any representative row suffices to recover the key values
+		idx = J(num_levels, 1, .)
+		idx[levels] = 1::num_obs
+		keys = st_data(idx, vars, touse)
+	}
+
+	if (count_levels) assert_msg(num_levels == rows(counts), "num_levels")
+	assert_msg(num_obs == rows(levels), "num_obs")
+	assert_msg(num_obs > 0, "no observations")
+	assert_msg(num_vars > 0, "no variables")
+
+	// Assemble the Factor object (swap() moves the vectors without copying)
+	F = Factor()
+	F.num_levels = num_levels
+	F.num_obs = num_obs
+	if (save_keys) swap(F.keys, keys)
+	swap(F.levels, levels)
+	swap(F.counts, counts)
+	F.method = "gtools"
+	assert_msg(rows(F.levels) == F.num_obs & cols(F.levels) == 1, "levels")
+	if (save_keys==1) assert_msg(rows(F.keys) == F.num_levels, "keys")
+	if (count_levels) assert_msg(rows(F.counts)==F.num_levels & cols(F.counts)==1, "counts")
+	// Conservative: make no claim about sort order of the gtools output
+	F.is_sorted = 0
+	return(F)
+}
+
+end
diff --git a/110/replication_package/replication/ado/plus/f/ftools_type_aliases.mata b/110/replication_package/replication/ado/plus/f/ftools_type_aliases.mata
new file mode 100644
index 0000000000000000000000000000000000000000..fdde6eebca9b8b11172702493af46738fa6163e4
--- /dev/null
+++ b/110/replication_package/replication/ado/plus/f/ftools_type_aliases.mata
@@ -0,0 +1,61 @@
+// Type aliases
------------------------------------------------------------- + + // Numeric (scalars) + loc Boolean real scalar + loc Integer real scalar + loc Real real scalar + + // Numeric (matrices) + loc Vector real colvector + loc RowVector real rowvector + loc Matrix real matrix + + // String (scalars) + loc String string scalar + + // String (matrices) + loc StringVector string colvector + loc StringRowVector string rowvector + loc StringMatrix string matrix + + // Stata-specific + loc Varname string scalar + loc Varlist string rowvector // after tokens() + + loc Variable real colvector // N * 1 + loc Variables real matrix // N * K + + loc DataFrame transmorphic matrix // N * K + loc DataCol transmorphic colvector // N * 1 + loc DataRow transmorphic rowvector // 1 * K + loc DataCell transmorphic scalar // 1 * 1 + + // Classes + loc Handle transmorphic scalar // General scalar + loc Anything transmorphic matrix // General matrix + loc Dict transmorphic scalar // Use for asarray() + loc Factor class Factor scalar + + // Pointers + local FunctionP pointer(`Variables' function) scalar + + // Misc + loc Void void + + +// Backwards compatibility for Mata functions ------------------------------- + + loc selectindex selectindex + loc panelsum panelsum + + if (c(stata_version) < 13) { + cap mata: boottestVersion() + if (c(rc)) { + di as err "Error: Stata versions 12 or earlier require the boottest package" + di as err "To install, from within Stata type " _c + di as smcl "{stata ssc install boottest :ssc install boottest}" + exit 601 + } + loc selectindex boottest_selectindex + loc panelsum _panelsum + } diff --git a/110/replication_package/replication/ado/plus/g/geodist.ado b/110/replication_package/replication/ado/plus/g/geodist.ado new file mode 100644 index 0000000000000000000000000000000000000000..a01a7e2dd32ad8e604fa458057b7b0eaed8fa265 --- /dev/null +++ b/110/replication_package/replication/ado/plus/g/geodist.ado @@ -0,0 +1,383 @@ +*! 
version 1.1.0 18jun2019 Robert Picard, picard@netbox.com +program define geodist, rclass + + version 9 + + syntax anything [in] [if], /// + [ /// + Generate(name) /// + MIles /// + Radius(string) /// + Sphere /// + Ellipsoid(string) /// + Maxiter(integer 25) /// undocumented + ] + + + if ("`sphere'" != "" | "`radius'" != "") & "`ellipsoid'" != "" { + dis as err "You must choose either a sphere or ellipsoid model" + exit 198 + } + + // each lat1 lon1 lat2 lon2 is either variable, scalar, or a number (as string) + // coordinates are expected in signed decimal degrees, east and north positive + tokenize `anything' + if "`5'" != "" { + dis as err "unexpected extra text: `5'" + exit 198 + } + if "`4'" == "" { + dis as err "You must provide lat1 lon1 lat2 lon2" + exit 198 + } + + local hasvar 0 + local allmissing 0 + local j 0 + + forvalues i = 1/2 { + foreach l in lat lon { + local ++j + local `l'`i' ``j'' + + cap confirm string var ``j'' + if !_rc { + dis as err "``j'' is a string variable" + exit 109 + } + + cap confirm numeric var ``j'' + if !_rc { + + local hasvar 1 + + if "`touse'" == "" marksample touse, novar + markout `touse' ``j'' + + sum ``j'' if `touse', meanonly + if r(N) == 0 local allmissing 1 + else { + if "`l'" == "lat" & (r(max) > 90 | r(min) < -90) { + dis as err "latitude `i' must be between -90 and 90" + exit 198 + } + if "`l'" == "lon" & (r(max) > 180 | r(min) < -180) { + dis as err "longitude `i' must be between -180 and 180" + exit 198 + } + } + + } + else { + + cap confirm number ``j'' + local isnum = _rc == 0 + + cap confirm scalar ``j'' + local isscalar = _rc == 0 + if `isscalar' { + tempname what + cap scalar `what' = ``j'' / 1 + if _rc { + dis as err "``j'' is a string scalar" + exit 109 + } + } + + cap local is_missing = mi(``j'') + if _rc local is_missing 0 + + if !(`isnum' | `isscalar' | `is_missing') { + dis as err "Was expecting a number or a numeric scalar " /// + "instead of -``j''-" + exit 198 + } + + if `is_missing' local allmissing 
1 + else { + if "`l'" == "lat" & abs(``j'') > 90 { + dis as err "latitude `i' must be between -90 and 90" + exit 198 + } + if "`l'" == "lon" & abs(``j'') > 180 { + dis as err "longitude `i' must be between -180 and 180" + exit 198 + } + } + } + } + } + + + if !`hasvar' { + if "`if'`in'" != "" { + dis as err "lat/lon are not variables, if or in option not allowed" + exit 198 + } + if "`generate'" != "" { + dis as err "lat/lon are not variables, nothing to generate" + exit 198 + } + if `allmissing' { + return scalar distance = . + exit 0 + } + } + else { + if "`generate'" == "" { + dis as err "you must specify gen(newvar) option" + exit 198 + } + if `allmissing' { + qui gen double `generate' = . + exit 0 + } + } + + if "`miles'" != "" { + local km_to_miles / 1.609344 + local units miles + } + else local units km + + tempname d2r + scalar `d2r' = c(pi) / 180 + + if "`sphere'" != "" | "`radius'" != "" { + + // default to mean earth radius + // see http://en.wikipedia.org/wiki/Earth_radius#Mean_radii + if "`radius'" == "" local radius 6371 + + if `hasvar' { + + // use haversine formula + // http://www.movable-type.co.uk/scripts/gis-faq-5.1.html + qui gen double `generate' = 2 * asin(min(1,sqrt( /// + sin((`lat2' - `lat1') * `d2r' / 2)^2 + /// + cos(`lat1' * `d2r') * cos(`lat2' * `d2r') * /// + sin((`lon2' - `lon1') * `d2r' / 2)^2))) /// + * `radius' `km_to_miles' if `touse' + + } + else { + tempname generate + scalar `generate' = 2 * asin(min(1,sqrt( /// + sin((`lat2' - `lat1') * `d2r' / 2)^2 + /// + cos(`lat1' * `d2r') * cos(`lat2' * `d2r') * /// + sin((`lon2' - `lon1') * `d2r' / 2)^2))) /// + * `radius' `km_to_miles' + + dis as txt "Great-circle distance " /// + "(haversine formula, radius of `radius'km) = " /// + as res `generate' " `units'" + return scalar distance = `generate' + } + + } + else { + + // allow for any reference ellipsoid by allowing user-specified parameters + tempname a b f + if "`ellipsoid'" != "" { + tokenize "`ellipsoid'", parse(" ,") + capture 
confirm number `1' + local rc = _rc + if "`2'" == "," local 2 `3' + capture confirm number `2' + if _rc | `rc' { + dis as err "the reference ellipsoid parameters (a,f^-1) are not numbers" + dis as err "a = semi-major axis, in meters" + dis as err "f^-1 = reciprocal of flattening ratio" + dis as err "e.g. ellipsoid(6378249.145, 293.465) for Clarke 1880" + exit 198 + } + scalar `a' = `1' + scalar `f' = 1 / `2' + } + else { + // Use WGS 1984 ellipsoid + // source: http://earth-info.nga.mil/GandG/publications/tr8350.2/wgs84fin.pdf + // Section 3.2 + scalar `a' = 6378137 // semi-major axis + scalar `f' = 1 / 298.257223563 // Flattening + local ename "WGS 1984" + } + scalar `b' = `a' - `a' * `f' + + + if `hasvar' { + + // implement Vincenty's (1975) inverse solution + // Source: http://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf + + // first approximation, equation 13 + tempvar L lambda + qui gen double `L' = `d2r' * (`lon2' - `lon1') if `touse' + qui gen double `lambda' = `L' if `touse' + + // for speed, precompute all sin and cos of U + tempvar U1 U2 sin_U1 sin_U2 cos_U1 cos_U2 + qui gen double `U1' = atan((1-`f') * tan(`d2r' *`lat1')) if `touse' + qui gen double `U2' = atan((1-`f') * tan(`d2r' *`lat2')) if `touse' + qui gen double `sin_U1' = sin(`U1') if `touse' + qui gen double `sin_U2' = sin(`U2') if `touse' + qui gen double `cos_U1' = cos(`U1') if `touse' + qui gen double `cos_U2' = cos(`U2') if `touse' + drop `U1' `U2' + + // Find lambda by iteration; mark out observation when converged + tempvar cont + gen `cont' = `touse' + foreach v in sin_sigma cos_sigma sigma sin_alpha /// + cos_sq_alpha cos_2sigma_m C lambda_old { + tempvar `v' + qui gen double ``v'' = . 
+ } + local iter 0 + local more 1 + while `++iter' < `maxiter' & `more' { + // equation 14 + qui replace `sin_sigma' = sqrt((`cos_U2' * sin(`lambda'))^2 + /// + (`cos_U1' * `sin_U2' - `sin_U1' * `cos_U2' * cos(`lambda'))^2) /// + if `cont' + // mark out co-incident points + qui replace `cont' = 0 if `sin_sigma' == 0 + // equation 15 + qui replace `cos_sigma' = `sin_U1' * `sin_U2' + `cos_U1' * /// + `cos_U2' * cos(`lambda') if `cont' + // equation 16 + qui replace `sigma' = atan2(`sin_sigma',`cos_sigma') if `cont' + // equation 17 + qui replace `sin_alpha' = `cos_U1' * `cos_U2' * sin(`lambda') / /// + `sin_sigma' if `cont' + // use trig identity to obtain cos^2 alpha + qui replace `cos_sq_alpha' = 1 - `sin_alpha'^2 if `cont' + // equation 18 + qui replace `cos_2sigma_m' = `cos_sigma' - 2 * `sin_U1' * /// + `sin_U2' / `cos_sq_alpha' if `cont' + // adjust if both points are on the equator + qui replace `cos_2sigma_m' = 0 if `cos_sq_alpha' == 0 & `cont' + // compute new lambda and compare to previous one + qui replace `lambda_old' = `lambda' if `cont' + // equation 10 + qui replace `C' = `f' / 16 * `cos_sq_alpha' * /// + (4 + `f' * (4 - 3 * `cos_sq_alpha')) if `cont' + // equation 11 + qui replace `lambda' = `L' + (1 - `C') * `f' * `sin_alpha' * /// + ( `sigma' + `C' * `sin_sigma' * (`cos_2sigma_m' + /// + `C'*`cos_sigma' * (-1 + 2* `cos_2sigma_m'^2))) if `cont' + // mark out observations that have converged + qui replace `cont' = 0 if abs(`lambda'-`lambda_old') <= 1e-12 + // we are done if all observations have converged + sum `cont', meanonly + local more = r(max) + } + drop `L' `sin_U1' `sin_U2' `cos_U1' `cos_U2' `lambda' `sin_alpha' `C' `lambda_old' + + tempvar u_sq A B delta_sigma + qui gen double `u_sq' = `cos_sq_alpha' * (`a'^2 - `b'^2) / (`b'^2) if `touse' + // equation 3 + qui gen double `A' = 1 + `u_sq' / 16384 * (4096 + /// + `u_sq' * (-768 + `u_sq' * (320 - 175 * `u_sq'))) if `touse' + // equation 4 + qui gen double `B' = `u_sq' / 1024 * (256 + /// + `u_sq' 
* (-128 + `u_sq' * (74 - 47 * `u_sq'))) if `touse' + // equation 6 + qui gen double `delta_sigma' = `B' * `sin_sigma' * (`cos_2sigma_m' + /// + `B' / 4 * (`cos_sigma' * /// + (-1 + 2 * `cos_2sigma_m'^2) - /// + `B' / 6 * `cos_2sigma_m' * (-3 + 4 * `sin_sigma'^2) * /// + (-3 + 4 * `cos_2sigma_m'^2))) if `touse' + // equation 19; convert to km and then to miles if requested + qui gen double `generate' = `b' * `A' * (`sigma' - `delta_sigma') /// + / 1000 `km_to_miles' if `touse' + // co-incident points were marked out of the iteration loop + qui replace `generate' = 0 if `sin_sigma' == 0 & `touse' + + // use an extended missing value to flag observations that failed to converge + qui replace `generate' = .a if `cont' + qui count if `generate' == .a + if r(N) { + dis as err "Warning: failed to converge due to near-antipodal points" + dis as err "Replaced distance(s) with missing value .a" + dis as err "Number of distance(s) affected = " r(N) + } + + } + else { + + // since there are no variables, do as above but with scalars + tempname L lambda + scalar `L' = `d2r' * (`lon2' - `lon1') + scalar `lambda' = `L' + + tempname U1 U2 sin_U1 sin_U2 cos_U1 cos_U2 + scalar `U1' = atan((1-`f') * tan(`d2r' *`lat1')) + scalar `U2' = atan((1-`f') * tan(`d2r' *`lat2')) + scalar `sin_U1' = sin(`U1') + scalar `sin_U2' = sin(`U2') + scalar `cos_U1' = cos(`U1') + scalar `cos_U2' = cos(`U2') + + foreach v in sin_sigma cos_sigma sigma sin_alpha /// + cos_sq_alpha cos_2sigma_m C lambda_old { + tempname `v' + } + local iter 0 + local more 1 + while `++iter' < `maxiter' & `more' { + scalar `sin_sigma' = sqrt((`cos_U2' * sin(`lambda'))^2 + /// + (`cos_U1' * `sin_U2' - `sin_U1' * `cos_U2' * cos(`lambda'))^2) + // break out of loop if points are co-incident + if `sin_sigma' == 0 continue, break + scalar `cos_sigma' = `sin_U1' * `sin_U2' + `cos_U1' * /// + `cos_U2' * cos(`lambda') + scalar `sigma' = atan2(`sin_sigma',`cos_sigma') + scalar `sin_alpha' = `cos_U1' * `cos_U2' * sin(`lambda') / 
`sin_sigma' + scalar `cos_sq_alpha' = 1 - `sin_alpha'^2 + scalar `cos_2sigma_m' = `cos_sigma' - 2 * `sin_U1' * /// + `sin_U2' / `cos_sq_alpha' + // adjust if both points are on the equator + if `cos_sq_alpha' == 0 scalar `cos_2sigma_m' = 0 + scalar `lambda_old' = `lambda' + scalar `C' = `f' / 16 * `cos_sq_alpha' * /// + (4 + `f' * (4 - 3 * `cos_sq_alpha')) + scalar `lambda' = `L' + (1 - `C') * `f' * `sin_alpha' * /// + ( `sigma' + `C' * `sin_sigma' * (`cos_2sigma_m' + /// + `C'*`cos_sigma' * (-1 + 2* `cos_2sigma_m'^2))) + local more = abs(`lambda'-`lambda_old') > 1e-12 + } + + tempname d + if `sin_sigma' == 0 scalar `d' = 0 + else { + tempname u_sq A B delta_sigma d + scalar `u_sq' = `cos_sq_alpha' * (`a'^2 - `b'^2) / (`b'^2) + scalar `A' = 1 + `u_sq' / 16384 * (4096 + /// + `u_sq' * (-768 + `u_sq' * (320 - 175 * `u_sq'))) + scalar `B' = `u_sq' / 1024 * (256 + /// + `u_sq' * (-128 + `u_sq' * (74 - 47 * `u_sq'))) + scalar `delta_sigma' = `B' * `sin_sigma' * (`cos_2sigma_m' + /// + `B' / 4 * (`cos_sigma' * /// + (-1 + 2 * `cos_2sigma_m'^2) - /// + `B' / 6 * `cos_2sigma_m' * (-3 + 4 * `sin_sigma'^2) * /// + (-3 + 4 * `cos_2sigma_m'^2))) + scalar `d' = `b' * `A' * (`sigma' - `delta_sigma') / 1000 `km_to_miles' + } + + if `iter' == `maxiter' { + scalar `d' = .a + dis as err "Warning: failed to converge due to near-antipodal points" + } + else { + dis as txt "`ename' ellipsoid(`=`a'',`=1/`f'') distance = " /// + as res `d' " `units'" + } + return scalar distance = `d' + return scalar iterations = `iter' + + } + } + +end diff --git a/110/replication_package/replication/ado/plus/g/geodist.hlp b/110/replication_package/replication/ado/plus/g/geodist.hlp new file mode 100644 index 0000000000000000000000000000000000000000..29a75f482eb9e7d4cf99d708a52e3b9bd9847b8c --- /dev/null +++ b/110/replication_package/replication/ado/plus/g/geodist.hlp @@ -0,0 +1,300 @@ +{smcl} +{* *! 
version 1.1.0 20jun2019}{...} +{cmd:help geodist} +{hline} + +{title:Title} + +{phang} +{bf:geodist} {hline 2} Calculates geographical distances. + +{marker syntax}{...} +{title:Syntax} + +{phang} +If one or more lat/lon coordinates are numeric variables + +{p 8 16 2} +{cmd:geodist} +{it:lat1 lon1 lat2 lon2} +{ifin} +{cmd:,} +{opt g:enerate(new_dist_var)} +[{it:options}] + + +{phang} +If all lat/lon coordinates are numeric scalars or numbers + +{p 8 16 2} +{cmd:geodist} +{it:lat1 lon1 lat2 lon2} +{cmd:,} +[{it:options}] + + +{synoptset 20 tabbed}{...} +{synopthdr} +{synoptline} +{syntab:Main} +{synopt:{opt mi:les}}report distances in miles{p_end} + +{syntab:Ellipsoid} +{synopt:{opt e:llipsoid(#1,#2)}}custom ellipsoid parameters {it:(a,f)}{p_end} + +{syntab:Sphere} +{synopt:{opt s:phere}}calculate great-circle distances{p_end} +{synopt:{opt rad:ius(#)}}custom radius {it:#} (in km){p_end} +{synoptline} +{p2colreset}{...} + + +{marker description}{...} +{title:Description} + +{pstd} +{cmd:geodist} calculates +{browse "https://en.wikipedia.org/wiki/Geographical_distance":geographical distances } +by measuring the length of +the shortest path between two points along +the surface of a mathematical model of the earth. + +{pstd} +By default, {cmd:geodist} implements +{browse "https://en.wikipedia.org/wiki/Vincenty%27s_formulae":Vincenty's (1975) formula} +to calculate distances on a +{browse "https://en.wikipedia.org/wiki/Reference_ellipsoid":reference ellipsoid}. +If the {opt sphere} option is specified, +{cmd:geodist} calculates +{browse "https://en.wikipedia.org/wiki/Great-circle_distance":great-circle distances} +using the +{browse "https://en.wikipedia.org/wiki/Haversine_formula":haversine formula}. +Distances on an ellipsoid are more accurate but note that Vincenty's formula may +fail to find a solution for +{browse "https://en.wikipedia.org/wiki/Antipodes":near-antipodal} + points. +The haversine formula is much simpler and runs fast. 
+ +{pstd} +Geographical coordinates must be in signed decimal degrees, +positive for north and east, and negative for south and west. +{browse "https://en.wikipedia.org/wiki/Latitude":Latitudes} +range from -90 to 90 and +{browse "https://en.wikipedia.org/wiki/Longitude":longitudes} +from -180 to 180. +You may specify each {it:lat1 lon1 lat2 lon2} independently using either a numeric variable, +a numeric scalar, or simply a number. +If {it:lat1 lon1 lat2 lon2} include one or more variables, +{cmd:geodist} will calculate a distance for each observation in the sample +and store these in {help newvar:{it:new_dist_var}}. + + +{marker options}{...} +{title:Options} +{dlgtab:Main} +{phang} +{opt mi:les} indicates that distances are to be reported in miles; if omitted, +distances are in kilometers. + +{dlgtab:Ellipsoid} +{phang} +{opt e:llipsoid(#1,#2)} is used to specify an alternate +{browse "https://en.wikipedia.org/wiki/Reference_ellipsoid":reference ellipsoid}. +#1 is the length of the +semi-major axis in meters (equatorial radius) and +#2 is the flattening ratio. +For example, the +{browse "https://en.wikipedia.org/wiki/Earth_ellipsoid#Historical_Earth_ellipsoids":Airy 1830} +reference ellipsoid +can be specified with {opt ellipsoid(6377563.396,299.3249646)}. +If omitted, {cmd:geodist} uses the +{browse "https://en.wikipedia.org/wiki/World_Geodetic_System#WGS84":WGS 1984} +reference ellipsoid, the same used by +{browse "https://en.wikipedia.org/wiki/Global_Positioning_System":GPS} +devices. + +{dlgtab:Sphere} +{phang} +{opt s:phere} requests great-circle distances. + +{phang} +{opt r:adius(#)} specifies that great-circle distances be computed on +a sphere with a radius of {bind:{it:#} km.} +The default is 6371 ({browse "https://en.wikipedia.org/wiki/Earth_radius#Mean_radius":Earth's mean radius}). 
+ + +{marker examples}{...} +{title:Examples} + +{pstd} +You can use {cmd:geodist} to calculate the distance between two points +if you know the latitude/longitude for each. +For instance, the Michigan Stadium is located at +{browse "https://www.google.com/maps/search/?api=1&query=42.265837,-83.748696":42.265837,-83.748696} +and the North Terminal of the Detroit Metro Airport is located at +{browse "https://www.google.com/maps/search/?api=1&query=42.207667,-83.356022":42.207667,-83.356022}. +The following example calculates the distance in miles between these two +points. The first command calculates the distance on an ellipsoid and the +second on a sphere. + +{space 8}{hline 27} {it:example do-file content} {hline 27} +{cmd}{...} +{* example_start - example1}{...} + geodist 42.265837 -83.748696 42.207667 -83.356022, miles + geodist 42.265837 -83.748696 42.207667 -83.356022, miles sphere +{* example_end}{...} +{txt}{...} +{space 8}{hline 80} +{space 8}{it:({stata geodist_run example1 using geodist.hlp:click to run})} + +{pstd} +Note that these are "as the crow flies" distances. +Compare these results with +{browse "https://www.google.com/maps/dir/?api=1&origin=42.265837,-83.748696&destination=42.207667,-83.356022&travelmode=driving":Google Maps driving directions}. + +{pstd} +If you have a dataset of points, you can calculate the distance between each +point and a fixed location. +In the following example, we create a dataset with the location of +four parks near the University of Michigan and then calculate the distance in kilometers +(the default) between each park and the Michigan Stadium. +Note that Stata's {hi:float} data type holds at most 7 digits of accuracy +so coordinates should always be stored as {hi:double}s. 
+ +{space 8}{hline 27} {it:example do-file content} {hline 27} +{cmd}{...} +{* example_start - example2}{...} + version 9.2 + + clear + input long parkid str17 parkname double(lat lon) + 1 "Gallup Park" 42.273170 -83.694174 + 2 "Argo Park" 42.291516 -83.744604 + 3 "Hudson Mills" 42.382194 -83.911197 + 4 "Nichols Arboretum" 42.281123 -83.725575 + end + save "geodist_example.dta" + + geodist 42.265837 -83.748696 lat lon, gen(d) + list +{* erase "geodist_example.dta"}{...} +{* example_end}{...} +{txt}{...} +{space 8}{hline 80} +{space 8}{it:({stata geodist_run example2 using geodist.hlp:click to run})} + +{pstd} +If you have two datasets of points and want to calculate the distance between +each pair of points, you will have to find a way to combine the observations. +Usually, this will involve using {help cross} or {help joinby} to form +all pairwise combinations of points. +You can use the same technique if you have a single dataset of points and +want to calculate the distance to every other point. 
+For example, here is how to find the nearest neighbor park: + +{space 8}{hline 27} {it:example do-file content} {hline 27} +{cmd}{...} +{* example_start - example3}{...} + version 9.2 + + clear + input long parkid str17 parkname double(lat lon) + 1 "Gallup Park" 42.273170 -83.694174 + 2 "Argo Park" 42.291516 -83.744604 + 3 "Hudson Mills" 42.382194 -83.911197 + 4 "Nichols Arboretum" 42.281123 -83.725575 + end + save "geodist_example.dta" + + * rename all variables and form all pairwise combinations + rename parkid parkid0 + rename parkname parkname0 + rename lat lat0 + rename lon lon0 + cross using "geodist_example.dta" + + * calculate distances and order by distance + geodist lat0 lon0 lat lon, gen(d) + sort parkid0 d parkid + list, sepby(parkid0) + + * drop distance to self and keep the nearest neighbor + drop if parkid0 == parkid + by parkid0: keep if _n == 1 + list +{* erase "geodist_example.dta"}{...} +{* example_end}{...} +{txt}{...} +{space 8}{hline 80} +{space 8}{it:({stata geodist_run example3 using geodist.hlp:click to run})} + +{pstd} +The above works well for small datasets but you should use +{stata ssc des geonear:geonear} (from SSC) if you have more than +a few thousand observations. + + +{title:Saved results} + +{pstd} +{cmd:geodist} saves the following in {cmd:r() } when no variable is specified: + +{synoptset 15 tabbed}{...} +{p2col 5 15 19 2: Scalars}{p_end} +{synopt:{cmd:r(iterations)}}number of iterations (only for ellipsoidal distances){p_end} +{synopt:{cmd:r(distance)}}distance{p_end} +{p2colreset}{...} + + +{title:References} + +{pstd} +Sinnott, R. W., "Virtues of the Haversine", Sky and Telescope 68 (2), 159 (1984). 
+ +{pstd} +Veness, Chris, +"Vincenty solutions of geodesics on the ellipsoid", +{browse "http://www.movable-type.co.uk/scripts/latlong-vincenty.html"} + +{pstd} +Veness, Chris, +"Calculate distance, bearing and more between Latitude/Longitude points", +{browse "http://www.movable-type.co.uk/scripts/latlong.html"} + +{pstd} +Vincenty, T. (1975) Direct and inverse solutions of geodesics on the ellipsoid +with application of nested equations, Survey Review 22(176): 88-93. +{browse "http://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf"} + + +{title:Author} + +{pstd} +Robert Picard + + +{marker alsosee}{...} +{title:Also see} + +{psee} +Stata: +{help cross}, +{help joinby} +{p_end} + +{psee} +Stata 15 or higher: +{help sp}, +{help spdistance} +{p_end} + +{psee} +SSC: +{stata "ssc desc geonear":geonear}, +{stata "ssc desc runby":runby}, +{stata "ssc desc shp2dta":shp2dta}, +{stata "ssc desc geo2xy":geo2xy}, +{stata "ssc desc geoinpoly":geoinpoly}, +{stata "ssc desc mergepoly":mergepoly}, +{stata "ssc desc geocircles":geocircles} +{p_end} diff --git a/110/replication_package/replication/ado/plus/g/geodist_run.ado b/110/replication_package/replication/ado/plus/g/geodist_run.ado new file mode 100644 index 0000000000000000000000000000000000000000..b78605522c29a92785683c2cf1ba1165837a0085 --- /dev/null +++ b/110/replication_package/replication/ado/plus/g/geodist_run.ado @@ -0,0 +1,61 @@ +*! 
version 1.0.0 04june2019 Robert Picard +program define geodist_run + + version 9.2 + + syntax anything(name=example_name id="example name") /// + using/ /// + , /// + [requires(string)] /// + [preserve] + + + local package geodist + local p1 = substr("`package'", 1,1) + + if `"`requires'"' != "" { + foreach f of local requires { + cap confirm file `f' + if _rc { + dis as err "a dataset used in this example is not in the current directory" + dis as err `"> {it:{stata `"net get `package', from("http://fmwww.bc.edu/repec/bocode/`p1'")"':click to install `package' example datasets from SSC}}"' + exit 601 + } + } + } + + + quietly { + + findfile `"`using'"' + + `preserve' + + infix str s 1-244 using `"`r(fn)'"', clear + + gen long obs = _n + + sum obs if strpos(s, "{* example_start - `example_name'}{...}") + if r(min) == . { + dis as err "example `example_name' not found" + exit 111 + } + local pos1 = r(min) + 1 + sum obs if strpos(s, "{* example_end}{...}") & obs > `pos1' + local pos2 = r(min) - 1 + + if mi(`pos1',`pos2') exit + + keep in `pos1'/`pos2' + + // remove code hidden in SMCL comments + replace s = regexr(trim(s), "}{...}", "") if substr(s,1,3) == "{* " + replace s = substr(s,4,.) if substr(s,1,3) == "{* " + + } + + tempfile f + outfile s using "`f'", noquote + do "`f'" + +end diff --git a/110/replication_package/replication/ado/plus/i/ivreg2.ado b/110/replication_package/replication/ado/plus/i/ivreg2.ado new file mode 100644 index 0000000000000000000000000000000000000000..d580ba7259e10a72710ab16151cfd807d455f896 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg2.ado @@ -0,0 +1,6809 @@ +*! ivreg2 4.1.11 22Nov2019 +*! authors cfb & mes +*! 
see end of file for version comments + +* Variable naming: +* lhs = LHS endogenous +* endo = X1, RHS endogenous (instrumented) = #K1 +* inexog = X2 = Z2 = included exogenous (instruments) = #K2 = #L2 +* exexog = Z1 = excluded exogenous (instruments) = #L1 +* iv = {inexog exexog} = all instruments +* rhs = {endo inexog} = RHS regressors +* no 0 or 1 at end of varlist means original varlist but after expansion of FV and TS vars +* 0 at the end of the name means the varlist after duplicates removed and collinearities/omitteds marked +* 1 means the same as 0 but after omitted vars dropped and extraneous FV operators "o", "b" and "n" removed. +* 0, 1 etc. also apply to _ct variables that are counts of these varlists +* dofminus is large-sample adjustment (e.g., #fixed effects) +* sdofminus is small-sample adjustment (e.g., #partialled-out regressors) + +if c(version) < 12 & c(version) >= 9 { +* livreg2 Mata library. +* Ensure Mata library is indexed if new install. +* Not needed for Stata 12+ since ssc.ado does this when installing. 
+ capture mata: mata drop m_calckw() + capture mata: mata drop m_omega() + capture mata: mata drop ms_vcvorthog() + capture mata: mata drop s_vkernel() + capture mata: mata drop s_cdsy() + mata: mata mlib index +} + +********************************************************************************* +***************************** PARENT IVREG2 ************************************* +****************** FORKS TO EXTERNAL IVREG2S IF CALLER < 11 ********************* +********************************************************************************* + +* Parent program, forks to versions as appropriate after version call +* Requires byable(onecall) +program define ivreg2, eclass byable(onecall) /* properties(svyj) */ sortpreserve + local lversion 04.1.11 + +* local to store Stata version of calling program + local caller = _caller() + +* Minimum of version 8 required for parent program (earliest ivreg2 is ivreg28) + version 8 + +* Replay = no arguments before comma + if replay() { +* Call to ivreg2 will either be for version, in which case there should be no other arguments, +* or a postestimation call, in which case control should pass to main program. + syntax [, VERsion * ] + if "`version'"~="" & "`options'"=="" { +* Call to ivreg2 is for version + di in gr "`lversion'" + ereturn clear + ereturn local version `lversion' + exit + } + else if "`version'"~="" & "`options'"~="" { +* Improper use of version option +di as err "invalid syntax - cannot combine version with other options" + exit 198 + } + else { +* Postestimation call, so put `options' macro (i.e. *) back into `0' macro with preceding comma + local 0 `", `options'"' + } + } + +* replay can't be combined with by + if replay() & _by() { +di as err "invalid syntax - cannot use by with replay" + exit 601 + } + +* Handling of by. ivreg2x programs are byable(recall), so must set prefix for them. 
+ if _by() { + local BY `"by `_byvars'`_byrc0':"' + } + +* If calling version is < 11, pass control to earlier version +* Note that this means calls from version 11.0 will not go to legacy version +* but will fail requirement of version 11.2 in main code. + if `caller' < 11 { + local ver = round(`caller') + local ivreg2cmd ivreg2`ver' +* If replay, change e(cmd) macro to name of legacy ivreg2 before calling it, then change back +* Note by not allowed with replay; caught above so prefix not needed here. + if replay() { + ereturn local cmd "`ivreg2cmd'" + `ivreg2cmd' `0' + ereturn local cmd "ivreg2" + + } + else { +* If not replay, call legacy ivreg2 and then add macros + `BY' `ivreg2cmd' `0' + ereturn local cmd "ivreg2" + ereturn local ivreg2cmd "`ivreg2cmd'" + ereturn local version `lversion' + ereturn local predict ivreg2_p + } + exit + } + +// Version is 11 or above. +// Pass control to current estimation program ivreg211. + if replay() { + ivreg211 `0' + } +// If not replay, call ivreg211 and then add macros + else { + // use to separate main args from options + syntax [anything] [if] [in] [aw fw pw iw] [, * ] + // append caller(.) to options + `BY' ivreg211 `anything' `if' `in' [`weight' `exp'], `options' caller(`caller') +// `BY' ivreg211 `0' + ereturn local cmd "ivreg2" + ereturn local ivreg2cmd "ivreg2" + ereturn local version `lversion' + ereturn local predict ivreg2_p + ereturn local cmdline ivreg2 `0' // `0' rather than `*' in case of any "s in string + } + +end +********************************************************************************* +*************************** END PARENT IVREG2 *********************************** +********************************************************************************* + + +********************* EXIT IF STATA VERSION < 11 ******************************** + +* When do file is loaded, exit here if Stata version calling program is < 11. +* Prevents loading of rest of program file (could cause earlier Statas to crash). 
+ +if c(stata_version) < 11 { + exit +} + +******************** END EXIT IF STATA VERSION < 11 ***************************** + + +********************************************************************************* +***************** BEGIN MAIN IVREG2 ESTIMATION CODE ***************************** +********************************************************************************* + +* Main estimation program +program define ivreg211, eclass byable(recall) sortpreserve + version 11.2 + + local ivreg2cmd "ivreg211" // actual command name + local ivreg2name "ivreg2" // name used in command line and for default naming of equations etc. + + if replay() { + syntax [, /// + FIRST FFIRST RF SFIRST /// + dropfirst droprf dropsfirst /// + Level(integer $S_level) /// + NOHEader NOFOoter /// + EForm(string) PLUS /// + NOOMITTED vsquish noemptycells /// + baselevels allbaselevels /// + VERsion /// + caller(real 0) /// + ] + if "`version'" != "" & "`first'`ffirst'`rf'`noheader'`nofooter'`dropfirst'`droprf'`eform'`plus'" != "" { + di as err "option version not allowed" + error 198 + } + if "`version'" != "" { + di in gr "`lversion'" + ereturn clear + ereturn local version `lversion' + exit + } + if `"`e(cmd)'"' != "ivreg2" { + error 301 + } +// Set display options + local dispopt eform(`eform') `noomitted' `vsquish' `noemptycells' `baselevels' `allbaselevels' + +// On replay, set flag so saved eqns aren't dropped + if "`e(firsteqs)'" != "" & "`dropfirst'" == "" { + local savefirst "savefirst" + } + if "`e(rfeq)'" != "" & "`droprf'" == "" { + local saverf "saverf" + } + if "`e(sfirsteq)'" != "" & "`dropsfirst'" == "" { + local savesfirst "savesfirst" + } +// On replay, re-display collinearities and duplicates messages + DispCollinDups + } + else { +// MAIN CODE BLOCK + +// Start parsing + syntax [anything(name=0)] [if] [in] [aw fw pw iw/] [, /// + NOID NOCOLLIN /// + FIRST FFIRST SAVEFIRST SAVEFPrefix(name) /// + RF SAVERF SAVERFPrefix(name) /// + SFIRST SAVESFIRST SAVESFPrefix(name) 
/// + SMall NOConstant /// + Robust CLuster(varlist) kiefer dkraay(integer 0) /// + BW(string) kernel(string) center /// + GMM GMM2s CUE /// + LIML COVIV FULLER(real 0) Kclass(real 0) /// + ORTHOG(string) ENDOGtest(string) REDundant(string) /// + PARTIAL(string) FWL(string) /// + Level(integer $S_level) /// + NOHEader NOFOoter NOOUTput /// + bvclean NOOMITTED omitted vsquish noemptycells /// + baselevels allbaselevels /// + title(string) subtitle(string) /// + DEPname(string) EForm(string) PLUS /// + Tvar(varname) Ivar(varname) /// + B0(string) SMATRIX(string) WMATRIX(string) /// + sw psd0 psda useqr /// + dofminus(integer 0) sdofminus(integer 0) /// + NOPARTIALSMALL /// + fvall fvsep /// + caller(real 0) /// + ] + +// Confirm ranktest is installed (necessary component). + checkversion_ranktest `caller' + local ranktestcmd `r(ranktestcmd)' + +// Parse after clearing any sreturn macros (can be left behind in Stata 11) + sreturn clear + ivparse `0', ivreg2name(`ivreg2name') /// needed for some options + partial(`partial') /// + fwl(`fwl') /// legacy option + orthog(`orthog') /// + endogtest(`endogtest') /// + redundant(`redundant') /// + depname(`depname') /// + `robust' /// + cluster(`cluster') /// + bw(`bw') /// + kernel(`kernel') /// + dkraay(`dkraay') /// + `center' /// + `kiefer' /// + `sw' /// + `noconstant' /// + tvar(`tvar') /// + ivar(`ivar') /// + `gmm2s' /// + `gmm' /// legacy option, produces error message + `cue' /// + `liml' /// + fuller(`fuller') /// + kclass(`kclass') /// + b0(`b0') /// + wmatrix(`wmatrix') /// + `noid' /// + `savefirst' /// + savefprefix(`savefprefix') /// + `saverf' /// + saverfprefix(`saverfprefix') /// + `savesfirst' /// + savesfprefix(`savesfprefix') /// + dofminus(`dofminus') /// + `psd0' /// + `psda' /// + `nocollin' /// + `useqr' /// + `bvclean' /// + eform(`eform') /// + `noomitted' /// + `vsquish' /// + `noemptycells' /// + `baselevels' /// + `allbaselevels' + +// varlists are unexpanded; may be empty + local lhs `s(lhs)' + 
local depname `s(depname)' + local endo `s(endo)' + local inexog `s(inexog)' + local exexog `s(exexog)' + local partial `s(partial)' + local cons =s(cons) + local partialcons =s(partialcons) + local tvar `s(tvar)' + local ivar `s(ivar)' + local tdelta `s(tdelta)' + local tsops =s(tsops) + local fvops =s(fvops) + local robust `s(robust)' + local cluster `s(cluster)' + local bw =`s(bw)' // arrives as string but return now as number + local bwopt `s(bwopt)' + local kernel `s(kernel)' // also used as flag for HAC estimation + local center =`s(center)' // arrives as string but now boolean + local kclassopt `s(kclassopt)' + local fulleropt `s(fulleropt)' + local liml `s(liml)' + local noid `s(noid)' // can also be triggered by b0(.) option + local useqr =`s(useqr)' // arrives as string but now boolean; nocollin=>useqr + local savefirst `s(savefirst)' + local savefprefix `s(savefprefix)' + local saverf `s(saverf)' + local saverfprefix `s(saverfprefix)' + local savesfirst `s(savesfirst)' + local savesfprefix `s(savesfprefix)' + local psd `s(psd)' // triggered by psd0 or psda + local dofmopt `s(dofmopt)' + local bvclean =`s(bvclean)' // arrives as string but return now as boolean + local dispopt `s(dispopt)' + +// Can now tsset; sortpreserve will restore sort after exit + if `tsops' | "`kernel'"~="" { + cap tsset // restores sort if tsset or xtset but sort disrupted + if _rc>0 { + tsset `ivar' `tvar' + } + } + +*********************************************************** + +// Weights +// fweight and aweight accepted as is +// iweight not allowed with robust or gmm and requires a trap below when used with summarize +// pweight is equivalent to aweight + robust +// Since we subsequently work with wvar, tsrevar of weight vars in weight `exp' not needed. 
+ + tempvar wvar + if "`weight'" == "fweight" | "`weight'"=="aweight" { + local wtexp `"[`weight'=`exp']"' + qui gen double `wvar'=`exp' + } + if "`weight'" == "fweight" & "`kernel'" !="" { + di in red "fweights not allowed (data are -tsset-)" + exit 101 + } + if "`weight'" == "fweight" & "`sw'" != "" { + di in red "fweights currently not supported with -sw- option" + exit 101 + } + if "`weight'" == "iweight" { + if "`robust'`cluster'`gmm2s'`kernel'" !="" { + di in red "iweights not allowed with robust or gmm" + exit 101 + } + else { + local wtexp `"[`weight'=`exp']"' + qui gen double `wvar'=`exp' + } + } + if "`weight'" == "pweight" { + local wtexp `"[aweight=`exp']"' + qui gen double `wvar'=`exp' + local robust "robust" + } + if "`weight'" == "" { +* If no weights, define neutral weight variable + qui gen byte `wvar'=1 + } + +******************************************************************************** +// markout sample +// include `tvar' to limit sample to where tvar is available, but only if TS operators used + marksample touse + if `tsops' { + markout `touse' `lhs' `inexog' `exexog' `endo' `cluster' `tvar', strok + } + else { + markout `touse' `lhs' `inexog' `exexog' `endo' `cluster', strok + } + +******************************************************************************** +// weight factor and sample size +// Every time a weight is used, must multiply by scalar wf ("weight factor") +// wf=1 for no weights, fw and iw, wf = scalar that normalizes sum to be N if aw or pw + + sum `wvar' if `touse' `wtexp', meanonly +// Weight statement + if "`weight'" ~= "" { +di in gr "(sum of wgt is " %14.4e `r(sum_w)' ")" + } + if "`weight'"=="" | "`weight'"=="fweight" | "`weight'"=="iweight" { +// Effective number of observations is sum of weight variable. 
+// If weight is "", weight var must be column of ones and N is number of rows + local wf=1 + local N=r(sum_w) + } + else if "`weight'"=="aweight" | "`weight'"=="pweight" { + local wf=r(N)/r(sum_w) + local N=r(N) + } + else { +// Should never reach here +di as err "ivreg2 error - misspecified weights" + exit 198 + } + if `N'==0 { +di as err "no observations" + exit 2000 + } + +*************************************************************** +// Time-series data +// tindex used by Mata code so that ts operators work correctly + + tempvar tindex + qui gen `tindex'=1 if `touse' + qui replace `tindex'=sum(`tindex') if `touse' + + if `tsops' | "`kernel'"~="" { +// Report gaps in data + tsreport if `touse', panel + if `r(N_gaps)' != 0 { +di as text "Warning: time variable " as res "`tvar'" as text " has " /// + as res "`r(N_gaps)'" as text " gap(s) in relevant range" + } +// Set local macro T and check that bw < (T-1) + sum `tvar' if `touse', meanonly + local T = r(max)-r(min) + 1 + local T1 = `T' - 1 + if (`bw' > (`T1'/`tdelta')) { +di as err "invalid bandwidth in option bw() - cannot exceed timespan of data" + exit 198 + } + } + +// kiefer VCV = kernel(tru) bw(T) and no robust with tsset data + if "`kiefer'" ~= "" { + local bw =`T' + } + +*********** Column of ones for constant set up here ************** + + if "`noconstant'"=="" { +// If macro not created, automatically omitted. + tempvar ones + qui gen byte `ones' = 1 if `touse' + } + +************* Varlists, FV varlists, duplicates ***************** +// Varlists come in 4 versions, e.g., for inexog: +// (a) inexog = full list of original expanded vnames; may have duplicates +// (b) inexog0 = as with inexog with duplicates removed but RETAINING base/omitted/etc. varnames +// (c) inexog1 = as with inexog0 but WITHOUT base/omitted/etc. +// (d) fv_inexog1 = corresponding list with temp vars minus base/omitted/etc., duplicates, collinearities etc. 
+// Varlists (c) and (d) are definitive, i.e., have the variables actually used in the estimation. + +// Create consistent expanded varlists. +// "Consistent" means base vars for FVs must be consistent +// hence default rhs=endo+inexog is expanded as one. +// fvall: overrides, endo+inexog+exexog expanded as one +// fvsep: overrides, endo, inexog and exexog expanded separately +// NB: expanding endo+inexog+exexog is dangerous because +// fvexpand can zap a list in case of overlap +// e.g. fvexpand mpg + i(1/4).rep78 + i5.rep78 +// => mpg 1b.rep78 2.rep78 3.rep78 4.rep78 5.rep78 +// but fvexpand mpg + i.rep78 + i5.rep78 +// => mpg 5.rep78 + + CheckDupsCollin, /// + lhs(`lhs') /// + endo(`endo') /// + inexog(`inexog') /// + exexog(`exexog') /// + partial(`partial') /// + orthog(`orthog') /// + endogtest(`endogtest') /// + redundant(`redundant') /// + touse(`touse') /// + wvar(`wvar') /// + wf(`wf') /// + `noconstant' /// + `nocollin' /// + `fvall' /// + `fvsep' + +// Replace basic varlists and create "0" versions of varlists + foreach vl in lhs endo inexog exexog partial orthog endogtest redundant { + local `vl' `s(`vl')' + local `vl'0 `s(`vl'0)' + } + local dups `s(dups)' + local collin `s(collin)' + local ecollin `s(ecollin)' + +// Create "1" and fv versions of varlists + foreach vl in lhs endo inexog exexog partial orthog endogtest redundant { + foreach var of local `vl'0 { // var-by-var so that fvrevar doesn't decide on base etc. 
+ _ms_parse_parts `var' + if ~`r(omit)' { // create temp var only if not omitted + fvrevar `var' if `touse' + local `vl'1 ``vl'1' `var' + local fv_`vl'1 `fv_`vl'1' `r(varlist)' + } + } + local `vl'1 : list retokenize `vl'1 + local fv_`vl'1 : list retokenize fv_`vl'1 + } + +// Check that LHS expanded to a single variable + local wrongvars_ct : word count `lhs' + if `wrongvars_ct' > 1 { +di as err "multiple dependent variables specified: `lhs'" + error 198 + } + +// Check that option varlists are compatible with main varlists +// orthog() + local wrongvars : list orthog1 - inexog1 + local wrongvars : list wrongvars - exexog1 + local wrongvars_ct : word count `wrongvars' + if `wrongvars_ct' { +di as err "Error: `wrongvars' listed in orthog() but does not appear as exogenous." + error 198 + } +// endog() + local wrongvars : list endogtest1 - endo1 + local wrongvars_ct : word count `wrongvars' + if `wrongvars_ct' { +di as err "Error: `wrongvars' listed in endog() but does not appear as endogenous." + error 198 + } +// redundant() + local wrongvars : list redundant1 - exexog1 + local wrongvars_ct : word count `wrongvars' + if `wrongvars_ct' { +di as err "Error: `wrongvars' listed in redundant() but does not appear as exogenous." + error 198 + } + +// And create allnames macros + local allnames `lhs' `endo' `inexog' `exexog' + local allnames0 `lhs0' `endo0' `inexog0' `exexog0' + local allnames1 `lhs1' `endo1' `inexog1' `exexog1' + local fv_allnames1 `fv_lhs1' `fv_endo1' `fv_inexog1' `fv_exexog1' + + +// *************** Partial-out block ************** // + +// `partial' has all to be partialled out except for constant + if "`partial1'" != "" | `partialcons'==1 { + preserve + +// Remove partial0 from inexog0. +// Remove partial1 from inexog1. 
+ local inexog0 : list inexog0 - partial0 + local inexog1 : list inexog1 - partial1 + local fv_inexog1 : list fv_inexog1 - fv_partial1 + +// Check that cluster, weight, tvar or ivar variables won't be transformed +// Use allnames1 (expanded varlist) + if "`cluster'"~="" { + local pvarcheck : list cluster in allnames1 + if `pvarcheck' { +di in r "Error: cannot use cluster variable `cluster' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`tvar'"~="" { + local pvarcheck : list tvar in allnames1 + if `pvarcheck' { +di in r "Error: cannot use time variable `tvar' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`ivar'"~="" { + local pvarcheck : list ivar in allnames1 + if `pvarcheck' { +di in r "Error: cannot use panel variable `ivar' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`wtexp'"~="" { + tokenize `exp', parse("*/()+-^&|~") + local wvartokens `*' + local nwvarnames : list allnames1 - wvartokens + local wvarnames : list allnames1 - nwvarnames + if "`wvarnames'"~="" { +di in r "Error: cannot use weight variables as dependent variable, regressor or IV" +di in r " in combination with -partial- option." 
+ error 198 + } + } +// Partial out +// But first replace everything with doubles + recast double `fv_lhs1' `fv_endo1' `fv_inexog1' `fv_exexog1' `fv_partial1' + mata: s_partial ("`fv_lhs1'", /// + "`fv_endo1'", /// + "`fv_inexog1'", /// + "`fv_exexog1'", /// + "`fv_partial1'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + `cons') + + local partial_ct : word count `partial1' +// Constant is partialled out, unless nocons already specified in the first place + capture drop `ones' + local ones "" + if "`noconstant'" == "" { +// partial_ct used for small-sample adjustment to regression F-stat + local partial_ct = `partial_ct' + 1 + local noconstant "noconstant" + local cons 0 + } + } + else { +// Set count of partial vars to zero if option not used + local partial_ct 0 + local partialcons 0 + } +// Add partial_ct to small dof adjustment sdofminus + if "`nopartialsmall'"=="" { + local sdofminus = `sdofminus'+`partial_ct' + } + +********************************************* + + local rhs0 `endo0' `inexog0' // needed for display of omitted/base/etc. + local rhs1 `endo1' `inexog1' + local insts1 `exexog1' `inexog1' + local fv_insts1 `fv_exexog1' `fv_inexog1' + local fv_rhs1 `fv_endo1' `fv_inexog1' + local rhs0_ct : word count `rhs0' // needed for display of omitted/base/etc. + local rhs1_ct : word count `fv_rhs1' + local iv1_ct : word count `fv_insts1' + local endo1_ct : word count `fv_endo1' + local exex1_ct : word count `fv_exexog1' + local endoexex1_c : word count `fv_endo1' `fv_exexog1' + local inexog1_ct : word count `fv_inexog1' + +// Counts modified to include constant if appropriate + local rhs1_ct = `rhs1_ct' + `cons' + local rhs0_ct = `rhs0_ct' + `cons' // needed for display of omitted/base/etc. + local iv1_ct = `iv1_ct' + `cons' + +// Column/row names for matrices b, V, S, etc. 
+ local cnb0 `endo0' `inexog0' // including omitted + local cnb1 `endo1' `inexog1' // excluding omitted + local cnZ0 `exexog0' `inexog0' // excluding omitted + local cnZ1 `exexog1' `inexog1' // excluding omitted + if `cons' { + local cnb0 "`cnb0' _cons" + local cnb1 "`cnb1' _cons" + local cnZ0 "`cnZ0' _cons" + local cnZ1 "`cnZ1' _cons" + } + +********************************************* +// Remaining checks: variable counts, col/row names of b0, smatrix, wmatrix + CheckMisc, /// + rhs1_ct(`rhs1_ct') /// + iv1_ct(`iv1_ct') /// + bvector(`b0') /// + smatrix(`smatrix') /// + wmatrix(`wmatrix') /// + cnb1(`cnb1') /// + cnZ1(`cnZ1') + + if "`b0'"~="" { + tempname b0 // so we can overwrite without changing original user matrix + mat `b0' = r(b0) + } + if "`smatrix'"~="" { + tempname S0 + mat `S0' = r(S0) + } + if "`wmatrix'"~="" { + tempname wmatrix // so we can overwrite without changing original user matrix + mat `wmatrix' = r(W0) + } + +*************** Commonly used matrices **************** + tempname YY yy yyc + tempname XX X1X1 X2X2 X1Z X1Z1 XZ Xy + tempname ZZ Z1Z1 Z2Z2 Z1Z2 Z1X2 Zy ZY Z2y Z2Y + tempname XXinv X2X2inv ZZinv XPZXinv + tempname rankxx rankzz condxx condzz + +// use fv_ varlists + mata: s_crossprods ("`fv_lhs1'", /// + "`fv_endo1'", /// + "`fv_inexog1' `ones'", /// + "`fv_exexog1'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N') + mat `XX' =r(XX) + mat `X1X1' =r(X1X1) + mat `X1Z' =r(X1Z) + mat `ZZ' =r(ZZ) + mat `Z2Z2' =r(Z2Z2) + mat `Z1Z2' =r(Z1Z2) + mat `XZ' =r(XZ) + mat `Xy' =r(Xy) + mat `Zy' =r(Zy) + mat `YY' =r(YY) + scalar `yy' =r(yy) + scalar `yyc' =r(yyc) + mat `ZY' =r(ZY) + mat `Z2y' =r(Z2y) + mat `Z2Y' =r(Z2Y) + mat `XXinv' =r(XXinv) + mat `ZZinv' =r(ZZinv) + mat `XPZXinv' =r(XPZXinv) + scalar `condxx' =r(condxx) + scalar `condzz' =r(condzz) + + scalar `rankzz' = rowsof(`ZZinv') - diag0cnt(`ZZinv') + scalar `rankxx' = rowsof(`XXinv') - diag0cnt(`XXinv') + local overid = `rankzz' - `rankxx' + +********** CLUSTER 
SETUP ********************************************** + +* Mata code requires data are sorted on (1) the first var cluster if there +* is only one cluster var; (2) on the 3rd and then 1st if two-way clustering, +* unless (3) two-way clustering is combined with kernel option, in which case +* the data are tsset and sorted on panel id (first cluster variable) and time +* id (second cluster variable). +* Second cluster var is optional and requires an identifier numbered 1..N_clust2, +* unless combined with kernel option, in which case it's the time variable. +* Third cluster var is the intersection of 1 and 2, unless combined with kernel +* opt, in which case it's unnecessary. +* Sorting on "cluster3 cluster1" means that in Mata, panelsetup works for +* both, since cluster1 nests cluster3. +* Note that it is possible to cluster on time but not panel, in which case +* cluster1 is time, cluster2 is empty and data are sorted on panel-time. +* Note also that if data are sorted here but happen to be tsset, will need +* to be re-tsset after estimation code concludes. + + +// No cluster options or only 1-way clustering +// but for Mata and other purposes, set N_clust vars =0 + local N_clust=0 + local N_clust1=0 + local N_clust2=0 + if "`cluster'"!="" { + local clopt "cluster(`cluster')" + tokenize `cluster' + local cluster1 "`1'" + local cluster2 "`2'" + if "`kernel'"~="" { +* kernel requires either that cluster1 is time var and cluster2 is empty +* or that cluster1 is panel var and cluster2 is time var. +* Either way, data must be tsset and sorted for panel data. + if "`cluster2'"~="" { +* Allow backwards order + if "`cluster1'"=="`tvar'" & "`cluster2'"=="`ivar'" { + local cluster1 "`2'" + local cluster2 "`1'" + } + if "`cluster1'"~="`ivar'" | "`cluster2'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset panel & time vars." 
+di as err " tsset panel var=`ivar'; tsset time var=`tvar'; cluster vars=`cluster1',`cluster2'" + exit 198 + } + } + else { + if "`cluster1'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset time variable." +di as err " tsset time var=`tvar'; cluster var=`cluster1'" + exit 198 + } + } + } +* Simple way to get quick count of 1st cluster variable without disrupting sort +* clusterid1 is numbered 1.._Nclust1. + tempvar clusterid1 + qui egen `clusterid1'=group(`cluster1') if `touse' + sum `clusterid1' if `touse', meanonly + if "`cluster2'"=="" { + local N_clust=r(max) + local N_clust1=`N_clust' + if "`kernel'"=="" { +* Single level of clustering and no kernel-robust, so sort on single cluster var. +* kernel-robust already sorted via tsset. + sort `cluster1' + } + } + else { + local N_clust1=r(max) + if "`kernel'"=="" { + tempvar clusterid2 clusterid3 +* New cluster id vars are numbered 1..N_clust2 and 1..N_clust3 + qui egen `clusterid2'=group(`cluster2') if `touse' + qui egen `clusterid3'=group(`cluster1' `cluster2') if `touse' +* Two levels of clustering and no kernel-robust, so sort on cluster3/nested in/cluster1 +* kernel-robust already sorted via tsset. + sort `clusterid3' `cluster1' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) + } + else { +* Need to create this only to count the number of clusters + tempvar clusterid2 + qui egen `clusterid2'=group(`cluster2') if `touse' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) +* Now replace with original variable + local clusterid2 `cluster2' + } + + local N_clust=min(`N_clust1',`N_clust2') + + } // end 2-way cluster block + } // end cluster block + + +************************************************************************************************ + + tempname b W S V beta lambda j jp rss mss rmse sigmasq rankV rankS + tempname arubin arubinp arubin_lin arubin_linp + tempname r2 r2_a r2u r2c F Fp Fdf2 ivest + + tempvar resid + qui gen double `resid'=. 
+ +******************************************************************************************* +* LIML +******************************************************************************************* + + if "`liml'`kclassopt'"~="" { + + mata: s_liml( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`Zy'", /// + "`Z2Z2'", /// + "`YY'", /// + "`ZY'", /// + "`Z2Y'", /// + "`Xy'", /// + "`ZZinv'", /// + "`fv_lhs1'", /// + "`fv_lhs1' `fv_endo1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_endo1'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`fv_exexog1'", /// + "`fv_inexog1' `ones'", /// + `fuller', /// + `kclass', /// + "`coviv'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`robust'", /// + "`clusterid1'", /// + "`clusterid2'", /// + "`clusterid3'", /// + `bw', /// + "`kernel'", /// + "`sw'", /// + "`psd'", /// + "`ivar'", /// + "`tvar'", /// + "`tindex'", /// + `tdelta', /// + `center', /// + `dofminus', /// + `useqr') + + mat `b'=r(beta) + mat `S'=r(S) + mat `V'=r(V) + scalar `lambda'=r(lambda) + local kclass=r(kclass) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + scalar `rankV'=r(rankV) + scalar `rankS'=r(rankS) + + scalar `arubin'=(`N'-`dofminus')*ln(`lambda') + scalar `arubin_lin'=(`N'-`dofminus')*(`lambda'-1) + +// collinearities can cause LIML to generate (spurious) OLS results + if "`nocollin'"~="" & `kclass'<1e-8 { +di as err "warning: k=1 in LIML estimation; results equivalent to OLS;" +di as err " may be caused by collinearities" + } + } + +******************************************************************************************* +* OLS, IV and 2SGMM. Also enter to get CUE starting values. +************************************************************************************************ + + if "`liml'`kclassopt'`b0'"=="" { + +* Call to s_gmm1s to do 1st-step GMM. 
+* If W or S supplied, calculates GMM beta and residuals +* If none of the above supplied, calculates GMM beta using default IV weighting matrix and residuals +* Block not entered if b0 is provided. + +* 1-step GMM is efficient and V/J/Sargan can be returned if: +* - estimator is IV, W is known and S can be calculated from 1st-step residuals +* - S is provided (and W is NOT) so W=inv(S) and beta can be calculated using W +* 1-step GMM is inefficient if: +* - non-iid VCE is requested +* - W is provided + + local effic1s = ( /// + "`gmm2s'`robust'`cluster'`kernel'"=="" /// + | ("`smatrix'"~="" & "`wmatrix'"=="") /// + ) + +// use fv_ varlists + mata: s_gmm1s( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`Zy'", /// + "`ZZinv'", /// + "`fv_lhs1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`wmatrix'", /// + "`S0'", /// + `dofminus', /// + `effic1s', /// + `overid', /// + `useqr') + mat `b'=r(beta) + mat `W'=r(W) + +* If 1st-step is efficient, save remaining results and we're done + if `effic1s' { + mat `V'=r(V) + mat `S'=r(S) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + scalar `rankV'=r(rankV) + scalar `rankS'=r(rankS) + } + else { +* ...we're not done - do inefficient or 2-step efficient GMM + +* Pick up matrix left by s_gmm1s(.) + tempname QXZ_W_QZX + mat `QXZ_W_QZX'=r(QXZ_W_QZX) + +* Block calls s_omega to get cov matrix of orthog conditions, if not supplied + if "`smatrix'"~="" { + mat `S'=`S0' + } + else { + +* NB: xtivreg2 calls ivreg2 with data sorted on ivar and optionally tvar. +* Stock-Watson adjustment -sw- assumes data are sorted on ivar. Checked at start of ivreg2. + +* call abw code if bw() is defined and bw(auto) selected + if `bw' != 0 { + if `bw' == -1 { + tempvar abwtouse + gen byte `abwtouse' = (`resid' < .) 
+ abw `resid' `exexog1' `inexog1' `abwtouse', /* + */ tindex(`tindex') nobs(`N') tobs(`T') noconstant kernel(`kernel') + local bw `r(abw)' + local bwopt "bw(`bw')" + local bwchoice "`r(bwchoice)'" + } + } +* S covariance matrix of orthogonality conditions +// use fv_ varlists + mata: s_omega( "`ZZ'", /// + "`resid'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`robust'", /// + "`clusterid1'", /// + "`clusterid2'", /// + "`clusterid3'", /// + `bw', /// + "`kernel'", /// + "`sw'", /// + "`psd'", /// + "`ivar'", /// + "`tvar'", /// + "`tindex'", /// + `tdelta', /// + `center', /// + `dofminus') + mat `S'=r(S) + } + +* By this point: `b' has 1st-step inefficient beta +* `resid' has resids from the above beta +* `S' has vcv of orthog conditions using either `resid' or user-supplied `S0' +* `QXZ_W_QZX' was calculated in s_gmm1s(.) for use in s_iegmm(.) + +* Inefficient IV. S, W and b were already calculated above. + if "`gmm2s'"=="" & "`robust'`cluster'`kernel'"~="" { + mata: s_iegmm( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`Zy'", /// + "`QXZ_W_QZX'", /// + "`fv_lhs1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`W'", /// + "`S'", /// + "`b'", /// + `dofminus', /// + `overid', /// + `useqr') + } + +* 2-step efficient GMM. S calculated above, b and W will be updated. 
+ if "`gmm2s'"~="" { + mata: s_egmm( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`Zy'", /// + "`ZZinv'", /// + "`fv_lhs1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`S'", /// + `dofminus', /// + `overid', /// + `useqr') + mat `b'=r(beta) + mat `W'=r(W) + } + + mat `V'=r(V) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + scalar `rankV'=r(rankV) + scalar `rankS'=r(rankS) + } +* Finished with non-CUE/LIML block + } + +*************************************************************************************** +* Block for cue gmm +******************************************************************************************* + if "`cue'`b0'" != "" { + +* s_gmmcue is passed initial b from IV/2-step GMM block above +* OR user-supplied b0 for evaluation of CUE obj function at b0 + mata: s_gmmcue( "`ZZ'", /// + "`XZ'", /// + "`fv_lhs1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`robust'", /// + "`clusterid1'", /// + "`clusterid2'", /// + "`clusterid3'", /// + `bw', /// + "`kernel'", /// + "`sw'", /// + "`psd'", /// + "`ivar'", /// + "`tvar'", /// + "`tindex'", /// + `tdelta', /// + "`b'", /// + "`b0'", /// + `center', /// + `dofminus', /// + `useqr') + + mat `b'=r(beta) + mat `S'=r(S) + mat `W'=r(W) + mat `V'=r(V) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + scalar `rankV'=r(rankV) + scalar `rankS'=r(rankS) + + } + +**************************************************************** +* Done with estimation blocks +**************************************************************** + + mat colnames `b' = `cnb1' + mat colnames `V' = `cnb1' + mat rownames `V' = `cnb1' + mat colnames `S' = `cnZ1' + mat rownames `S' = `cnZ1' +* No W matrix for LIML or kclass + capture 
mat colnames `W' = `cnZ1' + capture mat rownames `W' = `cnZ1' + +******************************************************************************************* +* RSS, counts, dofs, F-stat, small-sample corrections +******************************************************************************************* + +// rankxx = rhs1_ct except if nocollin +// rankzz = iv1_ct except if nocollin +// nocollin means count may exceed rank (because of dropped vars), so rank #s foolproof + scalar `rmse'=sqrt(`sigmasq') + if "`noconstant'"=="" { + scalar `mss'=`yyc' - `rss' + } + else { + scalar `mss'=`yy' - `rss' + } + + local Fdf1 = `rankxx' - `cons' + local df_m = `rankxx' - `cons' + (`sdofminus'-`partialcons') + +* Residual dof + if "`cluster'"=="" { +* Use int(`N') because of non-integer N with iweights, and also because of +* possible numeric imprecision with N returned by above. + local df_r = int(`N') - `rankxx' - `dofminus' - `sdofminus' + } + else { +* To match Stata, subtract 1 + local df_r = `N_clust' - 1 + } + +* Sargan-Hansen J dof and p-value +* df=0 doesn't guarantee j=0 since can be call to get value of CUE obj fn + local jdf = `rankzz' - `rankxx' + if `jdf' == 0 & "`b0'"=="" { + scalar `j' = 0 + } + else { + scalar `jp' = chiprob(`jdf',`j') + } + if "`liml'"~="" { + scalar `arubinp' = chiprob(`jdf',`arubin') + scalar `arubin_linp' = chiprob(`jdf',`arubin_lin') + } + +* Small sample corrections for var-cov matrix. +* If robust, the finite sample correction is N/(N-K), and with no small +* we change this to 1 (a la Davidson & MacKinnon 1993, p. 554, HC0). +* If cluster, the finite sample correction is (N-1)/(N-K)*M/(M-1), and with no small +* we change this to 1 (a la Wooldridge 2002, p. 193), where M=number of clusters. 
+ + if "`small'" != "" { + if "`cluster'"=="" { + matrix `V'=`V'*(`N'-`dofminus')/(`N'-`rankxx'-`dofminus'-`sdofminus') + } + else { + matrix `V'=`V'*(`N'-1)/(`N'-`rankxx'-`sdofminus') /// + * `N_clust'/(`N_clust'-1) + } + scalar `sigmasq'=`rss'/(`N'-`rankxx'-`dofminus'-`sdofminus') + scalar `rmse'=sqrt(`sigmasq') + } + + scalar `r2u'=1-`rss'/`yy' + scalar `r2c'=1-`rss'/`yyc' + if "`noconstant'"=="" { + scalar `r2'=`r2c' + scalar `r2_a'=1-(1-`r2')*(`N'-1)/(`N'-`rankxx'-`dofminus'-`sdofminus') + } + else { + scalar `r2'=`r2u' + scalar `r2_a'=1-(1-`r2')*`N'/(`N'-`rankxx'-`dofminus'-`sdofminus') + } +* `N' is rounded down to nearest integer if iweights are used. +* If aw, pw or fw, should already be integer but use round in case of numerical imprecision. + local N=int(`N') + +* Fstat +* To get it to match Stata's, must post separately with dofs and then do F stat by hand +* in case weights generate non-integer obs and dofs +* Create copies so they can be posted + tempname FB FV + mat `FB'=`b' + mat `FV'=`V' + capture ereturn post `FB' `FV' +* If the cov matrix wasn't positive definite, the post fails with error code 506 + local rc = _rc + if `rc' != 506 { +* Strip out omitted/base/etc. vars from RHS list + ivreg2_fvstrip `rhs1', dropomit + capture test `r(varlist)' + if "`small'" == "" { + if "`cluster'"=="" { + capture scalar `F' = r(chi2)/`Fdf1' * `df_r'/(`N'-`dofminus') + } + else { +* sdofminus used here so that F-stat matches test stat from regression with no partial and small + capture scalar `F' = r(chi2)/`Fdf1' * /// + (`N_clust'-1)/`N_clust' * /// + (`N'-`rankxx'-`sdofminus')/(`N'-1) + } + } + else { + capture scalar `F' = r(chi2)/`Fdf1' + } + capture scalar `Fp'=Ftail(`Fdf1',`df_r',`F') + capture scalar `Fdf2'=`df_r' + } + +* If j==. or vcv wasn't full rank, then vcv problems and F is meaningless + if `j' == . | `rc'==506 { + scalar `F' = . + scalar `Fp' = . 
+ } + +* End of counts, dofs, F-stat, small sample corrections + +******************************************************************************************** +* Reduced form and first stage regression options +******************************************************************************************* +* Relies on proper count of (non-collinear) IVs generated earlier. +* Note that nocons option + constant in instrument list means first-stage +* regressions are reported with nocons option. First-stage F-stat therefore +* correctly includes the constant as an explanatory variable. + + if "`sfirst'`savesfirst'`rf'`saverf'`first'`ffirst'`savefirst'" != "" & (`endo1_ct' > 0) { + +* Restore original order if changed for mata code above + capture tsset + + local sdofmopt = "sdofminus(`sdofminus')" +// Need to create Stata placeholders for Mata code so that Stata time-series operators can work on them +// fres1 is Nx1 +// endo1_hat is NxK1 +// fsresall is Nx(K1+1) (used for full system) + tempname fsres1 + qui gen double `fsres1'=. + local fsresall `fsres1' + foreach x of local fv_endo1 { + tempname fsres + qui gen double `fsres'=. 
+ local fsresall "`fsresall' `fsres'" + } + +// mata code requires sorting on cluster 3 / cluster 1 (if 2-way) or cluster 1 (if one-way) + if "`cluster'"!="" { + sort `clusterid3' `cluster1' + } + mata: s_ffirst( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`ZY'", /// + "`ZZinv'", /// + "`XXinv'", /// + "`XPZXinv'", /// + "`Z2Z2'", /// + "`Z1Z2'", /// + "`Z2y'", /// + "`fsres1'", /// Nx1 + "`fsresall'", /// Nx(K1+1) + "`fv_lhs1'", /// + "`fv_endo1'", /// + "`fv_inexog1' `ones'", /// + "`fv_exexog1'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + `N_clust', /// + "`robust'", /// + "`clusterid1'", /// + "`clusterid2'", /// + "`clusterid3'", /// + `bw', /// + "`kernel'", /// + "`sw'", /// + "`psd'", /// + "`ivar'", /// + "`tvar'", /// + "`tindex'", /// + `tdelta', /// + `center', /// + `dofminus', /// + `sdofminus') + + tempname firstmat firstb firstv firsts + mat `firstmat' = r(firstmat) + mat rowname `firstmat' = rmse sheapr2 pr2 F df df_r pvalue /// + SWF SWFdf1 SWFdf2 SWFp SWchi2 SWchi2p SWr2 /// + APF APFdf1 APFdf2 APFp APchi2 APchi2p APr2 + mat colname `firstmat' = `endo1' + mat `firstb' = r(b) + mat `firstv' = r(V) + mat `firsts' = r(S) + local archi2 =r(archi2) + local archi2p =r(archi2p) + local arf =r(arf) + local arfp =r(arfp) + local ardf =r(ardf) + local ardf_r =r(ardf_r) + local sstat =r(sstat) + local sstatdf =r(sstatdf) + local sstatp =r(sstatp) + local rmse_rf =r(rmse_rf) + +* Restore original order if changed for mata code above + capture tsset +// System of first-stage/reduced form eqns + if "`sfirst'`savesfirst'" ~= "" { + PostFirstRF if `touse', /// + bmat(`firstb') /// + vmat(`firstv') /// + smat(`firsts') /// + firstmat(`firstmat') /// + lhs1(`lhs1') /// + endo1(`endo1') /// + znames0(`cnZ0') /// + znames1(`cnZ1') /// + bvclean(`bvclean') /// + fvops(`fvops') /// + partial_ct(`partial_ct') /// + `robust' /// + cluster(`cluster') /// + cluster1(`cluster1') /// + cluster2(`cluster2') /// + nc(`N_clust') /// + 
nc1(`N_clust1') /// + nc2(`N_clust2') /// + kernel(`kernel') /// + bw(`bw') /// + ivar(`ivar') /// + tvar(`tvar') /// + obs(`N') /// + iv1_ct(`iv1_ct') /// + cons(`cons') /// + partialcons(`partialcons') /// + dofminus(`dofminus') /// + sdofminus(`sdofminus') + local sfirsteq "`savesfprefix'sfirst_`lhs1'" + local sfirsteq : subinstr local sfirsteq "." "_" + capture est store `sfirsteq', title("System of first-stage/reduced form regressions") + if _rc > 0 { +di +di in ye "Unable to store system of first-stage reduced form regressions." +di + } + } + +// RF regression + if "`rf'`saverf'" ~= "" { + PostFirstRF if `touse', /// + rf /// extract RF regression as saved result + rmse_rf(`rmse_rf') /// provide RMSE for posting + bmat(`firstb') /// + vmat(`firstv') /// + smat(`firsts') /// + firstmat(`firstmat') /// + lhs1(`lhs1') /// + endo1(`endo1') /// + znames0(`cnZ0') /// + znames1(`cnZ1') /// + bvclean(`bvclean') /// + fvops(`fvops') /// + partial_ct(`partial_ct') /// + `robust' /// + cluster(`cluster') /// + cluster1(`cluster1') /// + cluster2(`cluster2') /// + nc(`N_clust') /// + nc1(`N_clust1') /// + nc2(`N_clust2') /// + kernel(`kernel') /// + bw(`bw') /// + ivar(`ivar') /// + tvar(`tvar') /// + obs(`N') /// + iv1_ct(`iv1_ct') /// + cons(`cons') /// + partialcons(`partialcons') /// + dofminus(`dofminus') /// + sdofminus(`sdofminus') + local rfeq "`saverfprefix'`lhs1'" + local rfeq : subinstr local rfeq "." "_" + capture est store `rfeq', title("Reduced-form regression: `lhs'") + if _rc > 0 { +di +di in ye "Unable to store reduced form regression of `lhs1'." 
+di + } + } + +// Individual first-stage equations + if "`first'`savefirst'" ~= "" { + foreach vn in `endo1' { + + PostFirstRF if `touse', /// + first(`vn') /// extract first-stage regression + bmat(`firstb') /// + vmat(`firstv') /// + smat(`firsts') /// + firstmat(`firstmat') /// + lhs1(`lhs1') /// + endo1(`endo1') /// + znames0(`cnZ0') /// + znames1(`cnZ1') /// + bvclean(`bvclean') /// + fvops(`fvops') /// + partial_ct(`partial_ct') /// + `robust' /// + cluster(`cluster') /// + cluster1(`cluster1') /// + cluster2(`cluster2') /// + nc(`N_clust') /// + nc1(`N_clust1') /// + nc2(`N_clust2') /// + kernel(`kernel') /// + bw(`bw') /// + ivar(`ivar') /// + tvar(`tvar') /// + obs(`N') /// + iv1_ct(`iv1_ct') /// + cons(`cons') /// + partialcons(`partialcons') /// + dofminus(`dofminus') /// + sdofminus(`sdofminus') + local eqname "`savefprefix'`vn'" + local eqname : subinstr local eqname "." "_" + capture est store `eqname', title("First-stage regression: `vn'") + if _rc == 0 { + local firsteqs "`firsteqs' `eqname'" + } + else { +di +di in ye "Unable to store first-stage regression of `vn'." 
+di + } + } + } + } +* End of RF and first-stage regression code + +******************************************************************************************* +* Re-tsset if necessary +************************************************************************************************ + + capture tsset + +******************************************************************************************* +* orthog option: C statistic (difference of Sargan statistics) +******************************************************************************************* +* Requires j dof from above + if "`orthog'"!="" { + tempname cj cstat cstatp +* Initialize cstat + scalar `cstat' = 0 +* Remove orthog from inexog and put in endo +* Remove orthog from exexog + local cexexog1 : list fv_exexog1 - fv_orthog1 + local cinexog1 : list fv_inexog1 - fv_orthog1 + local cendo1 : list fv_inexog1 - cinexog1 + local cendo1 `fv_endo1' `cendo1' + local clist_ct : word count `orthog1' + +* If robust, HAC/AC or GMM (but not LIML or IV), create optimal weighting matrix to pass to ivreg2 +* by extracting the submatrix from the full S and then inverting. +* This guarantees the C stat will be non-negative. See Hayashi (2000), p. 220. +* Calculate C statistic with recursive call to ivreg2 +* Collinearities may cause problems, hence -capture-. 
+* smatrix works generally, including homoskedastic case with Sargan stat + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } +* clopt is omitted because it requires calculation of numbers of clusters, which is done +* only when S matrix is calculated +* S matrix has final varnames, but need to call ivreg2 with temp vars +* so must rename cols/rows of S + tempname fv_S + mat `fv_S'=`S' + if `cons' { + mat colnames `fv_S' = `fv_exexog1' `fv_inexog1' _cons + mat rownames `fv_S' = `fv_exexog1' `fv_inexog1' _cons + } + else { + mat colnames `fv_S' = `fv_exexog1' `fv_inexog1' + mat rownames `fv_S' = `fv_exexog1' `fv_inexog1' + } + capture `ivreg2cmd' `fv_lhs1' /// + `cinexog1' /// + (`cendo1'=`cexexog1') /// + if `touse' /// + `wtexp', /// + `noconstant' /// + `options' /// + `small' /// + `robust' /// + `gmm2s' /// + `bwopt' /// + `kernopt' /// + `dofmopt' /// + `sw' /// + `psd' /// + smatrix("`fv_S'") /// + noid /// + nocollin + local rc = _rc + if `rc' == 481 { + scalar `cstat' = 0 + local cstatdf = 0 + } + else { + scalar `cj'=e(j) + local cjdf=e(jdf) + scalar `cstat' = `j' - `cj' + local cstatdf = `jdf' - `cjdf' + } + _estimates unhold `ivest' + scalar `cstatp'= chiprob(`cstatdf',`cstat') +* Collinearities may cause C-stat dof to differ from the number of variables in orthog() +* If so, set cstat=0 + if `cstatdf' != `clist_ct' { + scalar `cstat' = 0 + } + } +* End of orthog block + +******************************************************************************************* +* Endog option +******************************************************************************************* +* Uses recursive call with orthog + if "`endogtest'"!="" { + tempname estat estatp +* Initialize estat + scalar `estat' = 0 +* Remove endogtest vars from endo and put in inexog + local eendo1 : list 
fv_endo1 - fv_endogtest1 + local einexog1 `fv_inexog1' `fv_endogtest1' + local elist_ct : word count `endogtest1' + +* Recursive call to ivreg2 using orthog option to obtain endogeneity test statistic +* Collinearities may cause problems, hence -capture-. + capture { + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + capture `ivreg2cmd' `fv_lhs1' /// + `einexog1' /// + (`eendo1'=`fv_exexog1') /// + if `touse' /// + `wtexp', /// + `noconstant' /// + `robust' /// + `clopt' /// + `gmm2s' /// + `liml' /// + `bwopt' /// + `kernopt' /// + `small' /// + `dofmopt' /// + `sw' /// + `psd' /// + `options' /// + orthog(`fv_endogtest1') /// + noid /// + nocollin + local rc = _rc + if `rc' == 481 { + scalar `estat' = 0 + local estatdf = 0 + } + else { + scalar `estat'=e(cstat) + local estatdf=e(cstatdf) + scalar `estatp'=e(cstatp) + } + _estimates unhold `ivest' +* Collinearities may cause endog stat dof to differ from the number of variables in endog() +* If so, set estat=0 + if `estatdf' != `elist_ct' { + scalar `estat' = 0 + } + } +* End of endogeneity test block + } + +******************************************************************************************* +* Rank identification and redundancy block +******************************************************************************************* + if `endo1_ct' > 0 & "`noid'"=="" { + +// id=underidentification statistic, wid=weak identification statistic + tempname idrkstat widrkstat iddf idp + tempname ccf cdf rkf cceval cdeval cd + tempname idstat widstat + +// UNDERIDENTIFICATION +// Anderson canon corr underidentification statistic if homo, rk stat if not +// Need only id stat for testing full rank=(#cols-1) +// ranktest can exit with error if not full rank +// May not exit with error if e.g. ranktest (x y) (x w), +// i.e. 
collinearity across lists, so need to catch that. +// If no collinearity, can use iv1_ct and rhs1_ct etc. + cap `ranktestcmd' /// + (`fv_endo1') /// + (`fv_exexog1') /// + `wtexp' /// + if `touse', /// + partial(`fv_inexog1') /// + full /// + `noconstant' /// + `robust' /// + `clopt' /// + `bwopt' /// + `kernopt' +// Returned in e(.) macro: + local rkcmd `r(ranktestcmd)' + +// Canonical correlations returned in r(ccorr), sorted in descending order. +// If largest = 1, collinearities so enter error block. + local rkerror = _rc>0 | r(chi2)==. + if ~`rkerror' { + local rkerror = el(r(ccorr),1,1)==1 + } + if `rkerror' { +di as err "warning: -ranktest- error in calculating underidentification test statistics;" +di as err " may be caused by collinearities" + scalar `idstat' = . + local iddf = . + scalar `idp' = . + scalar `cd' = . + scalar `cdf' = . + } + else { + if "`cluster'"=="" { + scalar `idstat'=r(chi2)/r(N)*(`N'-`dofminus') + } + else { +// No dofminus adjustment needed for cluster-robust + scalar `idstat'=r(chi2) + } + mat `cceval'=r(ccorr) + mat `cdeval' = J(1,`endo1_ct',.) + forval i=1/`endo1_ct' { + mat `cceval'[1,`i'] = (`cceval'[1,`i'])^2 + mat `cdeval'[1,`i'] = `cceval'[1,`i'] / (1 - `cceval'[1,`i']) + } + local iddf = `iv1_ct' - (`rhs1_ct'-1) + scalar `idp' = chiprob(`iddf',`idstat') +// Cragg-Donald F statistic. +// Under homoskedasticity, Wald cd eigenvalue = cc/(1-cc) Anderson canon corr eigenvalue. + scalar `cd'=`cdeval'[1,`endo1_ct'] + scalar `cdf'=`cd'*(`N'-`sdofminus'-`iv1_ct'-`dofminus')/`exex1_ct' + } // end underidentification stat + +// WEAK IDENTIFICATION +// Weak id statistic is Cragg-Donald F stat, rk Wald F stat if not +// ranktest exits with error if not full rank so can use iv1_ct and rhs1_ct etc. 
+ if "`robust'`cluster'`kernel'"=="" { + scalar `widstat'=`cdf' + } + else { +// Need only test of full rank + cap `ranktestcmd' /// + (`fv_endo1') /// + (`fv_exexog1') /// + `wtexp' /// + if `touse', /// + partial(`fv_inexog1') /// + full /// + wald /// + `noconstant' /// + `robust' /// + `clopt' /// + `bwopt' /// + `kernopt' +// Canonical correlations returned in r(ccorr), sorted in descending order. +// If largest = 1, collinearities so enter error block. + local rkerror = _rc>0 | r(chi2)==. + if ~`rkerror' { + local rkerror = el(r(ccorr),1,1)==1 + } + if `rkerror' { +di as err "warning: -ranktest- error in calculating weak identification test statistics;" +di as err " may be caused by collinearities" + scalar `rkf' = . + scalar `widstat' = . + } + else { +// sdofminus used here so that F-stat matches test stat from regression with no partial + if "`cluster'"=="" { + scalar `rkf'=r(chi2)/r(N)*(`N'-`iv1_ct'-`sdofminus'-`dofminus')/`exex1_ct' + } + else { + scalar `rkf' = r(chi2)/(`N'-1) * /// + (`N'-`iv1_ct'-`sdofminus') * /// + (`N_clust'-1)/`N_clust' / /// + `exex1_ct' + } + scalar `widstat'=`rkf' + } + } // end weak-identification stat + } // end under- and weak-identification stats + +* LM redundancy test + if `endo1_ct' > 0 & "`redundant'" ~= "" & "`noid'"=="" { +* Use K-P rk statistics and LM version of test +* Statistic is the rank of the matrix of Z_1B*X_2, where Z_1B are the possibly redundant +* instruments and X_1 are the endogenous regressors; both have X_2 (exogenous regressors) +* and Z_1A (maintained excluded instruments) partialled out. LM test of rank is +* is numerically equivalent to estimation of set of RF regressions and performing +* standard LM test of possibly redundant instruments. 
+ + local rexexog1 : list fv_exexog1 - fv_redundant1 + local redlist_ct : word count `redundant1' +* LM version requires only -nullrank- rk statistics so would not need -all- option + tempname rkmatrix + qui `ranktestcmd' /// + (`fv_endo1') /// + (`fv_redundant1') /// + `wtexp' /// + if `touse', /// + partial(`fv_inexog1' `rexexog1') /// + null /// + `noconstant' /// + `robust' /// + `clopt' /// + `bwopt' /// + `kernopt' + mat `rkmatrix'=r(rkmatrix) + tempname redstat redp +* dof adjustment needed because it doesn't use the adjusted S + if "`cluster'"=="" { + scalar `redstat' = `rkmatrix'[1,1]/r(N)*(`N'-`dofminus') + } + else { +* No dofminus adjustment needed for cluster-robust + scalar `redstat' = `rkmatrix'[1,1] + } + local reddf = `endo1_ct'*`redlist_ct' + scalar `redp' = chiprob(`reddf',`redstat') + } + +* End of identification stats block + +******************************************************************************************* +* Error-checking block +******************************************************************************************* + +* Check if adequate number of observations + if `N' <= `iv1_ct' { +di in r "Error: number of observations must be greater than number of instruments" +di in r " including constant." + error 2001 + } + +* Check if robust VCV matrix is of full rank + if ("`gmm2s'`robust'`cluster'`kernel'" != "") & (`rankS' < `iv1_ct') { +* Robust covariance matrix not of full rank means either a singleton dummy or too few +* clusters (in which case the indiv SEs are OK but no F stat or 2-step GMM is possible), +* or there are too many AC/HAC-lags, or the HAC covariance estimator +* isn't positive definite (possible with truncated and Tukey-Hanning kernels) +* or nocollin option has been used. +* Previous versions of ivreg2 exited if 2-step GMM but beta and VCV may be OK. +* Continue but J, F, and C stat (if present) all possibly meaningless. +* Set j = missing so that problem can be reported in output. + scalar `j' = . 
+ if "`orthog'"!="" { + scalar `cstat' = . + } + if "`endogtest'"!="" { + scalar `estat' = . + } + } + +* End of error-checking block + +********************************************************************************************** +* Post and display results. +******************************************************************************************* + +// rankV = rhs1_ct except if nocollin +// rankS = iv1_ct except if nocollin +// nocollin means count may exceed rank (because of dropped vars), so rank #s foolproof + +// Add back in omitted vars from "0" varlists unless bvclean requested +// or unless there are no omitted regressors that need adding back in. + if ~`bvclean' & (`rhs0_ct' > `rhs1_ct') { + AddOmitted, bmat(`b') vmat(`V') cnb0(`cnb0') cnb1(`cnb1') + mat `b' = r(b) + mat `V' = r(V) +// build fv info (base, empty, etc.) unless there was partialling out + if `fvops' & ~`partial_ct' { + local bfv "buildfvinfo" + } + } + +******************************************************************************************* + +// restore data if preserved for partial option + if `partial_ct' { + restore + } + + if "`small'"!="" { + local NminusK = `N'-`rankxx'-`sdofminus' + capture ereturn post `b' `V', dep(`depname') obs(`N') esample(`touse') dof(`NminusK') `bfv' + } + else { + capture ereturn post `b' `V', dep(`depname') obs(`N') esample(`touse') `bfv' + } + + local rc = _rc + if `rc' == 504 { +di in red "Error: estimated variance-covariance matrix has missing values" + exit 504 + } + if `rc' == 506 { +di in red "Error: estimated variance-covariance matrix not positive-definite" + exit 506 + } + if `rc' > 0 { +di in red "Error: estimation failed - could not post estimation results" + exit `rc' + } + + local mok =1 // default - margins OK + local mok = `mok' & ~`partial_ct' // but not if partialling out + local mok = `mok' & ~(`fvops' & `bvclean') // nor if there are FVs and the base vars are not in e(b) + if `mok' & `endo1_ct' { // margins can be used, endog 
regressors + ereturn local marginsnotok "Residuals SCores" // same as official -ivregress- + ereturn local marginsok "XB default" + } + else if `mok' & ~`endo1_ct' { // margins can be used, no endog regressors + ereturn local marginsok "XB default" // same as official -regress' + } + else { // don't allow margins + ereturn local marginsnotok "Residuals SCores XB default" + } + +// Original varlists without removed duplicates, collinears, etc. +// "0" varlists after removing duplicates and reclassifying vars, and including omitteds, FV base vars, etc. +// "1" varlists without omitted, FV base vars, and partialled-out vars + ereturn local ecollin `ecollin' + ereturn local collin `collin' + ereturn local dups `dups' + ereturn local partial1 `partial1' + ereturn local partial `partial' + ereturn local inexog1 `inexog1' + ereturn local inexog0 `inexog0' + ereturn local inexog `inexog' + ereturn local exexog1 `exexog1' + ereturn local exexog0 `exexog0' + ereturn local exexog `exexog' + ereturn local insts1 `exexog1' `inexog1' + ereturn local insts0 `exexog0' `inexog0' + ereturn local insts `exexog' `inexog' + ereturn local instd1 `endo1' + ereturn local instd0 `endo0' + ereturn local instd `endo' + ereturn local depvar1 `lhs1' + ereturn local depvar0 `lhs0' + ereturn local depvar `lhs' + + ereturn scalar inexog_ct =`inexog1_ct' + ereturn scalar exexog_ct =`exex1_ct' + ereturn scalar endog_ct =`endo1_ct' + ereturn scalar partial_ct =`partial_ct' + + if "`smatrix'" == "" { + ereturn matrix S `S' + } + else { + ereturn matrix S `S0' // it's a copy so original won't be zapped + } + +* No weighting matrix defined for LIML and kclass + if "`wmatrix'"=="" & "`liml'`kclassopt'"=="" { + ereturn matrix W `W' + } + else if "`liml'`kclassopt'"=="" { + ereturn matrix W `wmatrix' // it's a copy so original won't be zapped + } + + if "`kernel'"!="" { + ereturn local kernel "`kernel'" + ereturn scalar bw=`bw' + ereturn local tvar "`tvar'" + if "`ivar'" ~= "" { + ereturn local ivar 
"`ivar'" + } + if "`bwchoice'" ~= "" { + ereturn local bwchoice "`bwchoice'" + } + } + + if "`small'"!="" { + ereturn scalar df_r=`df_r' + ereturn local small "small" + } + if "`nopartialsmall'"=="" { + ereturn local partialsmall "small" + } + + + if "`robust'" != "" { + local vce "robust" + } + if "`cluster1'" != "" { + if "`cluster2'"=="" { + local vce "`vce' cluster" + } + else { + local vce "`vce' two-way cluster" + } + } + if "`kernel'" != "" { + if "`robust'" != "" { + local vce "`vce' hac" + } + else { + local vce "`vce' ac" + } + local vce "`vce' `kernel' bw=`bw'" + } + if "`sw'" != "" { + local vce "`vce' sw" + } + if "`psd'" != "" { + local vce "`vce' `psd'" + } + local vce : list clean vce + local vce = lower("`vce'") + ereturn local vce `vce' + + if "`cluster'"!="" { + ereturn scalar N_clust=`N_clust' + ereturn local clustvar `cluster' + } + if "`cluster2'"!="" { + ereturn scalar N_clust1=`N_clust1' + ereturn scalar N_clust2=`N_clust2' + ereturn local clustvar1 `cluster1' + ereturn local clustvar2 `cluster2' + } + + if "`robust'`cluster'" != "" { + ereturn local vcetype "Robust" + } + + ereturn scalar df_m=`df_m' + ereturn scalar sdofminus=`sdofminus' + ereturn scalar dofminus=`dofminus' + ereturn scalar center=`center' + ereturn scalar r2=`r2' + ereturn scalar rmse=`rmse' + ereturn scalar rss=`rss' + ereturn scalar mss=`mss' + ereturn scalar r2_a=`r2_a' + ereturn scalar F=`F' + ereturn scalar Fp=`Fp' + ereturn scalar Fdf1=`Fdf1' + ereturn scalar Fdf2=`Fdf2' + ereturn scalar yy=`yy' + ereturn scalar yyc=`yyc' + ereturn scalar r2u=`r2u' + ereturn scalar r2c=`r2c' + ereturn scalar condzz=`condzz' + ereturn scalar condxx=`condxx' + ereturn scalar rankzz=`rankzz' + ereturn scalar rankxx=`rankxx' + ereturn scalar rankS=`rankS' + ereturn scalar rankV=`rankV' + ereturn scalar ll = -0.5 * (`N'*ln(2*_pi) + `N'*ln(`rss'/`N') + `N') + +* Always save J. Also save as Sargan if homoskedastic; save A-R if LIML. 
+ ereturn scalar j=`j' + ereturn scalar jdf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar jp=`jp' + } + if ("`robust'`cluster'"=="") { + ereturn scalar sargan=`j' + ereturn scalar sargandf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar sarganp=`jp' + } + } + if "`liml'"!="" { + ereturn scalar arubin=`arubin' + ereturn scalar arubin_lin=`arubin_lin' + if `j' != 0 & `j' != . { + ereturn scalar arubinp=`arubinp' + ereturn scalar arubin_linp=`arubin_linp' + } + ereturn scalar arubindf=`jdf' + } + + if "`orthog'"!="" { + ereturn scalar cstat=`cstat' + if `cstat'!=0 & `cstat' != . { + ereturn scalar cstatp=`cstatp' + ereturn scalar cstatdf=`cstatdf' + ereturn local clist `orthog1' + } + } + + if "`endogtest'"!="" { + ereturn scalar estat=`estat' + if `estat'!=0 & `estat' != . { + ereturn scalar estatp=`estatp' + ereturn scalar estatdf=`estatdf' + ereturn local elist `endogtest1' + } + } + + if `endo1_ct' > 0 & "`noid'"=="" { + ereturn scalar idstat=`idstat' + ereturn scalar iddf=`iddf' + ereturn scalar idp=`idp' + ereturn scalar cd=`cd' + ereturn scalar widstat=`widstat' + ereturn scalar cdf=`cdf' + capture ereturn matrix ccev=`cceval' + capture ereturn matrix cdev `cdeval' + capture ereturn scalar rkf=`rkf' + } + + if "`redundant'"!="" & "`noid'"=="" { + ereturn scalar redstat=`redstat' + ereturn scalar redp=`redp' + ereturn scalar reddf=`reddf' + ereturn local redlist `redundant1' + } + + if "`first'`ffirst'`savefirst'`sfirst'`savesfirst'" != "" & `endo1_ct'>0 { +// Capture here because firstmat may be empty if mvs encountered in 1st stage regressions + capture ereturn matrix first `firstmat' + ereturn scalar arf=`arf' + ereturn scalar arfp=`arfp' + ereturn scalar archi2=`archi2' + ereturn scalar archi2p=`archi2p' + ereturn scalar ardf=`ardf' + ereturn scalar ardf_r=`ardf_r' + ereturn scalar sstat=`sstat' + ereturn scalar sstatp=`sstatp' + ereturn scalar sstatdf=`sstatdf' + } +// not saved if empty + ereturn local firsteqs `firsteqs' + ereturn local rfeq `rfeq' 
+ ereturn local sfirsteq `sfirsteq' + + if "`liml'"!="" { + ereturn local model "liml" + ereturn scalar kclass=`kclass' + ereturn scalar lambda=`lambda' + if `fuller' > 0 & `fuller' < . { + ereturn scalar fuller=`fuller' + } + } + else if "`kclassopt'" != "" { + ereturn local model "kclass" + ereturn scalar kclass=`kclass' + } + else if "`gmm2s'`cue'`b0'`wmatrix'"=="" { + if "`endo1'" == "" { + ereturn local model "ols" + } + else { + ereturn local model "iv" + } + } + else if "`cue'`b0'"~="" { + ereturn local model "cue" + } + else if "`gmm2s'"~="" { + ereturn local model "gmm2s" + } + else if "`wmatrix'"~="" { + ereturn local model "gmmw" + } + else { +* Should never enter here + ereturn local model "unknown" + } + + if "`weight'" != "" { + ereturn local wexp "=`exp'" + ereturn local wtype `weight' + } + ereturn local cmd `ivreg2cmd' + ereturn local ranktestcmd `rkcmd' + ereturn local version `lversion' + ereturn scalar nocollin =("`nocollin'"~="") + ereturn scalar partialcons =`partialcons' + ereturn scalar cons =`cons' + + ereturn local predict "`ivreg2cmd'_p" + + if "`e(model)'"=="gmm2s" & "`wmatrix'"=="" { + local title2 "2-Step GMM estimation" + } + else if "`e(model)'"=="gmm2s" & "`wmatrix'"~="" { + local title2 "2-Step GMM estimation with user-supplied first-step weighting matrix" + } + else if "`e(model)'"=="gmmw" { + local title2 "GMM estimation with user-supplied weighting matrix" + } + else if "`e(model)'"=="cue" & "`b0'"=="" { + local title2 "CUE estimation" + } + else if "`e(model)'"=="cue" & "`b0'"~="" { + local title2 "CUE evaluated at user-supplied parameter vector" + } + else if "`e(model)'"=="ols" { + local title2 "OLS estimation" + } + else if "`e(model)'"=="iv" { + local title2 "IV (2SLS) estimation" + } + else if "`e(model)'"=="liml" { + local title2 "LIML estimation" + } + else if "`e(model)'"=="kclass" { + local title2 "k-class estimation" + } + else { +* Should never reach here + local title2 "unknown estimation" + } + if "`e(vcetype)'" == 
"Robust" { + local hacsubtitle1 "heteroskedasticity" + } + if "`e(kernel)'"!="" & "`e(clustvar)'"=="" { + local hacsubtitle3 "autocorrelation" + } + if "`kiefer'"!="" { + local hacsubtitle3 "within-cluster autocorrelation (Kiefer)" + } + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local hacsubtitle3 "clustering on `e(clustvar)'" + } + else { + local hacsubtitle3 "clustering on `e(clustvar1)' and `e(clustvar2)'" + } + if "`e(kernel)'" != "" { + local hacsubtitle4 "and kernel-robust to common correlated disturbances (Driscoll-Kraay)" + } + } + if "`hacsubtitle1'"~="" & "`hacsubtitle3'" ~= "" { + local hacsubtitle2 " and " + } + if "`title'"=="" { + ereturn local title "`title1'`title2'" + } + else { + ereturn local title "`title'" + } + if "`subtitle'"~="" { + ereturn local subtitle "`subtitle'" + } + local hacsubtitle "`hacsubtitle1'`hacsubtitle2'`hacsubtitle3'" + if "`b0'"~="" { + ereturn local hacsubtitleB "Estimates based on supplied parameter vector" + } + else if "`hacsubtitle'"~="" & "`gmm2s'`cue'"~="" { + ereturn local hacsubtitleB "Estimates efficient for arbitrary `hacsubtitle'" + } + else if "`wmatrix'"~="" { + ereturn local hacsubtitleB "Efficiency of estimates dependent on weighting matrix" + } + else { + ereturn local hacsubtitleB "Estimates efficient for homoskedasticity only" + } + if "`hacsubtitle'"~="" { + ereturn local hacsubtitleV "Statistics robust to `hacsubtitle'" + } + else { + ereturn local hacsubtitleV "Statistics consistent for homoskedasticity only" + } + if "`hacsubtitle4'"~="" { + ereturn local hacsubtitleV2 "`hacsubtitle4'" + } + if "`sw'"~="" { + ereturn local hacsubtitleV "Stock-Watson heteroskedastic-robust statistics (BETA VERSION)" + } + } + +******************************************************************************************* +* Display results unless ivreg2 called just to generate stats or nooutput option + + if "`nooutput'" == "" { + +// Display supplementary first-stage/RF results + if 
"`savesfirst'`saverf'`savefirst'" != "" { + DispStored `"`savesfirst'"' `"`saverf'"' `"`savefirst'"' + } + if "`rf'" != "" { + local eqname "`e(rfeq)'" + tempname ivest + _estimates hold `ivest', copy + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to display stored reduced form estimation." +di + } + else { + DispSFirst "rf" `"`plus'"' `"`level'"' `"`nofooter'"' `"`ivreg2name'"' "`dispopt'" + } + _estimates unhold `ivest' + } + if "`first'" != "" { + DispFirst `"`ivreg2name'"' + } + if "`sfirst'"!="" { + local eqname "`e(sfirsteq)'" + tempname ivest + _estimates hold `ivest', copy + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to display stored first-stage/reduced form estimations." +di + } + else { + DispSFirst "sfirst" `"`plus'"' `"`level'"' `"`nofooter'"' `"`ivreg2name'"' "`dispopt'" + } + _estimates unhold `ivest' + } + if "`first'`ffirst'`sfirst'" != "" { + DispFFirst `"`ivreg2name'"' + } + +// Display main output. Can be standard ivreg2, or first-stage-type results + if "`e(model)'"=="first" | "`e(model)'"=="rf" | "`e(model)'"=="sfirst" { + DispSFirst "`e(model)'" `"`plus'"' `"`level'"' `"`nofooter'"' `"`ivreg2name'"' "`dispopt'" + } + else { + DispMain `"`noheader'"' `"`plus'"' `"`level'"' `"`nofooter'"' `"`ivreg2name'"' "`dispopt'" + } + } + +// Drop first stage estimations unless explicitly saved or if replay + if "`savefirst'" == "" { + local firsteqs "`e(firsteqs)'" + foreach eqname of local firsteqs { + capture estimates drop `eqname' + } + ereturn local firsteqs + } +// Drop reduced form estimation unless explicitly saved or if replay + if "`saverf'" == "" { + local eqname "`e(rfeq)'" + capture estimates drop `eqname' + ereturn local rfeq + } +// Drop first stage/reduced form estimation unless explicitly saved or if replay + if "`savesfirst'" == "" { + local eqname "`e(sfirsteq)'" + capture estimates drop `eqname' + ereturn local sfirsteq + } + +end + 
+******************************************************************************************* +* SUBROUTINES +******************************************************************************************* + +// ************* Display system of or single first-stage and/or RF estimations ************ // + +program define DispSFirst, eclass + args model plus level nofooter helpfile dispopt + version 11.2 + +di + if "`model'"=="first" { +di in gr "First-stage regression of `e(depvar)':" + } + else if "`model'"=="rf" { + local strlen = length("`e(depvar)'")+25 +di in gr "Reduced-form regression: `e(depvar)'" +di in smcl in gr "{hline `strlen'}" + } + else if "`model'"=="sfirst" { +di in gr "System of first-stage/reduced-form regressions:" +di in smcl in gr "{hline 47}" + } + +// Display coefficients etc. +// Header info + if "`e(hacsubtitleV)'" ~= "" { +di in gr _n "`e(hacsubtitleV)'" + } + if "`e(hacsubtitleV2)'" ~= "" { +di in gr "`e(hacsubtitleV2)'" + } +di in gr "Number of obs = " _col(31) in ye %8.0f e(N) + if "`e(kernel)'"!="" { +di in gr " kernel=`e(kernel)'; bandwidth=" `e(bw)' + if "`e(bwchoice)'"!="" { +di in gr " `e(bwchoice)'" + } +di in gr " time variable (t): " in ye e(tvar) + if "`e(ivar)'" != "" { +di in gr " group variable (i): " in ye e(ivar) + } + } + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local N_clust `e(N_clust)' + local clustvar `e(clustvar)' + } + else { + local N_clust `e(N_clust1)' + local clustvar `e(clustvar1)' + } +di in gr "Number of clusters (`clustvar') = " _col(33) in ye %6.0f `N_clust' + } + if "`e(clustvar2)'"!="" { +di in gr "Number of clusters (" "`e(clustvar2)'" ") = " _col(33) in ye %6.0f e(N_clust2) + } + +// Unfortunate but necessary hack here: to suppress message about cluster adjustment of +// standard error, clear e(clustvar) and then reset it after display + local cluster `e(clustvar)' + ereturn local clustvar + +// Display output + ereturn display, `plus' level(`level') `dispopt' + ereturn local clustvar 
`cluster' + +end + +// ************* Display main estimation outpout ************** // + +program define DispMain, eclass + args noheader plus level nofooter helpfile dispopt + version 11.2 +* Prepare for problem resulting from rank(S) being insufficient +* Results from insuff number of clusters, too many lags in HAC, +* to calculate robust S matrix, HAC matrix not PD, singleton dummy, +* and indicated by missing value for j stat +* Macro `rprob' is either 1 (problem) or 0 (no problem) + capture local rprob ("`e(j)'"==".") + + if "`noheader'"=="" { + if "`e(title)'" ~= "" { +di in gr _n "`e(title)'" + local tlen=length("`e(title)'") +di in gr "{hline `tlen'}" + } + if "`e(subtitle)'" ~= "" { +di in gr "`e(subtitle)'" + } + if "`e(model)'"=="liml" | "`e(model)'"=="kclass" { +di in gr "k =" %7.5f `e(kclass)' + } + if "`e(model)'"=="liml" { +di in gr "lambda =" %7.5f `e(lambda)' + } + if e(fuller) > 0 & e(fuller) < . { +di in gr "Fuller parameter=" %-5.0f `e(fuller)' + } + if "`e(hacsubtitleB)'" ~= "" { +di in gr _n "`e(hacsubtitleB)'" _c + } + if "`e(hacsubtitleV)'" ~= "" { +di in gr _n "`e(hacsubtitleV)'" + } + if "`e(hacsubtitleV2)'" ~= "" { +di in gr "`e(hacsubtitleV2)'" + } + if "`e(kernel)'"!="" { +di in gr " kernel=`e(kernel)'; bandwidth=" `e(bw)' + if "`e(bwchoice)'"!="" { +di in gr " `e(bwchoice)'" + } +di in gr " time variable (t): " in ye e(tvar) + if "`e(ivar)'" != "" { +di in gr " group variable (i): " in ye e(ivar) + } + } + di + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local N_clust `e(N_clust)' + local clustvar `e(clustvar)' + } + else { + local N_clust `e(N_clust1)' + local clustvar `e(clustvar1)' + } +di in gr "Number of clusters (`clustvar') = " _col(33) in ye %6.0f `N_clust' _continue + } +di in gr _col(55) "Number of obs = " in ye %8.0f e(N) + if "`e(clustvar2)'"!="" { +di in gr "Number of clusters (" "`e(clustvar2)'" ") = " _col(33) in ye %6.0f e(N_clust2) _continue + } +di in gr _c _col(55) "F(" %3.0f e(Fdf1) "," %6.0f e(Fdf2) ") = 
" + if e(F) < 99999 { +di in ye %8.2f e(F) + } + else { +di in ye %8.2e e(F) + } +di in gr _col(55) "Prob > F = " in ye %8.4f e(Fp) + +di in gr "Total (centered) SS = " in ye %12.0g e(yyc) _continue +di in gr _col(55) "Centered R2 = " in ye %8.4f e(r2c) +di in gr "Total (uncentered) SS = " in ye %12.0g e(yy) _continue +di in gr _col(55) "Uncentered R2 = " in ye %8.4f e(r2u) +di in gr "Residual SS = " in ye %12.0g e(rss) _continue +di in gr _col(55) "Root MSE = " in ye %8.4g e(rmse) +di + } + +* Display coefficients etc. +* Unfortunate but necessary hack here: to suppress message about cluster adjustment of +* standard error, clear e(clustvar) and then reset it after display + local cluster `e(clustvar)' + ereturn local clustvar + ereturn display, `plus' level(`level') `dispopt' + ereturn local clustvar `cluster' + +* Display 1st footer with identification stats +* Footer not displayed if -nofooter- option or if pure OLS, i.e., model="ols" and Sargan-Hansen=0 + if ~("`nofooter'"~="" | (e(model)=="ols" & (e(sargan)==0 | e(j)==0))) { + +* Under ID test + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##idtest:Underidentification test}" + if "`e(vcetype)'`e(kernel)'"=="" { +di in gr _c " (Anderson canon. corr. 
LM statistic):" + } + else { +di in gr _c " (Kleibergen-Paap rk LM statistic):" + } +di in ye _col(71) %8.3f e(idstat) +di in gr _col(52) "Chi-sq(" in ye e(iddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(idp) +* IV redundancy statistic + if "`e(redlist)'"!="" { +di in gr "-redundant- option:" +di in smcl _c "{help `helpfile'##redtest:IV redundancy test}" +di in gr _c " (LM test of redundancy of specified instruments):" +di in ye _col(71) %8.3f e(redstat) +di in gr _col(52) "Chi-sq(" in ye e(reddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(redp) +di in gr "Instruments tested: " _c + Disp `e(redlist)', _col(23) + } +di in smcl in gr "{hline 78}" + } +* Report Cragg-Donald statistic + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##widtest:Weak identification test}" +di in gr " (Cragg-Donald Wald F statistic):" in ye _col(71) %8.3f e(cdf) + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr " (Kleibergen-Paap rk Wald F statistic):" in ye _col(71) %8.3f e(widstat) + } +di in gr _c "Stock-Yogo weak ID test critical values:" + Disp_cdsy, model(`e(model)') k2(`e(exexog_ct)') nendog(`e(endog_ct)') fuller("`e(fuller)'") col1(42) col2(73) + if `r(cdmissing)' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." 
+ } + } + di in smcl in gr "{hline 78}" + } + +* Report either (a) Sargan-Hansen-C stats, or (b) robust covariance matrix problem +* e(model)="gmmw" means user-supplied weighting matrix and Hansen J using 2nd-step resids reported + if `rprob' == 0 { +* Display overid statistic + if "`e(vcetype)'" == "Robust" | "`e(model)'" == "gmmw" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } + else { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } +di in ye _col(71) %8.3f e(j) + if e(jdf) { +di in gr _col(52) "Chi-sq(" in ye e(jdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(jp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + +* Display orthog option: C statistic (difference of Sargan statistics) + if e(cstat) != . { +* If C-stat = 0 then warn, otherwise output + if e(cstat) > 0 { +di in gr "-orthog- option:" + if "`e(vcetype)'" == "Robust" { +di in gr _c "Hansen J statistic (eqn. excluding suspect orthog. conditions): " + } + else { +di in gr _c "Sargan statistic (eqn. 
excluding suspect orthogonality conditions):" + } +di in ye _col(71) %8.3f e(j)-e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(jdf)-e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f chiprob(e(jdf)-e(cstatdf),e(j)-e(cstat)) +di in smcl _c "{help `helpfile'##ctest:C statistic}" +di in gr _c " (exogeneity/orthogonality of suspect instruments): " +di in ye _col(71) %8.3f e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f e(cstatp) +di in gr "Instruments tested: " _c + Disp `e(clist)', _col(23) + } + if e(cstat) == 0 { +di in gr _n "Collinearity/identification problems in eqn. excl. suspect orthog. conditions:" +di in gr " C statistic not calculated for -orthog- option" + } + } + } + else { +* Problem exists with robust VCV - notify and list possible causes +di in r "Warning: estimated covariance matrix of moment conditions not of full rank." + if e(j)==. { +di in r " overidentification statistic not reported, and standard errors and" + } +di in r " model tests should be interpreted with caution." +di in r "Possible causes:" + if e(nocollin) { +di in r " collinearities in regressors or instruments (with -nocollin- option)" + } + if "`e(N_clust)'" != "" { +di in r " number of clusters insufficient to calculate robust covariance matrix" + } + if "`e(kernel)'" != "" { +di in r " covariance matrix of moment conditions not positive definite" +di in r " covariance matrix uses too many lags" + } +di in r " singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)" +di in r in smcl _c "{help `helpfile'##partial:partial}" +di in r " option may address problem." + } + +* Display endog option: endogeneity test statistic + if e(estat) != . 
{ +* If stat = 0 then warn, otherwise output + if e(estat) > 0 { +di in gr "-endog- option:" +di in smcl _c "{help `helpfile'##endogtest:Endogeneity test}" +di in gr _c " of endogenous regressors: " +di in ye _col(71) %8.3f e(estat) +di in gr _col(52) "Chi-sq(" in ye e(estatdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(estatp) +di in gr "Regressors tested: " _c + Disp `e(elist)', _col(23) + } + if e(estat) == 0 { +di in gr _n "Collinearity/identification problems in restricted equation:" +di in gr " Endogeneity test statistic not calculated for -endog- option" + } + } + + di in smcl in gr "{hline 78}" +* Display AR overid statistic if LIML and not robust + if "`e(model)'" == "liml" & "`e(vcetype)'" ~= "Robust" & "`e(kernel)'" == "" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (LR test of excluded instruments):" + } +di in ye _col(72) %7.3f e(arubin) + if e(arubindf) { +di in gr _col(52) "Chi-sq(" in ye e(arubindf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(arubinp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + di in smcl in gr "{hline 78}" + } + } + +* Display 2nd footer with variable lists + if "`nofooter'"=="" { + +* Warn about dropped instruments if any +* Can happen with nocollin option and rank(S) < cols(S) + if colsof(e(S)) > e(rankzz) { +di in gr "Collinearities detected among instruments: " _c +di in gr colsof(e(S))-e(rankzz) " instrument(s) dropped" + } + + if "`e(collin)'`e(dups)'" != "" | e(partial_ct) { +* If collinearities, duplicates or partial, abbreviated varlists saved with a 1 at the end + local one "1" + } + if e(endog_ct) { + di in gr "Instrumented:" _c + Disp `e(instd`one')', _col(23) + } + if e(inexog_ct) { + di in gr "Included instruments:" _c + Disp `e(inexog`one')', _col(23) + } + if e(exexog_ct) { + 
di in gr "Excluded instruments:" _c + Disp `e(exexog`one')', _col(23) + } + if e(partial_ct) { + if e(partialcons) { + local partial "`e(partial`one')' _cons" + } + else { + local partial "`e(partial`one')'" + } +di in smcl _c "{help `helpfile'##partial:Partialled-out}" + di in gr ":" _c + Disp `partial', _col(23) + if "`e(partialsmall)'"=="" { +di in gr _col(23) "nb: total SS, model F and R2s are after partialling-out;" +di in gr _col(23) " any {help `helpfile'##s_small:small-sample adjustments} do not include" +di in gr _col(23) " partialled-out variables in regressor count K" + } + else { +di in gr _col(23) "nb: total SS, model F and R2s are after partialling-out;" +di in gr _col(23) " any {help `helpfile'##s_small:small-sample adjustments} include partialled-out" +di in gr _col(23) " variables in regressor count K" + } + } + if "`e(dups)'" != "" { + di in gr "Duplicates:" _c + Disp `e(dups)', _col(23) + } + if "`e(collin)'" != "" { + di in gr "Dropped collinear:" _c + Disp `e(collin)', _col(23) + } + if "`e(ecollin)'" != "" { + di in gr "Reclassified as exog:" _c + Disp `e(ecollin)', _col(23) + } + di in smcl in gr "{hline 78}" + } +end + +************************************************************************************** + +// ************ Display collinearity and duplicates warning messages ************ // + +program define DispCollinDups + version 11.2 + if "`e(dups)'" != "" { +di in gr "Warning - duplicate variables detected" +di in gr "Duplicates:" _c + Disp `e(dups)', _col(16) + } + if "`e(collin)'" != "" { +di in gr "Warning - collinearities detected" +di in gr "Vars dropped:" _c + Disp `e(collin)', _col(16) + } +end + +// ************* Display all first-stage estimations ************ // + +program define DispFirst + version 11.2 + args helpfile + tempname firstmat ivest sheapr2 pr2 F df df_r pvalue + tempname SWF SWFdf1 SWFdf2 SWFp SWr2 + + mat `firstmat'=e(first) + if `firstmat'[1,1] == . 
{ +di +di in ye "Unable to display first-stage estimates; macro e(first) is missing" + exit + } +di in gr _newline "First-stage regressions" +di in smcl in gr "{hline 23}" +di + local endo1 : colnames(`firstmat') + local nrvars : word count `endo1' + local firsteqs "`e(firsteqs)'" + local nreqs : word count `firsteqs' + if `nreqs' < `nrvars' { +di in ye "Unable to display all first-stage regressions." +di in ye "There may be insufficient room to store results using -estimates store-," +di in ye "or names of endogenous regressors may be too long to store the results." +di in ye "Try dropping one or more estimation results using -estimates drop-," +di in ye "using the -savefprefix- option, or using shorter variable names." +di + } + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + foreach eqname of local firsteqs { + _estimates hold `ivest' + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to list stored estimation `eqname'." +di in ye "There may be insufficient room to store results using -estimates store-," +di in ye "or names of endogenous regressors may be too long to store the results." +di in ye "Try dropping one or more estimation results using -estimates drop-," +di in ye "using the -savefprefix- option, or using shorter variable names." 
+di + } + else { + local vn "`e(depvar)'" + estimates replay `eqname', noheader + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] + mat `SWF' =`firstmat'["SWF","`vn'"] + mat `SWFdf1' =`firstmat'["SWFdf1","`vn'"] + mat `SWFdf2' =`firstmat'["SWFdf2","`vn'"] + mat `SWFp' =`firstmat'["SWFp","`vn'"] + mat `SWr2' =`firstmat'["SWr2","`vn'"] + +di in gr "F test of excluded instruments:" +di in gr " F(" %3.0f `df'[1,1] "," %6.0f `df_r'[1,1] ") = " in ye %8.2f `F'[1,1] +di in gr " Prob > F = " in ye %8.4f `pvalue'[1,1] + +di in smcl "{help `helpfile'##swstats:Sanderson-Windmeijer multivariate F test of excluded instruments:}" +di in gr " F(" %3.0f `SWFdf1'[1,1] "," %6.0f `SWFdf2'[1,1] ") = " in ye %8.2f `SWF'[1,1] +di in gr " Prob > F = " in ye %8.4f `SWFp'[1,1] + +di + } + _estimates unhold `ivest' + } +end + +// ************* Display list of stored first-stage and RF estimations ************ // + +program define DispStored + args savesfirst saverf savefirst + version 11.2 + + if "`savesfirst'" != "" { + local eqlist "`e(sfirsteq)'" + } + if "`saverf'" != "" { + local eqlist "`eqlist' `e(rfeq)'" + } + if "`savefirst'" != "" { + local eqlist "`eqlist' `e(firsteqs)'" + } + local eqlist : list retokenize eqlist + +di in gr _newline "Stored estimation results" +di in smcl in gr "{hline 25}" _c + capture estimates dir `eqlist' + if "`eqlist'" != "" & _rc == 0 { +// Estimates exist and can be listed + estimates dir `eqlist' + } + else if "`eqlist'" != "" & _rc != 0 { +di +di in ye "Unable to list stored estimations." 
+di + } +end + +// ************* Display summary first-stage and ID test results ************ // + +program define DispFFirst + version 11.2 + args helpfile + tempname firstmat + tempname sheapr2 pr2 F df df_r pvalue + tempname SWF SWFdf1 SWFdf2 SWFp SWchi2 SWchi2p SWr2 + mat `firstmat'=e(first) + if `firstmat'[1,1] == . { +di +di in ye "Unable to display summary of first-stage estimates; macro e(first) is missing" + exit + } + local endo : colnames(`firstmat') + local nrvars : word count `endo' + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + local efirsteqs "`e(firsteqs)'" + + mat `df' =`firstmat'["df",1] + mat `df_r' =`firstmat'["df_r",1] + mat `SWFdf1' =`firstmat'["SWFdf1",1] + mat `SWFdf2' =`firstmat'["SWFdf2",1] + +di +di in gr _newline "Summary results for first-stage regressions" +di in smcl in gr "{hline 43}" +di + +di _c in smcl _col(44) "{help `helpfile'##swstats:(Underid)}" +di in smcl _col(65) "{help `helpfile'##swstats:(Weak id)}" + +di _c in gr "Variable |" +di _c in smcl _col(16) "{help `helpfile'##swstats:F}" in gr "(" +di _c in ye _col(17) %3.0f `df'[1,1] in gr "," in ye %6.0f `df_r'[1,1] in gr ") P-val" +di _c in gr _col(37) "|" +di _c in smcl _col(39) "{help `helpfile'##swstats:SW Chi-sq}" in gr "(" +di _c in ye %3.0f `SWFdf1'[1,1] in gr ") P-val" +di _c in gr _col(60) "|" +di _c in smcl _col(62) "{help `helpfile'##swstats:SW F}" in gr "(" +di in ye _col(67) %3.0f `SWFdf1'[1,1] in gr "," in ye %6.0f `SWFdf2'[1,1] in gr ")" + + local i = 1 + foreach vn of local endo { + + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] + mat `SWF' =`firstmat'["SWF","`vn'"] + mat `SWFdf1' =`firstmat'["SWFdf1","`vn'"] + mat `SWFdf2' =`firstmat'["SWFdf2","`vn'"] + mat `SWFp' =`firstmat'["SWFp","`vn'"] + mat `SWchi2' 
=`firstmat'["SWchi2","`vn'"] + mat `SWchi2p' =`firstmat'["SWchi2p","`vn'"] + mat `SWr2' =`firstmat'["SWr2","`vn'"] + + local vnlen : length local vn + if `vnlen' > 12 { + local vn : piece 1 12 of "`vn'" + } +di _c in y %-12s "`vn'" _col(14) in gr "|" _col(18) in y %8.2f `F'[1,1] +di _c _col(28) in y %8.4f `pvalue'[1,1] +di _c _col(37) in g "|" _col(42) in y %8.2f `SWchi2'[1,1] _col(51) in y %8.4f `SWchi2p'[1,1] +di _col(60) in g "|" _col(65) in y %8.2f `SWF'[1,1] + local i = `i' + 1 + } +di + + if "`robust'`cluster'" != "" { + if "`cluster'" != "" { + local rtype "cluster-robust" + } + else if "`kernel'" != "" { + local rtype "heteroskedasticity and autocorrelation-robust" + } + else { + local rtype "heteroskedasticity-robust" + } + } + else if "`kernel'" != "" { + local rtype "autocorrelation-robust" + } + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: first-stage test statistics `rtype'" +di + } + + local k2 = `SWFdf1'[1,1] +di in gr "Stock-Yogo weak ID F test critical values for single endogenous regressor:" + Disp_cdsy, model(`e(model)') k2(`e(exexog_ct)') nendog(1) fuller("`e(fuller)'") col1(36) col2(67) + if `r(cdmissing)' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(model)'"=="iv" & "`e(vcetype)'`e(kernel)'"=="" { +di in gr "NB: Critical values are for Sanderson-Windmeijer F statistic." + } + else { +di in gr "NB: Critical values are for i.i.d. errors only." + } +di + } + +* Check that SW chi-sq and F denominator are correct and = underid test dof + if e(iddf)~=`SWFdf1'[1,1] { +di in red "Warning: Error in calculating first-stage id statistics above;" +di in red " dof of SW statistics is " `SWFdf1'[1,1] ", should be L-(K-1)=`e(iddf)'." 
+ } + + tempname iddf idstat idp widstat cdf rkf + scalar `iddf'=e(iddf) + scalar `idstat'=e(idstat) + scalar `idp'=e(idp) + scalar `widstat'=e(widstat) + scalar `cdf'=e(cdf) + capture scalar `rkf'=e(rkf) +di in smcl "{help `helpfile'##idtest:Underidentification test}" +di in gr "Ho: matrix of reduced form coefficients has rank=K1-1 (underidentified)" +di in gr "Ha: matrix has rank=K1 (identified)" + if "`robust'`kernel'"=="" { +di in ye "Anderson canon. corr. LM statistic" _c + } + else { +di in ye "Kleibergen-Paap rk LM statistic" _c + } +di in gr _col(42) "Chi-sq(" in ye `iddf' in gr ")=" %-7.2f in ye `idstat' /* + */ _col(61) in gr "P-val=" %6.4f in ye `idp' + +di +di in smcl "{help `helpfile'##widtest:Weak identification test}" +di in gr "Ho: equation is weakly identified" +di in ye "Cragg-Donald Wald F statistic" _col(65) %8.2f `cdf' + if "`robust'`kernel'"~="" { +di in ye "Kleibergen-Paap Wald rk F statistic" _col(65) %8.2f `rkf' + } +di + +di in gr "Stock-Yogo weak ID test critical values for K1=`e(endog_ct)' and L1=`e(exexog_ct)':" + Disp_cdsy, model(`e(model)') k2(`e(exexog_ct)') nendog(`e(endog_ct)') fuller("`e(fuller)'") col1(36) col2(67) + if `r(cdmissing)' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." 
+ } + } +di + + tempname arf arfp archi2 archi2p ardf ardf_r + tempname sstat sstatp sstatdf +di in smcl "{help `helpfile'##wirobust:Weak-instrument-robust inference}" +di in gr "Tests of joint significance of endogenous regressors B1 in main equation" +di in gr "Ho: B1=0 and orthogonality conditions are valid" +* Needs to be small so that adjusted dof is reflected in F stat + scalar `arf'=e(arf) + scalar `arfp'=e(arfp) + scalar `archi2'=e(archi2) + scalar `archi2p'=e(archi2p) + scalar `ardf'=e(ardf) + scalar `ardf_r'=e(ardf_r) + scalar `sstat'=e(sstat) + scalar `sstatp'=e(sstatp) + scalar `sstatdf'=e(sstatdf) +di in ye _c "Anderson-Rubin Wald test" +di in gr _col(36) "F(" in ye `ardf' in gr "," in ye `ardf_r' in gr ")=" /* + */ _col(49) in ye %7.2f `arf' _col(61) in gr "P-val=" in ye %6.4f `arfp' +di in ye _c "Anderson-Rubin Wald test" +di in gr _col(36) "Chi-sq(" in ye `ardf' in gr ")=" /* + */ _col(49) in ye %7.2f `archi2' _col(61) in gr "P-val=" in ye %6.4f `archi2p' +di in ye _c "Stock-Wright LM S statistic" +di in gr _col(36) "Chi-sq(" in ye `sstatdf' in gr ")=" /* + */ _col(49) in ye %7.2f `sstat' _col(61) in gr "P-val=" in ye %6.4f `sstatp' +di + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: Underidentification, weak identification and weak-identification-robust" +di in gr " test statistics `rtype'" +di + } + + if "`cluster'" != "" & "`e(clustvar2)'"=="" { +di in gr "Number of clusters N_clust = " in ye %10.0f e(N_clust) + } + else if "`e(clustvar2)'" ~= "" { +di in gr "Number of clusters (1) N_clust1 = " in ye %10.0f e(N_clust1) +di in gr "Number of clusters (2) N_clust2 = " in ye %10.0f e(N_clust2) + } +di in gr "Number of observations N = " in ye %10.0f e(N) +di in gr "Number of regressors K = " in ye %10.0f e(rankxx) +di in gr "Number of endogenous regressors K1 = " in ye %10.0f e(endog_ct) +di in gr "Number of instruments L = " in ye %10.0f e(rankzz) +di in gr "Number of excluded instruments L1 = " in ye %10.0f e(ardf) + if "`e(partial)'" != "" 
{ +di in gr "Number of partialled-out regressors/IVs = " in ye %10.0f e(partial_ct) +di in gr "NB: K & L do not included partialled-out variables" + } + +end + +// ************* Post first-stage and/or RF estimations ************ // + +program define PostFirstRF, eclass + version 11.2 + syntax [if] /// + [ , /// + first(string) /// can be fv + rf /// omit first(.) and rf => post system of eqns + rmse_rf(real 0) /// + bmat(name) /// + vmat(name) /// + smat(name) /// + firstmat(name) /// + lhs1(string) /// can be fv + endo1(string) /// + znames0(string) /// + znames1(string) /// + bvclean(integer 0) /// + fvops(integer 0) /// + partial_ct(integer 0) /// + robust /// + cluster(string) /// + cluster1(string) /// + cluster2(string) /// + nc(integer 0) /// + nc1(integer 0) /// + nc2(integer 0) /// + kernel(string) /// + bw(real 0) /// + ivar(name) /// + tvar(name) /// + obs(integer 0) /// + iv1_ct(integer 0) /// + cons(integer 0) /// + partialcons(integer 0) /// + dofminus(integer 0) /// + sdofminus(integer 0) /// + ] + +// renaming/copying + local N = `obs' + local N_clust = `nc' + local N_clust1 = `nc1' + local N_clust2 = `nc2' + tempname b V S + mat `b' = `bmat' + mat `V' = `vmat' + mat `S' = `smat' + + marksample touse + + mat colname `b' = `lhs1' `endo1' + mat rowname `b' = `znames1' + mat `b' = vec(`b') + mat `b' = `b'' + mat colname `V' = `: colfullnames `b'' + mat rowname `V' = `: colfullnames `b'' + mat colname `S' = `: colfullnames `b'' + mat rowname `S' = `: colfullnames `b'' + + if "`cluster'"=="" { + matrix `V'=`V'*(`N'-`dofminus')/(`N'-`iv1_ct'-`dofminus'-`sdofminus') + } + else { + matrix `V'=`V'*(`N'-1)/(`N'-`iv1_ct'-`sdofminus') /// + * `N_clust'/(`N_clust'-1) + } + +// If RF or first-stage estimation required, extract it +// also set macros for model and depvar + if "`rf'`first'"~="" { + if "`rf'"~="" { // RF + local vnum = 0 + local model rf + local depvar `lhs1' + local rmse = `rmse_rf' + } + else { // first-stage + local vnum : list posof "`first'" 
in endo1 + local vnum = `vnum' + local model first + local depvar `first' + local rmse = el(`firstmat', rownumb(`firstmat',"rmse"), colnumb(`firstmat',"`first'")) + } + local c0 = 1 + `vnum'*`iv1_ct' + local c1 = (`vnum'+1)*`iv1_ct' + mat `b' = `b'[1,`c0'..`c1'] + mat `V' = `V'[`c0'..`c1',`c0'..`c1'] + mat `S' = `S'[`c0'..`c1',`c0'..`c1'] + mat coleq `b' = "" + mat coleq `V' = "" + mat roweq `V' = "" + mat coleq `S' = "" + mat roweq `S' = "" + } + else { + local model sfirst + local eqlist `lhs1' `endo1' + } + +// reinsert omitteds etc. unless requested not to +// eqlist empty unless first-stage/rf system + if ~`bvclean' { + AddOmitted, bmat(`b') vmat(`V') cnb0(`znames0') cnb1(`znames1') eqlist(`eqlist') + mat `b' = r(b) + mat `V' = r(V) +// build fv info (base, empty, etc.) unless there was partialling out + if `fvops' & ~`partial_ct' { + local bfv "buildfvinfo" + } + } + + local dof = `N' - `iv1_ct' - `dofminus' - `sdofminus' + ereturn post `b' `V', obs(`obs') esample(`touse') dof(`dof') depname(`depvar') `bfv' + +// saved RF/first-stage equation scalars + if "`rf'`first'"~="" { + ereturn scalar rmse = `rmse' + ereturn scalar df_r = `dof' + ereturn scalar df_m = `iv1_ct' - `cons' + `sdofminus' - `partialcons' + } + ereturn scalar k_eq = `: word count `endo1'' + ereturn local cmd ivreg2 + ereturn local model `model' + ereturn matrix S `S' + + if "`kernel'"!="" { + ereturn local kernel "`kernel'" + ereturn scalar bw=`bw' + ereturn local tvar "`tvar'" + if "`ivar'" ~= "" { + ereturn local ivar "`ivar'" + } + } + + if "`robust'" != "" { + local vce "robust" + } + if "`cluster1'" != "" { + if "`cluster2'"=="" { + local vce "`vce' cluster" + } + else { + local vce "`vce' two-way cluster" + } + } + if "`kernel'" != "" { + if "`robust'" != "" { + local vce "`vce' hac" + } + else { + local vce "`vce' ac" + } + local vce "`vce' `kernel' bw=`bw'" + } + + local vce : list clean vce + local vce = lower("`vce'") + ereturn local vce `vce' + + if "`cluster'"!="" { + ereturn 
scalar N_clust=`N_clust' + ereturn local clustvar `cluster' + } + if "`cluster2'"!="" { + ereturn scalar N_clust1=`N_clust1' + ereturn scalar N_clust2=`N_clust2' + ereturn local clustvar1 `cluster1' + ereturn local clustvar2 `cluster2' + } + + if "`robust'`cluster'" != "" { + ereturn local vcetype "Robust" + } + +// Assemble output titles + if "`e(vcetype)'" == "Robust" { + local hacsubtitle1 "heteroskedasticity" + } + if "`e(kernel)'"!="" & "`e(clustvar)'"=="" { + local hacsubtitle3 "autocorrelation" + } + if "`kiefer'"!="" { + local hacsubtitle3 "within-cluster autocorrelation (Kiefer)" + } + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local hacsubtitle3 "clustering on `e(clustvar)'" + } + else { + local hacsubtitle3 "clustering on `e(clustvar1)' and `e(clustvar2)'" + } + if "`e(kernel)'" != "" { + local hacsubtitle4 "and kernel-robust to common correlated disturbances (Driscoll-Kraay)" + } + } + if "`hacsubtitle1'"~="" & "`hacsubtitle3'" ~= "" { + local hacsubtitle2 " and " + } + local hacsubtitle "`hacsubtitle1'`hacsubtitle2'`hacsubtitle3'" + if "`hacsubtitle'"~="" { + ereturn local hacsubtitleV "Statistics robust to `hacsubtitle'" + } + else { + ereturn local hacsubtitleV "Statistics consistent for homoskedasticity only" + } + if "`hacsubtitle4'"~="" { + ereturn local hacsubtitleV2 "`hacsubtitle4'" + } + if "`sw'"~="" { + ereturn local hacsubtitleV "Stock-Watson heteroskedastic-robust statistics (BETA VERSION)" + } + +end + + + +************************************************************************************** +program define IsStop, sclass + /* sic, must do tests one-at-a-time, + * 0, may be very large */ + version 11.2 + if `"`0'"' == "[" { + sret local stop 1 + exit + } + if `"`0'"' == "," { + sret local stop 1 + exit + } + if `"`0'"' == "if" { + sret local stop 1 + exit + } +* per official ivreg 5.1.3 + if substr(`"`0'"',1,3) == "if(" { + sret local stop 1 + exit + } + if `"`0'"' == "in" { + sret local stop 1 + exit + } + if `"`0'"' == "" { 
+ sret local stop 1 + exit + } + else sret local stop 0 +end + +// ************* Display list of variables ************ // + +program define Disp + version 11.2 + syntax [anything] [, _col(integer 15) ] + local maxlen = 80-`_col' + local len = 0 + local first = 1 + foreach vn in `anything' { +* Don't display if base or omitted variable + _ms_parse_parts `vn' + if ~`r(omit)' { + local vnlen : length local vn + if `len'+`vnlen' > `maxlen' { + di + local first = 1 + local len = `vnlen' + } + else { + local len = `len'+`vnlen'+1 + } + if `first' { + local first = 0 + di in gr _col(`_col') "`vn'" _c + } + else { + di in gr " `vn'" _c + } + } + } +* Finish with a newline + di +end + +// *********** Display Cragg-Donald/Stock-Yogo critical values etc. ******** // + +program define Disp_cdsy, rclass + version 11.2 + syntax , col1(integer) col2(integer) model(string) k2(integer) nendog(integer) [ fuller(string) ] + local cdmissing=1 + if "`model'"=="iv" | "`model'"=="gmm2s" | "`model'"=="gmmw" { + cdsy, type(ivbias5) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') " 5% maximal IV relative bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% maximal IV relative bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% maximal IV relative bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "30% maximal IV relative bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% maximal IV size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." 
{ + di in gr _col(`col1') "15% maximal IV size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% maximal IV size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "25% maximal IV size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`model'"=="liml" & "`fuller'"=="") | "`model'"=="cue" { + cdsy, type(limlsize10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% maximal LIML size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "15% maximal LIML size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% maximal LIML size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "25% maximal LIML size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`model'"=="liml" & "`fuller'"~="") { + cdsy, type(fullrel5) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') " 5% maximal Fuller rel. bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% maximal Fuller rel. bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% maximal Fuller rel. bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "30% maximal Fuller rel. 
bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') " 5% Fuller maximum bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% Fuller maximum bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% Fuller maximum bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "30% Fuller maximum bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + return scalar cdmissing =`cdmissing' +end + +program define cdsy, rclass + version 11.2 + syntax , type(string) k2(integer) nendog(integer) + +* type() can be ivbias5 (k2<=100, nendog<=3) +* ivbias10 (ditto) +* ivbias20 (ditto) +* ivbias30 (ditto) +* ivsize10 (k2<=100, nendog<=2) +* ivsize15 (ditto) +* ivsize20 (ditto) +* ivsize25 (ditto) +* fullrel5 (ditto) +* fullrel10 (ditto) +* fullrel20 (ditto) +* fullrel30 (ditto) +* fullmax5 (ditto) +* fullmax10 (ditto) +* fullmax20 (ditto) +* fullmax30 (ditto) +* limlsize10 (ditto) +* limlsize15 (ditto) +* limlsize20 (ditto) +* limlsize25 (ditto) + + tempname temp cv + +* Initialize critical value as MV + scalar `cv'=. 
+ + if "`type'"=="ivbias5" { + mata: s_cdsy("`temp'", 1) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias10" { + mata: s_cdsy("`temp'", 2) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias20" { + mata: s_cdsy("`temp'", 3) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias30" { + mata: s_cdsy("`temp'", 4) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + if "`type'"=="ivsize10" { + mata: s_cdsy("`temp'", 5) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize15" { + mata: s_cdsy("`temp'", 6) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize20" { + mata: s_cdsy("`temp'", 7) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize25" { + mata: s_cdsy("`temp'", 8) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel5" { + mata: s_cdsy("`temp'", 9) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel10" { + mata: s_cdsy("`temp'", 10) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel20" { + mata: s_cdsy("`temp'", 11) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel30" { + mata: s_cdsy("`temp'", 12) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax5" { + mata: s_cdsy("`temp'", 13) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax10" { + mata: s_cdsy("`temp'", 14) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax20" { + mata: s_cdsy("`temp'", 15) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] 
+ } + } + + if "`type'"=="fullmax30" { + mata: s_cdsy("`temp'", 16) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize10" { + mata: s_cdsy("`temp'", 17) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize15" { + mata: s_cdsy("`temp'", 18) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize20" { + mata: s_cdsy("`temp'", 19) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize25" { + mata: s_cdsy("`temp'", 20) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + return scalar cv=`cv' +end + +// ***************************** Parse ivreg2 arguments **************** // + +program define ivparse, sclass + version 11.2 + syntax [anything(name=0)] /// + [ , /// + ivreg2name(name) /// + partial(string) /// as string because may have nonvariable in list + fwl(string) /// legacy option + orthog(varlist fv ts) /// + endogtest(varlist fv ts) /// + redundant(varlist fv ts) /// + depname(string) /// + robust /// + cluster(varlist fv ts) /// + bw(string) /// as string because may have noninteger option "auto" + kernel(string) /// + dkraay(integer 0) /// + sw /// + kiefer /// + center /// + NOCONSTANT /// + tvar(varname) /// + ivar(varname) /// + gmm2s /// + gmm /// + cue /// + liml /// + fuller(real 0) /// + kclass(real 0) /// + b0(string) /// + wmatrix(string) /// + NOID /// + savefirst /// + savefprefix(name) /// + saverf /// + saverfprefix(name) /// + savesfirst /// + savesfprefix(name) /// + psd0 /// + psda /// + dofminus(integer 0) /// + NOCOLLIN /// + useqr /// + bvclean /// + eform(string) /// + NOOMITTED /// + vsquish /// + noemptycells /// + baselevels /// + allbaselevels /// + ] + +// TS and FV opts based on option varlists + local tsops = ("`s(tsops)'"=="true") + local fvops = ("`s(fvops)'"=="true") +// useful boolean + local cons 
=("`noconstant'"=="") + + local n 0 + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + while `s(stop)'==0 { + if "`paren'"=="(" { + local ++n + if `n'>1 { +di as err `"syntax is "(all instrumented variables = instrument variables)""' + exit 198 + } + gettoken p lhs : lhs, parse(" =") + while "`p'"!="=" { + if "`p'"=="" { +di as err `"syntax is "(all instrumented variables = instrument variables)""' +di as er `"the equal sign "=" is required"' + exit 198 + } + local endo `endo' `p' + gettoken p lhs : lhs, parse(" =") + } + local exexog `lhs' + } + else { + local inexog `inexog' `lhs' + } + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + } +// lhs attached to front of inexog + gettoken lhs inexog : inexog + local endo : list retokenize endo + local inexog : list retokenize inexog + local exexog : list retokenize exexog +// If depname not provided (default) name is lhs variable + if "`depname'"=="" { + local depname `lhs' + } + +// partial, including legacy FWL option + local partial `partial' `fwl' +// Need to nonvars "_cons" from list if present +// Also set `partialcons' local to 0/1 +// Need word option so that varnames with cons in them aren't zapped + local partial : subinstr local partial "_cons" "", all count(local partialcons) word + local partial : list retokenize partial + if "`partial'"=="_all" { + local partial `inexog' + } +// constant always partialled out if present in regression and other inexog are being partialled out +// (incompatibilities caught in error-check section below) + if "`partial'"~="" { + local partialcons = (`cons' | `partialcons') + } + +// detect if TS or FV operators used in main varlists +// clear any extraneous sreturn macros first + sreturn clear + local 0 `lhs' `inexog' `endo' `exexog' `partial' + syntax varlist(fv ts) + local tsops = ("`s(tsops)'"=="true") | `tsops' + local fvops = ("`s(fvops)'"=="true") | `fvops' + +// TS operators not allowed with cluster, ivar or tvar. Captured in -syntax-. 
+ if "`tvar'" == "" { + local tvar `_dta[_TStvar]' + } + if "`ivar'" == "" { + local ivar `_dta[_TSpanel]' + } + if "`_dta[_TSdelta]'" == "" { + local tdelta 1 + } + else { // use evaluator since _dta[_TSdelta] can + local tdelta = `_dta[_TSdelta]' // be stored as e.g. +1.0000000000000X+000 + } + + sreturn local lhs `lhs' + sreturn local depname `depname' + sreturn local endo `endo' + sreturn local inexog `inexog' + sreturn local exexog `exexog' + sreturn local partial `partial' + sreturn local cons =`cons' + sreturn local partialcons =`partialcons' + sreturn local tsops =`tsops' + sreturn local fvops =`fvops' + sreturn local tvar `tvar' + sreturn local ivar `ivar' + sreturn local tdelta `tdelta' + sreturn local noid `noid' // can be overriden below + sreturn local liml `liml' // can be overriden below + +//convert to boolean + sreturn local useqr =("`useqr'" ~= "") + +// Cluster and SW imply robust + if "`cluster'`sw'"~="" { + local robust "robust" + } + +// HAC estimation. + +// First dkraay(bw): special case of HAC with clustering +// on time-series var in a panel + kernel-robust + if `dkraay' { + if "`bw'" == "" { + local bw `dkraay' + } + if "`cluster'" == "" { + local cluster `tvar' + } + } +// If bw is omitted, default `bw' is 0. +// bw(.) can be number or "auto" hence arrives as string, but is returned as number +// bw=-1 returned if "auto" +// If bw or kernel supplied, check/set `kernel'. +// Macro `kernel' is also used for indicating HAC in use. +// If bw or kernel not supplied, set bw=0 + if "`bw'" == "" & "`kernel'" == "" { + local bw 0 + } + else { +// Check it's a valid kernel and replace with unabbreviated kernel name; check bw. +// s_vkernel is in livreg2 mlib. 
+ mata: s_vkernel("`kernel'", "`bw'", "`ivar'") + local kernel `r(kernel)' + local bw `r(bw)' // = -1 if bw(auto) option chosen + local tsops = 1 + } +// kiefer = kernel(tru) bw(T) and no robust + if "`kiefer'" ~= "" & "`kernel'" == "" { + local kernel "Truncated" + } + +// Done parsing VCE opts + sreturn local bw `bw' + sreturn local kernel `kernel' + sreturn local robust `robust' + sreturn local cluster `cluster' + if `bw' { + sreturn local bwopt "bw(`bw')" + sreturn local kernopt "kernel(`kernel')" + } +// center arrives as string but is returned as boolean + sreturn local center =("`center'"=="center") + +// Fuller implies LIML + if `fuller' != 0 { + sreturn local liml "liml" + sreturn local fulleropt "fuller(`fuller')" + } + + if `kclass' != 0 { + sreturn local kclassopt "kclass(`kclass')" + } + +// b0 implies noid. + if "`b0'" ~= "" { + sreturn local noid "noid" + } + +// save first, rf + if "`savefprefix'" != "" { // savefprefix implies savefirst + local savefirst "savefirst" + } + else { // default savefprefix is _ivreg2_ + local savefprefix "_`ivreg2name'_" + } + sreturn local savefirst `savefirst' + sreturn local savefprefix `savefprefix' + if "`saverfprefix'" != "" { // saverfprefix implies saverf + local saverf "saverf" + } + else { // default saverfprefix is _ivreg2_ + local saverfprefix "_`ivreg2name'_" + } + sreturn local saverf `saverf' + sreturn local saverfprefix `saverfprefix' + if "`savesfprefix'" != "" { // savesfprefix implies savesfirst + local savesfirst "savesfirst" + } + else { // default saverfprefix is _ivreg2_ + local savesfprefix "_`ivreg2name'_" + } + sreturn local savesfirst `savesfirst' + sreturn local savesfprefix `savesfprefix' + +// Macro psd has either psd0, psda or is empty + sreturn local psd "`psd0'`psda'" + +// dofminus + if `dofminus' { + sreturn local dofmopt dofminus(`dofminus') + } + +// display options + local dispopt eform(`eform') `vsquish' `noomitted' `noemptycells' `baselevels' `allbaselevels' +// now boolean - 
indicates that omitted and/or base vars should NOT be added to VCV +// automatically triggered by partial + local bvclean = wordcount("`bvclean'") | wordcount("`partial'") | `partialcons' + sreturn local bvclean `bvclean' + sreturn local dispopt `dispopt' + +// ************ ERROR CHECKS ************* // + + if `partialcons' & ~`cons' { +di in r "Error: _cons listed in partial() but equation specifies -noconstant-." + exit 198 + } + if `partialcons' > 1 { +// Just in case of multiple _cons +di in r "Error: _cons listed more than once in partial()." + exit 198 + } + +// User-supplied tvar and ivar checked if consistent with tsset. + if "`tvar'"!="`_dta[_TStvar]'" { +di as err "invalid tvar() option - data already -tsset-" + exit 5 + } + if "`ivar'"!="`_dta[_TSpanel]'" { +di as err "invalid ivar() option - data already -xtset-" + exit 5 + } + +// dkraay + if `dkraay' { + if "`ivar'" == "" | "`tvar'" == "" { +di as err "invalid use of dkraay option - must use tsset panel data" + exit 5 + } + if "`dkraay'" ~= "`bw'" { +di as err "cannot use dkraay(.) and bw(.) 
options together" + exit 198 + } + if "`cluster'" ~= "`tvar'" { +di as err "invalid use of dkraay option - must cluster on `tvar' (or omit cluster option)" + exit 198 + } + } + +// kiefer VCV = kernel(tru) bw(T) and no robust with tsset data + if "`kiefer'" ~= "" { + if "`ivar'" == "" | "`tvar'" == "" { +di as err "invalid use of kiefer option - must use tsset panel data" + exit 5 + } + if "`robust'" ~= "" { +di as err "incompatible options: kiefer and robust" + exit 198 + } + if "`kernel'" ~= "" & "`kernel'" ~= "Truncated" { +di as err "incompatible options: kiefer and kernel(`kernel')" + exit 198 + } + if (`bw'~=0) { +di as err "incompatible options: kiefer and bw" + exit 198 + } + } + +// sw=Stock-Watson robust SEs + if "`sw'" ~= "" & "`cluster'" ~= "" { +di as err "Stock-Watson robust SEs not supported with -cluster- option" + exit 198 + } + if "`sw'" ~= "" & "`kernel'" ~= "" { +di as err "Stock-Watson robust SEs not supported with -kernel- option" + exit 198 + } + if "`sw'" ~= "" & "`ivar'"=="" { +di as err "Must -xtset- or -tsset- data or specify -ivar- with -sw- option" + exit 198 + } + +// LIML/kclass incompatibilities + if "`liml'`kclassopt'" != "" { + if "`gmm2s'`cue'" != "" { +di as err "GMM estimation not available with LIML or k-class estimators" + exit 198 + } + if `fuller' < 0 { +di as err "invalid Fuller option" + exit 198 + } + if "`liml'" != "" & "`kclassopt'" != "" { +di as err "cannot use liml and kclass options together" + exit 198 + } + if `kclass' < 0 { +di as err "invalid k-class option" + exit 198 + } + } + + if "`gmm2s'" != "" & "`cue'" != "" { +di as err "incompatible options: 2-step efficient gmm and cue gmm" + exit 198 + } + + if "`gmm2s'`cue'" != "" & "`exexog'" == "" { +di as err "option `gmm2s'`cue' invalid: no excluded instruments specified" + exit 102 + } + +// Legacy gmm option + if "`gmm'" ~= "" { +di as err "-gmm- is no longer a supported option; use -gmm2s- with the appropriate option" +di as res " gmm = gmm2s robust" +di as 
res " gmm robust = gmm2s robust" +di as res " gmm bw() = gmm2s bw()" +di as res " gmm robust bw() = gmm2s robust bw()" +di as res " gmm cluster() = gmm2s cluster()" + exit 198 + } + +// b0 incompatible options. + if "`b0'" ~= "" & "`gmm2s'`cue'`liml'`wmatrix'" ~= "" { +di as err "incompatible options: -b0- and `gmm2s' `cue' `liml' `wmatrix'" + exit 198 + } + if "`b0'" ~= "" & `kclass' ~= 0 { +di as err "incompatible options: -b0- and kclass(`kclass')" + exit 198 + } + + if "`psd0'"~="" & "`psda'"~="" { +di as err "cannot use psd0 and psda options together" + exit 198 + } +end + +// *************** Check varlists for for duplicates and collinearities ***************** // + +program define CheckDupsCollin, sclass + version 11.2 + syntax /// + [ , /// + lhs(string) /// + endo(string) /// + inexog(string) /// + exexog(string) /// + partial(string) /// + orthog(string) /// + endogtest(string) /// + redundant(string) /// + touse(string) /// + wvar(string) /// + wf(real 0) /// + NOCONSTANT /// + NOCOLLIN /// + fvall /// + fvsep /// + ] + + if "`fvall'`fvsep'"=="" { // default, expand RHS and exexog separately + local rhs `endo' `inexog' + foreach vl in lhs rhs exexog { + fvexpand ``vl'' if `touse' + local `vl' `r(varlist)' + } + local allvars `rhs' `exexog' + } + else if "`fvall'"~="" { // expand all 3 varlists as one + fvexpand `lhs' if `touse' + local lhs `r(varlist)' + fvexpand `endo' `inexog' `exexog' if `touse' + local allvars `r(varlist)' + } + else if "`fvsep'"~="" { // expand 3 varlists separately + foreach vl in lhs endo inexog exexog { + fvexpand ``vl'' if `touse' + local `vl' `r(varlist)' + } + local allvars `endo' `inexog' `exexog' + } + else { // shouldn't reach here +di as err "internal ivreg2 err: CheckDupsCollin" + exit 198 + } + +// Create dictionary: `allvars' is list with b/n/o etc., sallvars is stripped version +// NB: lhs is not in dictionary and won't need to recreate it + ivreg2_fvstrip `allvars' + local sallvars `r(varlist)' + +// Create consistent 
expanded varlists +// (1) expand; (2) strip (since base etc. may be wrong); (3) recreate using dictionary +// NB: matchnames will return unmatched original name if not found in 2nd arg varlist + foreach vl in endo inexog exexog partial orthog endogtest redundant { + fvexpand ``vl'' if `touse' + ivreg2_fvstrip `r(varlist)' + local stripped `r(varlist)' // create stripped version of varlist + matchnames "`stripped'" "`sallvars'" "`allvars'" // match using dictionary + local `vl' `r(names)' // new consistent varlist with correct b/n/o etc. + } + +// Check for duplicates of variables +// (1) inexog > endo +// (2) inexog > exexog +// (3) endo + exexog = inexog, as if it were "perfectly predicted" + local lhs0 `lhs' // create here + local dupsen1 : list dups endo + local dupsin1 : list dups inexog + local dupsex1 : list dups exexog + foreach vl in endo inexog exexog partial orthog endogtest redundant { + local `vl'0 : list uniq `vl' + } +// Remove inexog from endo + local dupsen2 : list endo0 & inexog0 + local endo0 : list endo0 - inexog0 +// Remove inexog from exexog + local dupsex2 : list exexog0 & inexog0 + local exexog0 : list exexog0 - inexog0 +// Remove endo from exexog + local dupsex3 : list exexog0 & endo0 + local exexog0 : list exexog0 - endo0 + local dups "`dupsen1' `dupsex1' `dupsin1' `dupsen2' `dupsex2' `dupsex3'" + local dups : list uniq dups + +// Collinearity checks + +// Need variable counts for "0" varlists +// These do NOT include the constant + local endo0_ct : word count `endo0' + local inexog0_ct : word count `inexog0' + local rhs0_ct : word count `inexog0' `exexog0' + local exexog0_ct : word count `exexog' + + if "`nocollin'" == "" { + +// Needed for ivreg2_rmcollright2 + tempvar normwt + qui gen double `normwt' = `wf' * `wvar' if `touse' + +// Simple case: no endogenous regressors, only included and excluded exogenous + if `endo0_ct'==0 { +// Call ivreg2_rmcollright2 on "0" versions of inexog and exexog +// noexpand since already expanded and don't 
want inconsistant expansion +// newonly since don't want base vars in collinear list + qui ivreg2_rmcollright2 `inexog0' `exexog0' if `touse', /// + normwt(`normwt') `noconstant' noexpand newonly +// ivreg2_rmcollright2 returns fulll varlist with omitteds marked as omitted, +// so just need to separate the inexog and exexog lists + if `r(k_omitted)' { + local collin `collin' `r(omitted)' + local inexog0 "" + local exexog0 "" + local nvarlist `r(varlist)' + local i 1 + while `i' <= `rhs0_ct' { + local nvar : word `i' of `nvarlist' + if `i' <= `inexog0_ct' { + local inexog0 `inexog0' `nvar' // first batch go into inexog0 + } + else { + local exexog0 `exexog0' `nvar' // remainder go into exexog0 + } + local ++i + } + local inexog0 : list retokenize inexog0 + local exexog0 : list retokenize exexog0 + } + } +// Not-simple case: endogenous regressors + else { + +// 1st pass through - remove intra-endo collinears + qui ivreg2_rmcollright2 `endo0' if `touse', /// + normwt(`normwt') `noconstant' noexpand newonly +// ivreg2_rmcollright2 returns fulll varlist with omitteds marked as omitted, +// so just need to separate the inexog and exexog lists + if `r(k_omitted)' { + local collin `collin' `r(omitted)' + local endo0 `r(varlist)' + } + +// 2nd pass through - good enough unless endog appear as colllinear +// noexpand since already expanded and don't want inconsistent expansion +// newonly since don't want base vars in collinear list + qui ivreg2_rmcollright2 `inexog0' `exexog0' `endo0' if `touse', /// + normwt(`normwt') `noconstant' noexpand newonly + if `r(k_omitted)' { +// Check if any endo are in the collinears. 
+// If yes, reclassify as inexog, then +// 3rd pass through - and then proceed to process inexog and exexog as above + local ecollin `r(omitted)' + local ecollin : list ecollin - inexog0 + local ecollin : list ecollin - exexog0 + if wordcount("`ecollin'") { +// Collinears in endo, so reclassify as inexog, redo counts and call ivreg2_rmcollright2 again + local endo0 : list endo0 - ecollin + local inexog0 `ecollin' `inexog0' + local inexog0 : list retokenize inexog0 + local endo0_ct : word count `endo0' + local inexog0_ct : word count `inexog0' + local rhs0_ct : word count `inexog0' `exexog0' +// noexpand since already expanded and don't want inconsistant expansion +// newonly since don't want base vars in collinear list + qui ivreg2_rmcollright2 `inexog0' `exexog0' `endo0' if `touse', /// + normwt(`normwt') `noconstant' noexpand newonly + } +// Collinears in inexog or exexog + local collin `collin' `r(omitted)' + local inexog0 "" + local exexog0 "" + local nvarlist `r(varlist)' + local i 1 + while `i' <= `rhs0_ct' { + local nvar : word `i' of `nvarlist' + if `i' <= `inexog0_ct' { + local inexog0 `inexog0' `nvar' + } + else { + local exexog0 `exexog0' `nvar' + } + local ++i + } + local inexog0 : list retokenize inexog0 + local exexog0 : list retokenize exexog0 + } + } + +// Collinearity and duplicates warning messages, if necessary + if "`dups'" != "" { +di in gr "Warning - duplicate variables detected" +di in gr "Duplicates:" _c + Disp `dups', _col(21) + } + if "`ecollin'" != "" { +di in gr "Warning - endogenous variable(s) collinear with instruments" +di in gr "Vars now exogenous:" _c + Disp `ecollin', _col(21) + } + if "`collin'" != "" { +di in gr "Warning - collinearities detected" +di in gr "Vars dropped:" _c + Disp `collin', _col(21) + } + } + +// Last step: process partial0 so that names with o/b/n etc. 
match inexog0
+ if wordcount("`partial0'") {
+ ivreg2_fvstrip `inexog0' if `touse'
+ local sinexog0 `r(varlist)' // for inexog dictionary
+ ivreg2_fvstrip `partial0' if `touse'
+ local spartial0 `r(varlist)' // for partial dictionary
+ matchnames "`spartial0'" "`sinexog0'" "`inexog0'" // match using dictionary
+ local partial0 `r(names)' // new partial0 with matches
+ local partialcheck : list partial0 - inexog0 // unmatched are still in partial0
+ if ("`partialcheck'"~="") { // so catch them
+di in r "Error: `partialcheck' listed in partial() but not in list of regressors."
+ error 198
+ }
+ }
+// Completed duplicates and collinearity checks
+
+ foreach vl in lhs endo inexog exexog partial orthog endogtest redundant {
+ sreturn local `vl' ``vl''
+ sreturn local `vl'0 ``vl'0'
+ }
+ sreturn local dups `dups'
+ sreturn local collin `collin'
+ sreturn local ecollin `ecollin'
+
+end
+
+// ******************* Misc error checks *************************** //
+
+// CheckMisc: sanity checks performed before estimation.
+// - Verifies the equation has regressors and is identified (at least as
+//   many instruments as regressors).
+// - Validates any user-supplied b0 vector, S matrix or W matrix: each is
+//   reordered with -matsort- to match the regressor names (cnb1) or
+//   instrument names (cnZ1); dimension mismatches or (for S/W) asymmetry
+//   raise error 198.  Validated copies are returned in r(b0)/r(S0)/r(W0).
+program define CheckMisc, rclass
+ version 11.2
+ syntax ///
+ [ , ///
+ rhs1_ct(integer 0) /// number of RHS regressors
+ iv1_ct(integer 0) /// number of instruments
+ bvector(name) /// user-supplied coefficient vector (b0 option)
+ smatrix(name) /// user-supplied S matrix
+ wmatrix(name) /// user-supplied weighting matrix
+ cnb1(string) /// regressor column names
+ cnZ1(string) /// instrument column names
+ ]
+
+// Check variable counts
+ if `rhs1_ct' == 0 {
+di as err "error: no regressors specified"
+ exit 102
+ }
+ if `rhs1_ct' > `iv1_ct' {
+di as err "equation not identified; must have at least as many instruments"
+di as err "not in the regression as there are instrumented variables"
+ exit 481
+ }
+
+// Check user-supplied b vector
+ if "`bvector'" != "" {
+ tempname b0
+// Rearrange/select columns to match IV matrix.
+// Test _rc immediately after the captured -matsort-: if it failed,
+// r(sorted) does not exist and an uncaptured -matrix- command would
+// abort with an unhelpful error before the message below is printed.
+ cap matsort `bvector' "`cnb1'"
+ if _rc ~= 0 {
+di as err "-b0- option error: supplied b0 columns do not match regressor list"
+exit 198
+ }
+ matrix `b0'=r(sorted)
+ local bcols : word count `cnb1'
+ if colsof(`b0') ~= `bcols' {
+di as err "-b0- option error: supplied b0 columns do not match regressor list"
+exit 198
+ }
+ return mat b0 = `b0'
+ }
+
+// Check user-supplied S matrix
+ if "`smatrix'" != "" {
+ tempname S0
+// Check that smatrix is indeed a matrix.  Assign to the tempname
+// (not to a permanent matrix named S0) so nothing leaks into the
+// caller's matrix namespace.
+ cap mat `S0' = `smatrix'
+ if _rc ~= 0 {
+di as err "invalid matrix `smatrix' in smatrix option"
+exit _rc
+ }
+// Rearrange/select columns to match IV matrix; see b0 note on _rc.
+ cap matsort `smatrix' "`cnZ1'"
+ if _rc ~= 0 {
+di as err "-smatrix- option error: supplied matrix columns/rows do not match IV list"
+exit 198
+ }
+ matrix `S0'=r(sorted)
+ local zcols : word count `cnZ1'
+ if (rowsof(`S0')~=`zcols') | (colsof(`S0')~=`zcols') {
+di as err "-smatrix- option error: supplied matrix columns/rows do not match IV list"
+exit 198
+ }
+ if issymmetric(`S0')==0 {
+di as err "-smatrix- option error: supplied matrix is not symmetric"
+exit 198
+ }
+ return mat S0 = `S0'
+ }
+
+// Check user-supplied W matrix
+ if "`wmatrix'" != "" {
+ tempname W0
+// Check that wmatrix is indeed a matrix (tempname, as for S0 above).
+ cap mat `W0' = `wmatrix'
+ if _rc ~= 0 {
+di as err "invalid matrix `wmatrix' in wmatrix option"
+exit _rc
+ }
+// Rearrange/select columns to match IV matrix; see b0 note on _rc.
+ cap matsort `wmatrix' "`cnZ1'"
+ if _rc ~= 0 {
+di as err "-wmatrix- option error: supplied matrix columns/rows do not match IV list"
+exit 198
+ }
+ matrix `W0'=r(sorted)
+ local zcols : word count `cnZ1'
+ if (rowsof(`W0')~=`zcols') | (colsof(`W0')~=`zcols') {
+di as err "-wmatrix- option error: supplied matrix columns/rows do not match IV list"
+exit 198
+ }
+ if issymmetric(`W0')==0 {
+di as err "-wmatrix- option error: supplied matrix is not symmetric"
+exit 198
+ }
+ return mat W0 = `W0'
+ }
+end
+
+
+*******************************************************************************
+************************* misc utilities **************************************
+*******************************************************************************
+
+// internal version of ivreg2_fvstrip 1.01 ms 24march2015
+// takes varlist with possible FVs and strips out b/n/o notation
+// returns results in r(varlist)
+// optionally also omits omittable FVs
+// expand calls fvexpand either on full varlist
+// or (with onebyone option) on elements of varlist
+
+program define ivreg2_fvstrip, rclass
+ version 11.2
+ syntax [anything] [if] , [ dropomit expand onebyone
NOIsily ] + if "`expand'"~="" { // force call to fvexpand + if "`onebyone'"=="" { + fvexpand `anything' `if' // single call to fvexpand + local anything `r(varlist)' + } + else { + foreach vn of local anything { + fvexpand `vn' `if' // call fvexpand on items one-by-one + local newlist `newlist' `r(varlist)' + } + local anything : list clean newlist + } + } + foreach vn of local anything { // loop through varnames + if "`dropomit'"~="" { // check & include only if + _ms_parse_parts `vn' // not omitted (b. or o.) + if ~`r(omit)' { + local unstripped `unstripped' `vn' // add to list only if not omitted + } + } + else { // add varname to list even if + local unstripped `unstripped' `vn' // could be omitted (b. or o.) + } + } +// Now create list with b/n/o stripped out + foreach vn of local unstripped { + local svn "" // initialize + _ms_parse_parts `vn' + if "`r(type)'"=="variable" & "`r(op)'"=="" { // simplest case - no change + local svn `vn' + } + else if "`r(type)'"=="variable" & "`r(op)'"=="o" { // next simplest case - o.varname => varname + local svn `r(name)' + } + else if "`r(type)'"=="variable" { // has other operators so strip o but leave . + local op `r(op)' + local op : subinstr local op "o" "", all + local svn `op'.`r(name)' + } + else if "`r(type)'"=="factor" { // simple factor variable + local op `r(op)' + local op : subinstr local op "b" "", all + local op : subinstr local op "n" "", all + local op : subinstr local op "o" "", all + local svn `op'.`r(name)' // operator + . + varname + } + else if"`r(type)'"=="interaction" { // multiple variables + forvalues i=1/`r(k_names)' { + local op `r(op`i')' + local op : subinstr local op "b" "", all + local op : subinstr local op "n" "", all + local op : subinstr local op "o" "", all + local opv `op'.`r(name`i')' // operator + . 
+ varname + if `i'==1 { + local svn `opv' + } + else { + local svn `svn'#`opv' + } + } + } + else if "`r(type)'"=="product" { + di as err "ivreg2_fvstrip error - type=product for `vn'" + exit 198 + } + else if "`r(type)'"=="error" { + di as err "ivreg2_fvstrip error - type=error for `vn'" + exit 198 + } + else { + di as err "ivreg2_fvstrip error - unknown type for `vn'" + exit 198 + } + local stripped `stripped' `svn' + } + local stripped : list retokenize stripped // clean any extra spaces + + if "`noisily'"~="" { // for debugging etc. +di as result "`stripped'" + } + + return local varlist `stripped' // return results in r(varlist) +end + +// **************** Add omitted vars to b and V matrices ****************** // + +program define AddOmitted, rclass + version 11.2 + syntax /// + [ , /// + bmat(name) /// + vmat(name) /// + cnb0(string) /// + cnb1(string) /// + eqlist(string) /// if empty, single-equation b and V + ] + + tempname newb newV + local eq_ct =max(1,wordcount("`eqlist'")) + local rhs0_ct : word count `cnb0' + local rhs1_ct : word count `cnb1' + + foreach vn in `cnb1' { + local cnum : list posof "`vn'" in cnb0 + local cnumlist "`cnumlist' `cnum'" + } +// cnumlist is the list of columns in the single-equation new big matrix in which +// the non-zero entries from the reduced matrix (bmat or vmat) will appear. +// E.g., if newb will be [mpg o.mpg2 _cons] then cnum = [1 3]. 
+
+ mata: s_AddOmitted( ///
+ "`bmat'", ///
+ "`vmat'", ///
+ "`cnumlist'", ///
+ `eq_ct', ///
+ `rhs0_ct', ///
+ `rhs1_ct')
+ mat `newb' = r(b)
+ mat `newV' = r(V)
+
+ if `eq_ct'==1 {
+ local allnames `cnb0' // simple single-eqn case
+ }
+ else {
+ foreach eqname in `eqlist' {
+ foreach vname in `cnb0' {
+ local allnames "`allnames' `eqname':`vname'"
+ }
+ }
+ }
+ mat colnames `newb' = `allnames'
+ mat rownames `newb' = y1
+ mat colnames `newV' = `allnames'
+ mat rownames `newV' = `allnames'
+
+ return matrix b =`newb'
+ return matrix V =`newV'
+end
+
+// ************* More misc utilities ************** //
+
+// matsort: given matrix `bvmat' and a list of column (and, for matrices
+// with more than one row, row) `names', returns r(sorted) = the
+// submatrix with columns/rows selected and reordered to match `names'.
+// A single-row input (e.g. a coefficient vector) has only its columns
+// reordered.  Errors if a name in `names' is not a column of `bvmat'.
+program define matsort, rclass
+ version 11.2
+ args bvmat names
+ tempname m1 m2
+ foreach vn in `names' {
+ mat `m1'=nullmat(`m1'), `bvmat'[1...,"`vn'"]
+ }
+ if rowsof(`m1')>1 {
+ foreach vn in `names' {
+ mat `m2'=nullmat(`m2') \ `m1'["`vn'",1...]
+ }
+ return matrix sorted =`m2'
+ }
+ else {
+ return matrix sorted =`m1'
+ }
+end
+
+
+// matchnames: translate each name in `varnames' through the dictionary
+// namelist1 -> namelist2 (matched by position).  Names not found in
+// namelist1 pass through unchanged.  The two namelists must be the same
+// length.  Translated list returned in r(names).
+program define matchnames, rclass
+ version 11.2
+ args varnames namelist1 namelist2
+
+ local k1 : word count `namelist1'
+ local k2 : word count `namelist2'
+
+ if `k1' ~= `k2' {
+ di as err "namelist error"
+ exit 198
+ }
+ foreach vn in `varnames' {
+ local i : list posof `"`vn'"' in namelist1
+ if `i' > 0 {
+ local newname : word `i' of `namelist2'
+ }
+ else {
+* Keep old name if not found in list
+ local newname "`vn'"
+ }
+ local names "`names' `newname'"
+ }
+ local names : list clean names
+ return local names "`names'"
+end
+
+
+// checkversion_ranktest: verify that a compatible version of the
+// user-written -ranktest- command is installed; on success returns
+// (in r(ranktestcmd)) the version-controlled command line with which
+// to invoke it.
+program define checkversion_ranktest, rclass
+ version 11.2
+ args caller
+
+* Check that -ranktest- is installed
+ capture ranktest, version
+ if _rc != 0 {
+di as err "Error: must have ranktest version 01.3.02 or greater installed"
+di as err "To install, from within Stata type " _c
+di in smcl "{stata ssc install ranktest :ssc install ranktest}"
+ exit 601
+ }
+ local vernum "`r(version)'"
+ if ("`vernum'" < "01.3.02") | ("`vernum'" > "09.9.99") {
+di as err "Error: must have ranktest
version 01.3.02 or greater installed" +di as err "Currently installed version is `vernum'" +di as err "To update, from within Stata type " _c +di in smcl "{stata ssc install ranktest, replace :ssc install ranktest, replace}" + exit 601 + } + +* Minimum Stata version required for ranktest ver 2.0 or higher is Stata 16. +* If calling version is <16 then forks to ranktest ver 1.4 (aka ranktest11). + if `caller' >= 16 { + return local ranktestcmd version `caller': ranktest + } + else { + return local ranktestcmd version 11.2: ranktest + } +end + +// ************ Replacement _rmcollright with tweaks ****************** // + +program define ivreg2_rmcollright2, rclass + version 11.2 + syntax [ anything ] /// anything so that FVs aren't reordered + [if] [in] /// + [, /// + NORMWT(varname) /// + NOCONStant /// + NOEXPAND /// + newonly /// + lindep /// + ] + +// Empty varlist, leave early + if "`anything'"=="" { + return scalar k_omitted =0 + exit + } + + marksample touse + markout `touse' `anything' + + local cons = ("`noconstant'"=="") + local expand = ("`noexpand'"=="") + local newonly = ("`newonly'"~="") + local forcedrop = ("`forcedrop'"~="") + local lindep = ("`lindep'"~="") + local 0 `anything' + sreturn clear // clear any extraneous sreturn macros + syntax varlist(ts fv) + local tsops = ("`s(tsops)'"=="true") + local fvops = ("`s(fvops)'"=="true") + + if `tsops' | `fvops' { + if `expand' { + fvexpand `anything' if `touse' + local anything `r(varlist)' + fvrevar `anything' if `touse' + local fv_anything `r(varlist)' + } + else { +// already expanded and in set order +// loop through fvrevar so that it doesn't rebase or reorder + foreach var in `anything' { + fvrevar `var' if `touse' + local fv_anything `fv_anything' `r(varlist)' + } + } + } + else { + local fv_anything `anything' + } + + tempname wname + if "`normwt'"=="" { + qui gen byte `wname'=1 if `touse' + } + else { + qui gen double `wname' = `normwt' if `touse' + } + + mata: s_rmcoll2("`fv_anything'", 
"`anything'", "`wname'", "`touse'", `cons', `lindep') + + foreach var in `r(omitted)' { + di as text "note: `var' omitted because of collinearity" + } + + local omitted "`r(omitted)'" // has all omitted, both newly and previously omitted + local k_omitted =r(k_omitted) // but newly omitted not marked with omit operator o + if `lindep' { + tempname lindepmat + mat `lindepmat' = r(lindep) + mat rownames `lindepmat' = `anything' + mat colnames `lindepmat' = `anything' + } + +// Modern Stata version, add omitted notation to newly-missing vars + if `k_omitted' { + foreach var in `omitted' { + _ms_parse_parts `var' // check if already omitted + if r(omit) { // already omitted + local alreadyomitted `alreadyomitted' `var' + } + else { // not already omitted + ivreg2_rmc2_ms_put_omit `var' // add omitted omit operator o and replace in main varlist + local ovar `s(ospec)' + local anything : subinstr local anything "`var'" "`ovar'", word + } + } + if `newonly' { // omitted list should contain only newly omitted + local omitted : list omitted - alreadyomitted + local k_omitted : word count `omitted' + } + } + +// Return results + return scalar k_omitted =`k_omitted' + return local omitted `omitted' + return local varlist `anything' + if `lindep' { + return mat lindep `lindepmat' + } + +end + +// Used by ivreg2_rmcollright2 +// taken from later Stata - not available in Stata 11 +// version 1.0.0 28apr2011 +program ivreg2_rmc2_ms_put_omit, sclass + version 11.2 // added by MS + args vn + _ms_parse_parts `vn' + if r(type) =="variable" { + local name `r(name)' + local ovar o.`name' + } + if r(type) == "factor" { + if !r(base) { + local name `r(name)' + if "`r(ts_op)'" != "" { + local name `r(ts_op)'.`name' + } + local ovar `r(level)'o.`name' + } + else { + local ovar `vn' + } + } + else if r(type) == "interaction" { + local k = r(k_names) + + forval i = 1/`k' { + local name = r(name`i') + if "`r(ts_op`i')'" != "" { + local name `r(ts_op`i')'.`name' + } + if "`r(level`i')'" != "" 
{ + if r(base`i') { + local name `r(level`i')'b.`name' + } + else { + local name `r(level`i')'o.`name' + } + } + else { + local name o.`name' + } + local spec `spec'`sharp'`name' + local sharp "#" + } + local ovar `spec' + + } + _msparse `ovar' + sreturn local ospec `r(stripe)' +end + + +******************************************************************************* +**************** SUBROUTINES FOR KERNEL-ROBUST ******************************** +******************************************************************************* + +// abw wants a varlist of [ eps | Z | touse] +// where Z includes all instruments, included and excluded, with constant if +// present as the last column; eps are a suitable set of residuals; and touse +// marks the observations in the data matrix used to generate the residuals +// (e.g. e(sample) of the appropriate model). +// The Noconstant option indicates that no constant term exists in the Z matrix. +// kern is the name of the HAC kernel. -ivregress- only provides definitions +// for Bartlett (default), Parzen, quadratic spectral. 
// Returns the optimal bandwidth as local abw

// abw 1.0.1 CFB 30jun2007
// 1.0.1 : redefine kernel names (3 instances) to match ivreg2
// 1.1.0 : pass nobs and tobs to s_abw; abw bug fix and also handles gaps in data correctly

// Validates the kernel choice, resolves ts operators, then delegates the
// Newey-West (1994) automatic bandwidth calculation to Mata's s_abw().
program define abw, rclass
	version 11.2
	syntax varlist(ts), [ tindex(varname) nobs(integer 0) tobs(integer 0) NOConstant Kernel(string)]
	// Default kernel is Bartlett when none is specified.
	if "`kernel'" == "" {
		local kernel = "Bartlett"
	}
	// cfb B102: automatic bandwidth selection is defined only for these three kernels.
	if !inlist("`kernel'", "Bartlett", "Parzen", "Quadratic Spectral") {
		di as err "Error: kernel `kernel' not compatible with bw(auto)"
		return scalar abw = 1
		return local bwchoice "Kernel `kernel' not compatible with bw(auto); bw=1 (default)"
		exit
	}
	else {
		// Record whether Z contains a constant (s_abw assumes it is the last column).
		local cons 1
		if "`noconstant'" != "" {
			local cons 0
		}
		// Resolve any time-series operators into temporary variables first.
		tsrevar `varlist'
		local varlist1 `r(varlist)'
		mata: s_abw("`varlist1'", "`tindex'", `nobs', `tobs', `cons', "`kernel'")
		return scalar abw = `abw'
		return local bwchoice "Automatic bw selection according to Newey-West (1994)"
	}
end


*******************************************************************************
************** END SUBROUTINES FOR KERNEL-ROBUST ******************************
*******************************************************************************

*******************************************************************************
*************************** BEGIN MATA CODE ***********************************
*******************************************************************************

// capture in case calling under version < 11.2
capture version 11.2

mata:

// For reference:
// struct ms_vcvorthog {
//	string scalar	ename, Znames, touse, weight, wvarname
//	string scalar	robust, clustvarname, clustvarname2, clustvarname3, kernel
//	string scalar	sw, psd, ivarname, tvarname, tindexname
//	real scalar	wf, N, bw, tdelta, dofminus
//	real scalar	center
//	real matrix	ZZ
//	pointer matrix	e
//	pointer matrix	Z
//	pointer matrix	wvar
// }


// Newey-West (1994) automatic bandwidth selection.
// Zulist = "eps Z... touse": residual first, instruments (constant last, if
// any), and the estimation-sample marker as the final name.
// tindexname = time-index variable; nobs = number of observations used;
// tobs = time span t_N - t_1 + 1 (tobs > nobs when the series has gaps);
// cons = 1 if the last column of Z is a constant; kernel is one of
// "Bartlett", "Parzen", "Quadratic Spectral".
// Posts the chosen bandwidth back to Stata in local `abw'.
void s_abw (	string scalar Zulist,
		string scalar tindexname,
		real scalar nobs,
		real scalar tobs,
		real scalar cons,
		string scalar kernel
		)
{

	string rowvector	Zunames, tov
	string scalar		v, v2	// NOTE(review): v receives a name vector below; declaration kept as distributed
	real matrix		uZ
	real rowvector		h	// NOTE(review): built below as a column; declaration kept as distributed
	real scalar		lenzu, abw

// Split Zulist into the data names (v) and the trailing touse marker (v2).
	Zunames = tokens(Zulist)
	lenzu = cols(Zunames)-1
	v = Zunames[|1\lenzu|]
	v2 = Zunames[lenzu+1]
	st_view(uZ,.,v,v2)
	tnow = st_data(., tindexname)

// h flags which columns of uZ enter the calculation: the residual occupies
// column 1, and the constant (last column, if present) gets a zero weight.
	if (cons) {
		nrows1 = cols(uZ)-2
		nrows2 = 1
	}
	else {
		nrows1 = cols(uZ)-1
		nrows2 = 0
	}
	h = J(nrows1,1,1) \ J(nrows2,1,0)

// Kernel constants per [R] ivregress p.42, referencing Newey-West 1994
// REStud 61(4):631-653; Bartlett is the default.  cgamma for Bartlett
// corrected per Alistair Hall msg to Brian Poi 17jul2008.
	expo = 2/9
	q = 1
	cgamma = 1.1447
	if (kernel == "Parzen") {
		expo = 4/25
		q = 2
		cgamma = 2.6614
	}
// cfb B102
	if (kernel == "Quadratic Spectral") {
		expo = 2/25
		q = 2
		cgamma = 1.3221
	}

// Lag-truncation parameter; uses the time span of the data, not the number
// of observations, so gaps are handled correctly.
	mstar = trunc(20 *(tobs/100)^expo)

// f vector: (u_i Z_i) * h
	u = uZ[.,1]
	Z = uZ[|1,2 \.,.|]
	f = (u :* Z) * h

// Autocovariances of f up to lag mstar.  Intersecting the current and lagged
// time indices allows for gaps in the series; divide by nobs.
	sigmahat = J(mstar+1,1,0)
	for (j=0; j<=mstar; j++) {
		lagop = "L"+strofreal(j)
		tlag = st_data(., lagop+"."+tindexname)
		tpairs = tnow, tlag
		bothobs = (tnow:<.):*(tlag:<.)		// 1 where both dates are observed
		tpairs = select(tpairs,bothobs)
		sigmahat[j+1] = quadcross(f[tpairs[.,1],.], f[tpairs[.,2],.]) / nobs
	}

// Spectral density estimates shat(q) and shat(0).
	shatq = 0
	shat0 = sigmahat[1]
	for (j=1; j<=mstar; j++) {
		shatq = shatq + 2 * sigmahat[j+1] * j^q
		shat0 = shat0 + 2 * sigmahat[j+1]
	}

// Data-dependent bandwidth m = gammahat * tobs^(1/(2q+1)); again uses the
// time span tobs rather than the observation count.
	expon = 1/(2*q+1)
	gammahat = cgamma*( (shatq/shat0)^2 )^expon
	m = gammahat * tobs^expon

// Truncated kernels round the lag down; QS admits a fractional bandwidth.
	if (kernel == "Bartlett" | kernel == "Parzen") {
		optlag = min((trunc(m),mstar))
	}
	else if (kernel == "Quadratic Spectral") {
		optlag = min((m,mstar))
	}

// ivreg2's bw() convention is "optimal lag + 1".
	abw = optlag + 1
	st_local("abw",strofreal(abw))
}	// end program s_abw


// *********** s_rmcoll2 (replacement for Stata _rmcollright etc.
// **********************************************************************

// Drops collinear columns.  Returns r(varlist) (kept names), r(omitted)
// (dropped names), r(k_omitted), and optionally r(lindep) = XX*inv(XX).
// fv_vnames are the (possibly fv/ts-operated) variables read from the data;
// vnames are the display names reported back; weights in wname; cons=1
// means deviate from the weighted means before forming cross-products.
void s_rmcoll2( string scalar fv_vnames,
		string scalar vnames,
		string scalar wname,
		string scalar touse,
		scalar cons,
		scalar lindep)
{
	st_view(X=., ., tokens(fv_vnames), touse)
	st_view(w=., ., tokens(wname), touse)
	st_view(mtouse=., ., tokens(touse), touse)

// Weighted cross-product matrix, demeaned when a constant is implied.
	if (cons) {
		Xmean = mean(X,w)
		XX = quadcrossdev(X,Xmean, w, X,Xmean)
	}
	else {
		XX = quadcross(X, w, X)
	}

// Sweep in natural column order so later columns are the ones dropped;
// zeros on the diagonal of the generalized inverse mark dropped columns.
	XXinv = invsym(XX, range(1,cols(X),1))

	st_numscalar("r(k_omitted)", diag0cnt(XXinv))
	if (lindep) {
		st_matrix("r(lindep)", XX*XXinv)
	}

// Partition the display names into kept and omitted lists.
	dropflag = (diagonal(XXinv) :== 0)'
	allnames = tokens(vnames)
	vl_drop = select(allnames, dropflag)
	vl_keep = select(allnames, (1 :- dropflag))

	if (cols(vl_keep)>0) {
		st_global("r(varlist)", invtokens(vl_keep))
	}
	if (cols(vl_drop)>0) {
		st_global("r(omitted)", invtokens(vl_drop))
	}
}	// end program s_rmcoll2


// ************** Add omitted Mata utility ************************

// Expands reduced coefficient/VCV matrices (bname, vname) to full size,
// inserting zeros in the positions of omitted regressors.
// NOTE: the locals assigned here (b, V, cn, newb, newV, col_ct) are used by
// the remainder of this function below.
void s_AddOmitted( string scalar bname,
		string scalar vname,
		string scalar cnumlist,
		scalar eq_ct,
		scalar rhs0_ct,
		scalar rhs1_ct)

{
	b = st_matrix(bname)
	V = st_matrix(vname)
	cn = strtoreal(tokens(cnumlist))
// cnumlist is the list of columns in the single-equation new big matrix in which
// the non-zero entries from the reduced matrix (bmat or vmat) will appear.
// E.g., if newb will be [mpg o.mpg2 _cons] then cnum = [1 3].
	col_ct = eq_ct * rhs0_ct

	newb = J(1,col_ct,0)
	newV = J(col_ct,col_ct,0)

// Code needs to accommodate multi-equation case. Since all equations will have
// same reduced and full list of vars, in the same order, can do this with Kronecker
// products etc. Second term below is basically the offset for each equation.
	cn = (J(1,eq_ct,1) # cn) + ((range(0,eq_ct-1,1)' # J(1,rhs1_ct,1) ) * rhs0_ct)

// Insert the values from the reduced matrices into the right places in the big matrices.
+ newb[1, cn] = b + newV[cn, cn] = V + + st_matrix("r(b)", newb) + st_matrix("r(V)", newV) + +} + + +// ************** Partial out ************************************* + +void s_partial( string scalar yname, + string scalar X1names, + string scalar X2names, + string scalar Z1names, + string scalar Pnames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + scalar cons) + +{ + +// All varnames should be basic form, no FV or TS operators etc. +// y = dep var +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 +// PZ = variables to partial out +// cons = 0 or 1 + + ytoken=tokens(yname) + X1tokens=tokens(X1names) + X2tokens=tokens(X2names) + Z1tokens=tokens(Z1names) + Ptokens=tokens(Pnames) + Ytokens = (ytoken, X1tokens, X2tokens, Z1tokens) + + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(Y, ., Ytokens, touse) + st_view(P, ., Ptokens, touse) + L = cols(P) + + if (cons & L>0) { // Vars to partial out including constant + Ymeans = mean(Y,wf*wvar) + Pmeans = mean(P,wf*wvar) + PY = quadcrossdev(P, Pmeans, wf*wvar, Y, Ymeans) + PP = quadcrossdev(P, Pmeans, wf*wvar, P, Pmeans) + } + else if (!cons & L>0) { // Vars to partial out NOT including constant + PY = quadcross(P, wf*wvar, Y) + PP = quadcross(P, wf*wvar, P) + } + else { // Only constant to partial out = demean + Ymeans = mean(Y,wf*wvar) + } + +// Partial-out coeffs. Default Cholesky; use QR if not full rank and collinearities present. +// Not necessary if no vars other than constant + if (L>0) { + b = cholqrsolve(PP, PY) + } +// Replace with residuals + if (cons & L>0) { // Vars to partial out including constant + Y[.,.] = (Y :- Ymeans) - (P :- Pmeans)*b + } + else if (!cons & L>0) { // Vars to partial out NOT including constant + Y[.,.] = Y - P*b + } + else { // Only constant to partial out = demean + Y[.,.] 
= (Y :- Ymeans) + } + +} // end program s_partial + + + +// ************** Common cross-products ************************************* + +void s_crossprods( string scalar yname, + string scalar X1names, + string scalar X2names, + string scalar Z1names, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N) + +{ + +// y = dep var +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 + + ytoken=tokens(yname) + X1tokens=tokens(X1names) + X2tokens=tokens(X2names) + Z1tokens=tokens(Z1names) + + Xtokens = (X1tokens, X2tokens) + Ztokens = (Z1tokens, X2tokens) + + K1=cols(X1tokens) + K2=cols(X2tokens) + K=K1+K2 + L1=cols(Z1tokens) + L2=cols(X2tokens) + L=L1+L2 + + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(A, ., st_tsrevar((ytoken, Xtokens, Z1tokens)), touse) + + AA = quadcross(A, wf*wvar, A) + + if (K>0) { + XX = AA[(2::K+1),(2..K+1)] + Xy = AA[(2::K+1),1] + } + if (K1>0) { + X1X1 = AA[(2::K1+1),(2..K1+1)] + } + + if (L1 > 0) { + Z1Z1 = AA[(K+2::rows(AA)),(K+2..rows(AA))] + } + + if (L2 > 0) { + Z2Z2 = AA[(K1+2::K+1), (K1+2::K+1)] + Z2y = AA[(K1+2::K+1), 1] + } + + if ((L1>0) & (L2>0)) { + Z2Z1 = AA[(K1+2::K+1), (K+2::rows(AA))] + ZZ2 = Z2Z1, Z2Z2 + ZZ1 = Z1Z1, Z2Z1' + ZZ = ZZ1 \ ZZ2 + } + else if (L1>0) { + ZZ = Z1Z1 + } + else { +// L1=0 + ZZ = Z2Z2 + ZZ2 = Z2Z2 + } + + if ((K1>0) & (L1>0)) { // K1>0, L1>0 + X1Z1 = AA[(2::K1+1), (K+2::rows(AA))] + } + + if ((K1>0) & (L2>0)) { + X1Z2 = AA[(2::K1+1), (K1+2::K+1)] + if (L1>0) { // K1>0, L1>0, L2>0 + X1Z = X1Z1, X1Z2 + XZ = X1Z \ ZZ2 + } + else { // K1>0, L1=0, L2>0 + XZ = X1Z2 \ ZZ2 + X1Z = X1Z2 + } + } + else if (K1>0) { // K1>0, L2=0 + XZ = X1Z1 + X1Z= X1Z1 + } + else if (L1>0) { // K1=0, L2>0 + XZ = AA[(2::K+1),(K+2..rows(AA))], AA[(2::K+1),(2..K+1)] + } + else { // K1=0, L2=0 + XZ = ZZ + } + + if ((L1>0) & (L2>0)) { + Zy = AA[(K+2::rows(AA)), 1] \ AA[(K1+2::K+1), 1] + ZY = AA[(K+2::rows(AA)), 
(1..K1+1)] \ AA[(K1+2::K+1), (1..K1+1)] + Z2Y = AA[(K1+2::K+1), (1..K1+1)] + } + else if (L1>0) { + Zy = AA[(K+2::rows(AA)), 1] + ZY = AA[(K+2::rows(AA)), (1..K1+1)] + } + else if (L2>0) { + Zy = AA[(K1+2::K+1), 1] + ZY = AA[(K1+2::K+1), (1..K1+1)] + Z2Y = ZY + } +// Zy, ZY, Z2Y not created if L1=L2=0 + + YY = AA[(1::K1+1), (1..K1+1)] + yy = AA[1,1] + st_subview(y, A, ., 1) + ym = sum(wf*wvar:*y)/N + yyc = quadcrossdev(y, ym, wf*wvar, y, ym) + + XXinv = invsym(XX) + if (Xtokens==Ztokens) { + ZZinv = XXinv + XPZXinv = XXinv + } + else { + ZZinv = invsym(ZZ) + XPZX = makesymmetric(XZ*ZZinv*XZ') + XPZXinv=invsym(XPZX) + } + +// condition numbers + condxx=cond(XX) + condzz=cond(ZZ) + + st_matrix("r(XX)", XX) + st_matrix("r(X1X1)", X1X1) + st_matrix("r(X1Z)", X1Z) + st_matrix("r(ZZ)", ZZ) + st_matrix("r(Z2Z2)", Z2Z2) + st_matrix("r(Z1Z2)", Z2Z1') + st_matrix("r(Z2y)",Z2y) + st_matrix("r(XZ)", XZ) + st_matrix("r(Xy)", Xy) + st_matrix("r(Zy)", Zy) + st_numscalar("r(yy)", yy) + st_numscalar("r(yyc)", yyc) + st_matrix("r(YY)", YY) + st_matrix("r(ZY)", ZY) + st_matrix("r(Z2Y)", Z2Y) + st_matrix("r(XXinv)", XXinv) + st_matrix("r(ZZinv)", ZZinv) + st_matrix("r(XPZXinv)", XPZXinv) + st_numscalar("r(condxx)",condxx) + st_numscalar("r(condzz)",condzz) + +} // end program s_crossprods + + +// *************** 1st step GMM ******************** // +// Can be either efficient or inefficient. +// Can be IV or other 1-step GMM estimator. + +void s_gmm1s( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Wmatrix, + string scalar Smatrix, + scalar dofminus, + scalar efficient, // flag to indicate that 1st-step GMM is efficient + scalar overid, // not guaranteed to be right if nocollin option used! 
+ scalar useqr) // flag to force use of QR instead of Cholesky solver +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. + + ZZ = st_matrix(ZZmatrix) + XX = st_matrix(XXmatrix) + XZ = st_matrix(XZmatrix) + Zy = st_matrix(Zymatrix) + ZZinv = st_matrix(ZZinvmatrix) + + QZZ = ZZ / N + QXX = XX / N + QXZ = XZ / N + QZy = Zy / N + QZZinv = ZZinv*N + + useqr = (diag0cnt(QZZinv)>0) | useqr + +// Weighting matrix supplied (and inefficient GMM) + if (Wmatrix~="") { + W = st_matrix(Wmatrix) + useqr = (diag0cnt(W)>0) | useqr + } +// Var-cov matrix of orthog conditions supplied + if (Smatrix~="") { + omega=st_matrix(Smatrix) + useqr = (diag0cnt(omega)>0) | useqr + } + + if (efficient) { // Efficient 1-step GMM block: OLS, IV or provided S + if ((Xtokens==Ztokens) & (Smatrix=="")) { // OLS + + beta = cholqrsolve(QZZ, QZy, useqr) + beta = beta' + e[.,.] = y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + omega = sigmasq * QZZ + W = 1/sigmasq * QZZinv + V = 1/N * sigmasq * QZZinv + rankS = rows(omega) - diag0cnt(QZZinv) // inv(omega) is proportional to inv(QZZ) + rankV = rows(V) - diag0cnt(V) // inv(V) is proportional to inv(QZZ) + } + else if (Smatrix=="") { // IV + aux1 = cholqrsolve(QZZ, QXZ', useqr) + aux2 = cholqrsolve(QZZ, QZy, useqr) + aux3 = makesymmetric(QXZ * aux1) + beta = cholqrsolve(aux3, QXZ * aux2, useqr) + beta = beta' + e[.,.] 
= y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq = ee/(N-dofminus) + omega = sigmasq * QZZ + W = 1/sigmasq * QZZinv + V = 1/N * sigmasq * invsym(aux3) + rankS = rows(omega) - diag0cnt(QZZinv) // inv(omega) is proportional to inv(QZZ) + rankV = rows(V) - diag0cnt(V) // V is proportional to inv(aux3) + } + else { // efficient GMM with provided S (=omega) + aux1 = cholqrsolve(omega, QXZ', useqr) + aux2 = cholqrsolve(omega, QZy, useqr) + aux3 = makesymmetric(QXZ * aux1) + beta = cholqrsolve(aux3, QXZ * aux2, useqr) + beta = beta' + e[.,.] = y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + W = invsym(omega) + V = 1/N * invsym(aux3) // Normalize by N + rankS = rows(omega) - diag0cnt(W) // since W=inv(omega) + rankV = rows(V) - diag0cnt(V) // since V is prop to inv(aux3) + } + if (overid) { // J if overidentified + Ze = quadcross(Z, wf*wvar, e) + gbar = Ze / N + aux4 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux4 + } + else { + j=0 + } + st_matrix("r(beta)", beta) + st_matrix("r(V)", V) + st_matrix("r(S)", omega) + st_matrix("r(W)", W) + st_numscalar("r(rss)", ee) + st_numscalar("r(j)", j) + st_numscalar("r(sigmasq)", sigmasq) + st_numscalar("r(rankS)", rankS) + st_numscalar("r(rankV)", rankV) + } + else { // inefficient 1st-step GMM; don't need V, S, j etc. + if ((Xtokens==Ztokens) & (Wmatrix=="")) { // OLS + beta = cholqrsolve(QZZ, QZy, useqr) + beta = beta' + e[.,.] = y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + W = 1/sigmasq * QZZinv + QXZ_W_QZX = 1/sigmasq * QZZ // b/c W incorporates sigma^2 + } + else if (Wmatrix=="") { // IV + aux1 = cholqrsolve(QZZ, QXZ', useqr) + aux2 = cholqrsolve(QZZ, QZy, useqr) + aux3 = makesymmetric(QXZ * aux1) + beta = cholqrsolve(aux3, QXZ * aux2, useqr) + beta = beta' + e[.,.] 
= y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + W = 1/sigmasq * QZZinv + QXZ_W_QZX = 1/sigmasq * aux3 // b/c IV weighting matrix incorporates sigma^2 + } + else { // some other 1st step inefficient GMM with provided W + QXZ_W_QZX = QXZ * W * QXZ' + _makesymmetric(QXZ_W_QZX) + beta = cholqrsolve(QXZ_W_QZX, QXZ * W * QZy, useqr) + beta = beta' + e[.,.] = y - X * beta' // update residuals + } + st_matrix("r(QXZ_W_QZX)", QXZ_W_QZX) + st_matrix("r(beta)", beta) + st_matrix("r(W)",W) // always return W + } + +} // end program s_gmm1s + + +// *************** efficient GMM ******************** // +// Uses inverse of provided S matrix as weighting matrix. +// IV won't be done here but code would work for it as a special case. + +void s_egmm( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Smatrix, // always provided + scalar dofminus, + scalar overid, // not guaranteed to be right if -nocollin- used! + scalar useqr) +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + ZZ = st_matrix(ZZmatrix) + XX = st_matrix(XXmatrix) + XZ = st_matrix(XZmatrix) + Zy = st_matrix(Zymatrix) + ZZinv = st_matrix(ZZinvmatrix) + + QZZ = ZZ / N + QXX = XX / N + QXZ = XZ / N + QZy = Zy / N + QZZinv = ZZinv*N + +// Var-cov matrix of orthog conditions supplied + omega=st_matrix(Smatrix) + W = invsym(omega) // Efficient GMM weighting matrix + rankS = rows(omega) - diag0cnt(W) // since W=inv(omega) + + if (rankS cols(X)) { + Ze = quadcross(Z, wf*wvar, e) + gbar = Ze / N + aux4 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux4 + } + else { + j=0 + } + + st_matrix("r(beta)", beta) + st_matrix("r(V)", V) + st_matrix("r(W)", W) + st_numscalar("r(rss)", ee) + st_numscalar("r(j)", j) + st_numscalar("r(sigmasq)", sigmasq) + st_numscalar("r(rankV)",rankV) + st_numscalar("r(rankS)",rankS) + +} // end program s_egmm + +// *************** inefficient GMM ******************** // + +void s_iegmm( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar QXZ_W_QZXmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Wmatrix, + string scalar Smatrix, + string scalar bname, + scalar dofminus, + scalar overid, + scalar useqr) +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QXZ = st_matrix(XZmatrix) / N + QZy = st_matrix(Zymatrix) / N + QXZ_W_QZX = st_matrix(QXZ_W_QZXmatrix) + + useqr = (diag0cnt(QXZ_W_QZX)>0) | useqr + +// beta is supplied + beta = st_matrix(bname) + +// Inefficient weighting matrix supplied + W = st_matrix(Wmatrix) + +// Var-cov matrix of orthog conditions supplied + omega=st_matrix(Smatrix) + +// Residuals are supplied + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + +// Calculate V and J. + +// V +// The GMM estimator is "root-N consistent", and technically we do +// inference on sqrt(N)*beta. By convention we work with beta, so we adjust +// the var-cov matrix instead: + aux5 = cholqrsolve(QXZ_W_QZX, QXZ * W, useqr) + V = 1/N * aux5 * omega * aux5' + _makesymmetric(V) + +// alternative +// QXZ_W_QZXinv=invsym(QXZ_W_QZX) +// V = 1/N * QXZ_W_QZXinv * QXZ * W * omega * W * QXZ' * QXZ_W_QZXinv + + rankV = rows(V) - diag0cnt(invsym(V)) // need explicitly to calc rank + rankS = rows(omega) - diag0cnt(invsym(omega)) // need explicitly to calc rank + +// J if overidentified + if (overid) { +// Note that J requires efficient GMM residuals, which means do 2-step GMM to get them. 
+// QXZ_W2s_QZX = QXZ * W2s * QXZ' +// _makesymmetric(QXZ_W2s_QZX) +// QXZ_W2s_QZXinv=invsym(QXZ_W2s_QZX) +// beta2s = (QXZ_W2s_QZXinv * QXZ * W2s * QZy) + aux1 = cholqrsolve(omega, QXZ', useqr) + aux2 = cholqrsolve(omega, QZy, useqr) + aux3s = makesymmetric(QXZ * aux1) + beta2s = cholqrsolve(aux3s, QXZ * aux2, useqr) + beta2s = beta2s' + e2s = y - X * beta2s' + Ze2s = quadcross(Z, wf*wvar, e2s) + gbar = Ze2s / N + aux4 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux4 + } + else { + j=0 + } + + st_matrix("r(V)", V) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", sigmasq) + st_numscalar("r(rankV)",rankV) + st_numscalar("r(rankS)",rankS) + +} // end program s_iegmm + +// *************** LIML ******************** // + +void s_liml( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar Z2Z2matrix, + string scalar YYmatrix, + string scalar ZYmatrix, + string scalar Z2Ymatrix, + string scalar Xymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar Ynames, + string scalar ename, + string scalar Xnames, + string scalar X1names, + string scalar Znames, + string scalar Z1names, + string scalar Z2names, + scalar fuller, + scalar kclass, + string scalar coviv, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar center, + scalar dofminus, + scalar useqr) + +{ + struct ms_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + 
vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.center = center + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 + + Ytokens=tokens(Ynames) + Ztokens=tokens(Znames) + Z1tokens=tokens(Z1names) + Z2tokens=tokens(Z2names) + Xtokens=tokens(Xnames) + X1tokens=tokens(X1names) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QXZ = st_matrix(XZmatrix) / N + QZy = st_matrix(Zymatrix) / N + QZ2Z2 = st_matrix(Z2Z2matrix) / N + QYY = st_matrix(YYmatrix) / N + QZY = st_matrix(ZYmatrix) / N + QZ2Y = st_matrix(Z2Ymatrix) / N + QXy = st_matrix(Xymatrix) / N + QZZinv = st_matrix(ZZinvmatrix)*N + + useqr = (diag0cnt(QZZ)>0) | useqr + +// kclass=0 => LIML or Fuller LIML so calculate lambda + if (kclass == 0) { + aux1 = cholqrsolve(QZZ, QZY, useqr) + QWW = QYY - QZY'*aux1 + _makesymmetric(QWW) + if (cols(Z2tokens) > 0) { + aux2 = cholqrsolve(QZ2Z2, QZ2Y, useqr) + QWW1 = QYY - QZ2Y'*aux2 + _makesymmetric(QWW1) + } + else { +// Special case of no exogenous regressors + QWW1 = QYY + } + M=matpowersym(QWW, -0.5) + Eval=symeigenvalues(M*QWW1*M) + lambda=rowmin(Eval) + } + +// Exactly identified but might not be exactly 1, so make it so + if (cols(Z)==cols(X)) { + lambda=1 + } + + if (fuller > (N-cols(Z))) { +printf("\n{error:Error: invalid choice of Fuller LIML parameter.}\n") + exit(error(3351)) + } + else if (fuller > 0) { + k = lambda - fuller/(N-cols(Z)) + } + else if (kclass > 0) { + k = kclass + } + else { + k = lambda + } + + aux3 = cholqrsolve(QZZ, QXZ', useqr) + QXhXh=(1-k)*QXX + k*QXZ*aux3 + _makesymmetric(QXhXh) + aux4 = cholqrsolve(QZZ, QZy, useqr) + aux5 = cholqrsolve(QXhXh, QXZ, useqr) + aux6 = cholqrsolve(QXhXh, QXy, useqr) + beta = aux6*(1-k) + k*aux5*aux4 + beta = beta' + + e[.,.] 
= y - X * beta' + ee = quadcross(e, wf*wvar, e) + sigmasq = ee /(N-dofminus) + + omega = m_omega(vcvo) + + QXhXhinv=invsym(QXhXh) + + if ((robust=="") & (clustvarname=="") & (kernel=="")) { +// Efficient LIML + if (coviv=="") { +// Note dof correction is already in sigmasq + V = 1/N * sigmasq * QXhXhinv + rankV = rows(V) - diag0cnt(V) // since V is proportional to inv(QXhXh) + } + else { + aux7 = makesymmetric(QXZ * aux3) + V = 1/N * sigmasq * invsym(aux7) + rankV = rows(V) - diag0cnt(V) // since V is proportional to inv(aux7) + } + rankS = rows(omega) - diag0cnt(invsym(omega)) + if (cols(Z)>cols(X)) { + Ze = quadcross(Z, wf*wvar, e) + gbar = Ze / N + aux8 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux8 + } + else { + j=0 + } + } + else { +// Inefficient LIML + if (coviv=="") { + aux9 = cholqrsolve(QZZ, aux5', useqr) + V = 1/N * aux9' * omega * aux9 + _makesymmetric(V) + rankV = rows(V) - diag0cnt(invsym(V)) // need explicitly to calc rank + rankS = rows(omega) - diag0cnt(invsym(omega)) // need explicitly to calc rank + } + else { + aux10 = QXZ * aux3 + _makesymmetric(aux10) + aux11 = cholqrsolve(aux10, aux3', useqr) + V = 1/N * aux11 * omega * aux11' + _makesymmetric(V) + rankV = rows(V) - diag0cnt(invsym(V)) // need explicitly to calc rank + rankS = rows(omega) - diag0cnt(invsym(omega)) // need explicitly to calc rank + } + if (cols(Z)>cols(X)) { + aux12 = cholqrsolve(omega, QXZ', useqr) + aux13 = cholqrsolve(omega, QZy, useqr) + aux14 = makesymmetric(QXZ * aux12) + beta2s = cholqrsolve(aux14, QXZ * aux13, useqr) + beta2s = beta2s' + e2s = y - X * beta2s' + Ze2s = quadcross(Z, wf*wvar, e2s) + gbar = Ze2s / N + aux15 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux15 + } + else { + j=0 + } + } + _makesymmetric(V) + + st_matrix("r(beta)", beta) + st_matrix("r(S)", omega) + st_matrix("r(V)", V) + st_numscalar("r(lambda)", lambda) + st_numscalar("r(kclass)", k) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", 
sigmasq) + st_numscalar("r(rankV)",rankV) + st_numscalar("r(rankS)",rankS) + +} // end program s_liml + + +// *************** CUE ******************** // + +void s_gmmcue( string scalar ZZmatrix, + string scalar XZmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + string scalar bname, + string scalar b0name, + scalar center, + scalar dofminus, + scalar useqr) + +{ + + struct ms_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.center = center + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Pointers to views + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + py = &y + pX = &X + + if (b0name=="") { + +// CUE beta not supplied, so calculate/optimize + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ +// CUE is preceded by IV or 2-step GMM to get starting values. +// Stata convention is that parameter vectors are row vectors, and optimizers +// require this, so must conform to this in what follows. + + beta_init = st_matrix(bname) + +// What follows is how to set out an optimization in Stata. First, initialize +// the optimization structure in the variable S. Then tell Mata where the +// objective function is, that it's a minimization, that it's a "d0" type of +// objective function (no analytical derivatives or Hessians), and that the +// initial values for the parameter vector are in beta_init. Finally, optimize. + S = optimize_init() + + optimize_init_evaluator(S, &m_cuecrit()) + optimize_init_which(S, "min") + optimize_init_evaluatortype(S, "d0") + optimize_init_params(S, beta_init) +// CUE objective function takes 3 extra arguments: y, X and the structure with omega details + optimize_init_argument(S, 1, py) + optimize_init_argument(S, 2, pX) + optimize_init_argument(S, 3, vcvo) + optimize_init_argument(S, 4, useqr) + + beta = optimize(S) + +// The last evaluation of the GMM objective function is J. + j = optimize_result_value(S) + +// Call m_omega one last time to get CUE weighting matrix. + e[.,.] = y - X * beta' + omega = m_omega(vcvo) + } + else { +// CUE beta supplied, so obtain maximized GMM obj function at b0 + beta = st_matrix(b0name) + e[.,.] = y - X * beta' + omega = m_omega(vcvo) +// W = invsym(omega) + gbar = 1/N * quadcross(Z, wf*wvar, e) + j = N * gbar' * cholsolve(omega, gbar, useqr) +// j = N * gbar' * W * gbar + } + +// Bits and pieces + QXZ = st_matrix(XZmatrix)/N + + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + +// QXZ_W_QZX = QXZ * W * QXZ' +// _makesymmetric(QXZ_W_QZX) +// QXZ_W_QZXinv=invsym(QXZ_W_QZX) +// V = 1/N * QXZ_W_QZXinv + aux1 = cholsolve(omega, QXZ') + if (aux1[1,1]==.) 
{ // omega not full rank; W=inv(omega) dubious, exit with error +errprintf("\nError: estimated covariance matrix of moment conditions not of full rank,") +errprintf("\n and optimal GMM weighting matrix not unique.") +errprintf("\nPossible causes:") +errprintf("\n collinearities in instruments (if -nocollin- option was used)") +errprintf("\n singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)") +errprintf("\n {help ivreg2##partial:partial} option may address problem.\n") + exit(506) + } + aux3 = makesymmetric(QXZ * aux1) + V = 1/N * invsym(aux3) + if (diag0cnt(V)) { // V not full rank, likely caused by collinearities; + // b dubious, exit with error +errprintf("\nError: estimated variance matrix of b not of full rank, and CUE estimates") +errprintf("\n unreliable; may be caused by collinearities\n") + exit(506) + } + W = invsym(omega) + + st_matrix("r(beta)", beta) + st_matrix("r(S)", omega) + st_matrix("r(W)", W) + st_matrix("r(V)", V) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", sigmasq) + +} // end program s_gmmcue + +// CUE evaluator function. +// Handles only d0-type optimization; todo, g and H are just ignored. +// beta is the parameter set over which we optimize, and +// J is the objective function to minimize. + +void m_cuecrit(todo, beta, pointer py, pointer pX, struct ms_vcvorthog scalar vcvo, useqr, j, g, H) +{ + *vcvo.e[.,.] 
= *py - *pX * beta' + + omega = m_omega(vcvo) + +// Calculate gbar=Z'*e/N + gbar = 1/vcvo.N * quadcross(*vcvo.Z, vcvo.wf*(*vcvo.wvar), *vcvo.e) + aux1 = cholqrsolve(omega, gbar, useqr) + j = vcvo.N * gbar' * aux1 + +// old method +// W = invsym(omega) +// j = vcvo.N * gbar' * W * gbar + +} // end program CUE criterion function + + +// ************** ffirst-stage stats ************************************* + +void s_ffirst( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar ZYmatrix, + string scalar ZZinvmatrix, + string scalar XXinvmatrix, + string scalar XPZXinvmatrix, + string scalar X2X2matrix, + string scalar Z1X2matrix, + string scalar X2ymatrix, + string scalar ename, // Nx1 + string scalar ematnames, // Nx(K1+1) + string scalar yname, + string scalar X1names, + string scalar X2names, + string scalar Z1names, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + scalar N_clust, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar center, + scalar dofminus, + scalar sdofminus) + +{ + + struct ms_vcvorthog scalar vcvo + + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.center = center + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = 
included IVs = X2 + + Xnames = invtokens( (X1names, X2names), " ") + Znames = invtokens( (Z1names, X2names), " ") + + st_view(y, ., st_tsrevar(tokens(yname)), touse) + st_view(X1, ., st_tsrevar(tokens(X1names)), touse) + st_view(Z1, ., st_tsrevar(tokens(Z1names)), touse) + st_view(X, ., st_tsrevar(tokens(Xnames)), touse) + st_view(Z, ., st_tsrevar(tokens(Znames)), touse) + st_view(e, ., ename, touse) + st_view(emat, ., tokens(ematnames), touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + + vcvo.wvar = &wvar + vcvo.Z = &Z + vcvo.Znames = Znames + vcvo.ZZ = st_matrix(ZZmatrix) + + if ("X2names"~="") { + st_view(X2, ., st_tsrevar(tokens(X2names)), touse) + } + + K1=cols(X1) + K2=cols(X2) + K=K1+K2 + L1=cols(Z1) + L2=cols(X2) + L=L1+L2 + df = L1 + df_r = N-L + + ZZinv = st_matrix(ZZinvmatrix) + XXinv = st_matrix(XXinvmatrix) + XPZXinv = st_matrix(XPZXinvmatrix) + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QZX = st_matrix(XZmatrix)' / N + QZY = st_matrix(ZYmatrix) / N + QZZinv = ZZinv*N + QXXinv = XXinv*N + QX2X2 = st_matrix(X2X2matrix) / N + QZ1X2 = st_matrix(Z1X2matrix) / N + QX2y = st_matrix(X2ymatrix) / N + + sheaall = (diagonal(XXinv) :/ diagonal(XPZXinv)) // (X1, X2) in column vector + sheaall = (sheaall[(1::K1), 1 ])' // Just X1 in row vector + +// Full system of reduced form (col 1) and first-stage regressions + bz = cholsolve(QZZ, QZY) + Yhat = Z*bz + Xhat = Yhat[.,(2..(K1+1))], X2 +// VCV for full system + eall = (y, X1) - Yhat + ee = quadcross(eall, wf*wvar, eall) +// sigmas have large-sample dofminus correction incorporated but no small dof corrections + sigmasqall = ee / (N-dofminus) +// rmses have small dof corrections + rmseall = sqrt( ee / (N-L-dofminus-sdofminus) ) +// V has all the classical VCVs in block diagonals + V = sigmasqall # ZZinv +// For Wald test of excluded instruments + R = I(L1) , J(L1, L2, 0) +// For AP and SW stats + QXhXh = quadcross(Xhat, wf*wvar, Xhat) / N + QXhX1 = quadcross(Xhat, wf*wvar, X1 ) / N + +// 
VCV for system of first-stage eqns +// Can be robust; even if not, has correct off-block-diagonal covariances + vcvo.ename = ematnames + vcvo.e = &emat + emat[.,.] = eall + omegar = m_omega(vcvo) + Vr = makesymmetric(I(K1+1)#QZZinv * omegar * I(K1+1)#QZZinv) / N + +// AR statistics from RF (first column) + Rb = bz[ (1::L1), 1 ] + RVR = Vr[| 1,1 \ L1, L1 |] + ARWald = Rb' * cholsolve(RVR, Rb) + ARF = ARWald + ARdf = L1 + if (clustvarname=="") { + ARdf2 = (N-dofminus-L-sdofminus) + ARF = ARWald / (N-dofminus) * ARdf2 / ARdf + } + else { + ARdf2 = N_clust - 1 + ARF = ARWald / (N-1) * (N-L-sdofminus) /(N_clust) * ARdf2 / ARdf + } + ARFp = Ftail(ARdf, ARdf2, ARF) + ARchi2 = ARWald + ARchi2p = chi2tail(ARdf, ARchi2) + +// Stock-Wright LM S statistic +// Equivalent to J stat for model with coeff on endog=0 and with inexog partialled out +// = LM version of AR stat (matches weakiv) + if (K2>0) { + by = cholsolve(QX2X2, QX2y) + e[.,.] = y-X2*by + } + else { + e[.,.] = y + } +// initialize residual for VCV calc to be single Nx1 vector + vcvo.e = &e + vcvo.ename = ename +// get VCV and sstat=J + omega = m_omega(vcvo) + gbar = 1/N * quadcross(Z, wf*wvar, e) + sstat = N * gbar' * cholsolve(omega, gbar) + sstatdf = L1 + sstatp = chi2tail(sstatdf, sstat) + +// Prepare to loop over X1s for F, SW and AP stats +// initialize matrix to save first-stage results + firstmat=J(21,0,0) +// initialize residual for VCV calc to be single Nx1 vector + vcvo.e = &e + vcvo.ename = ename + + for (i=1; i<=K1; i++) { + +// RMSEs for first stage start in SECOND row/column (first has RF) + rmse = rmseall[i+1,i+1] +// Shea partial R2 + shea = sheaall[1,i] +// first-stage coeffs for ith X1. +// (nb: first column is reduced form eqn for y) + b=bz[., (i+1)] +// Classical Wald stat (chi2 here); also yields partial R2 +// Since r is an L1 x 1 zero vector, can use Rb instead of (Rb-r) + Rb = b[ (1::L1), . 
] + RVR = V[| 1+i*L,1+i*L \ i*L+L1, i*L+L1 |] + Wald = Rb' * cholsolve(RVR, Rb) +// Wald stat has dofminus correction in it via sigmasq, +// so remove it to calculate partial R2 + pr2 = (Wald/(N-dofminus)) / (1 + (Wald/(N-dofminus))) + +// Robustify F stat if necessary. + if ((robust~="") | (clustvarname~="") | (kernel~="") | (sw~="")) { + RVR = Vr[| 1+i*L,1+i*L \ i*L+L1, i*L+L1 |] + Wald = Rb' * cholsolve(RVR, Rb) + } +// small dof adjustment is effectively additional L2, e.g., partialled-out regressors + df = L1 + if (clustvarname=="") { + df_r = (N-dofminus-L-sdofminus) + F = Wald / (N-dofminus) * df_r / df + } + else { + df_r = N_clust - 1 + F = Wald / (N-1) * (N-L-sdofminus) * (N_clust - 1) / N_clust / df + } + pvalue = Ftail(df, df_r, F) + +// If #endog=1, AP=SW=standard F stat + if (K1==1) { + Fdf1 = df + Fdf2 = df_r + SWF = F + SWFp = pvalue + SWchi2 = Wald + SWchi2p = chi2tail(Fdf1, SWchi2) + SWr2 = pr2 + APF = SWF + APFp = SWFp + APchi2 = SWchi2 + APchi2p = SWchi2p + APr2 = SWr2 + } + else { + +// Angrist-Pischke and Sanderson-Windmeijer stats etc. 
+// select matrix needed for both; will select all but the endog regressor of interest + selmat=J(1,K,1) + selmat[1,i]=0 // don't select endog regressor of interest + +// AP +// QXhXh is crossproduct of X1hats (fitted Xs) plus Z2s +// QXhX1 is crossproduct with X1s +// QXhXhi and QXhX1i remove the row/col for the endog regressor of interest + QXhXhi = select(select(QXhXh,selmat)', selmat) + QXhX1i = select(QXhX1[.,i], selmat') +// 1st step - in effect, 2nd stage of 2SLS using FITTED X1hats, and then get residuals e1 + b1=cholsolve(QXhXhi, QXhX1i) + QXhXhinv = invsym(QXhXhi) // Need this for V + b1=QXhXhinv*QXhX1i + e1 = X1[.,i] - select(Xhat,selmat)*b1 +// 2nd step - regress e1 on all Zs and test excluded ones + QZe1 = quadcross(Z, wf*wvar, e1 ) / N + b2=cholsolve(QZZ, QZe1) + APe2 = e1 - Z*b2 + ee = quadcross(APe2, wf*wvar, APe2) + sigmasq2 = ee / (N-dofminus) +// Classical V + Vi = sigmasq2 * QZZinv / N + APRb=b2[ (1::L1), .] + APRVR = Vi[ (1::L1), (1..L1) ] + APWald = APRb' * cholsolve(APRVR, APRb) +// Wald stat has dofminus correction in it via sigmasq, +// so remove it to calculate partial R2 + APr2 = (APWald/(N-dofminus)) / (1 + (APWald/(N-dofminus))) + +// Now SW stat +// Uses same 2SLS coeffs as AP but resids use ACTUAL X1 (not fitted X1) + e1 = X1[.,i] - select(X,selmat)*b1 +// next step - regress e on all Zs and test excluded ones + QZe1 = quadcross(Z, wf*wvar, e1 ) / N + b2=cholsolve(QZZ, QZe1) + SWe2 = e1 - Z*b2 + ee = quadcross(SWe2, wf*wvar, SWe2) + sigmasq2 = ee / (N-dofminus) + Vi = sigmasq2 * QZZinv / N + SWRb=b2[ (1::L1), .] + SWRVR = Vi[ (1::L1), (1..L1) ] + SWWald = SWRb' * cholsolve(SWRVR, SWRb) +// Wald stat has dofminus correction in it via sigmasq, +// so remove it to calculate partial R2 + SWr2 = (SWWald/(N-dofminus)) / (1 + (SWWald/(N-dofminus))) + +// Having calculated AP and SW R-sq based on non-robust Wald, now get robust Wald if needed. 
+ if ((robust~="") | (clustvarname~="") | (kernel~="") | (sw~="")) { + e[.,1]=APe2 + omega=m_omega(vcvo) + Vi = makesymmetric(QZZinv * omega * QZZinv) / N + APRVR = Vi[ (1::L1), (1..L1) ] + APWald = APRb' * cholsolve(APRVR, APRb) // re-use APRb + e[.,1]=SWe2 + omega=m_omega(vcvo) + Vi = makesymmetric(QZZinv * omega * QZZinv) / N + SWRVR = Vi[ (1::L1), (1..L1) ] + SWWald = SWRb' * cholsolve(SWRVR, SWRb) // re-use SWRb + } + +// small dof adjustment is effectively additional L2, e.g., partialled-out regressors + Fdf1 = (L1-K1+1) + if (clustvarname=="") { + Fdf2 = (N-dofminus-L-sdofminus) + APF = APWald / (N-dofminus) * Fdf2 / Fdf1 + SWF = SWWald / (N-dofminus) * Fdf2 / Fdf1 + } + else { + Fdf2 = N_clust - 1 + APF = APWald / (N-1) * (N-L-sdofminus) * (N_clust - 1) / N_clust / Fdf1 + SWF = SWWald / (N-1) * (N-L-sdofminus) * (N_clust - 1) / N_clust / Fdf1 + } + APFp = Ftail(Fdf1, Fdf2, APF) + APchi2 = APWald + APchi2p = chi2tail(Fdf1, APchi2) + SWFp = Ftail(Fdf1, Fdf2, SWF) + SWchi2 = SWWald + SWchi2p = chi2tail(Fdf1, SWchi2) + } + +// Assemble results + firstmat = firstmat , /// + (rmse \ shea \ pr2 \ F \ df \ df_r \ pvalue /// + \ SWF \ Fdf1 \ Fdf2 \ SWFp \ SWchi2 \ SWchi2p \ SWr2 /// + \ APF \ Fdf1 \ Fdf2 \ APFp \ APchi2 \ APchi2p \ APr2) + } // end of loop for an X1 variable + + st_numscalar("r(rmse_rf)", rmseall[1,1]) + st_matrix("r(firstmat)", firstmat) + st_matrix("r(b)", bz) + st_matrix("r(V)", Vr) + st_matrix("r(S)", omegar) + st_numscalar("r(archi2)", ARchi2) + st_numscalar("r(archi2p)", ARchi2p) + st_numscalar("r(arf)", ARF) + st_numscalar("r(arfp)", ARFp) + st_numscalar("r(ardf)", ARdf) + st_numscalar("r(ardf_r)", ARdf2) + st_numscalar("r(sstat)",sstat) + st_numscalar("r(sstatp)",sstatp) + st_numscalar("r(sstatdf)",sstatdf) + +} // end program s_ffirst + +// ********************************************************************** + +void s_omega( + string scalar ZZmatrix, + string scalar ename, + string scalar Znames, + string scalar touse, + string scalar 
weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar center, + scalar dofminus) +{ + + struct ms_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.center = center + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + st_view(Z, ., st_tsrevar(tokens(Znames)), touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(e, ., vcvo.ename, touse) + + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + + ZZ = st_matrix(ZZmatrix) + + S=m_omega(vcvo) + + st_matrix("r(S)", S) +} // end of s_omega program + + +// Mata utility for sequential use of solvers +// Default is cholesky; +// if that fails, use QR; +// if overridden, use QR. + +function cholqrsolve ( numeric matrix A, + numeric matrix B, + | real scalar useqr) +{ + if (args()==2) useqr = 0 + + real matrix C + + if (!useqr) { + C = cholsolve(A, B) + if (C[1,1]==.) { + C = qrsolve(A, B) + } + } + else { + C = qrsolve(A, B) + } + + return(C) + +} + +end // end Mata section + +exit // exit before loading comments + +********************************** VERSION COMMENTS ********************************** +* Initial version cloned from official ivreg version 5.0.9 19Dec2001 +* 1.0.2: add logic for reg3. 
Sargan test +* 1.0.3: add prunelist to ensure that count of excluded exogeneous is correct +* 1.0.4: revise option to exog(), allow included exog to be specified as well +* 1.0.5: switch from reg3 to regress, many options and output changes +* 1.0.6: fixed treatment of nocons in Sargan and C-stat, and corrected problems +* relating to use of nocons combined with a constant as an IV +* 1.0.7: first option reports F-test of excluded exogenous; prunelist bug fix +* 1.0.8: dropped prunelist and switched to housekeeping of variable lists +* 1.0.9: added collinearity checks; C-stat calculated with recursive call; +* added ffirst option to report only F-test of excluded exogenous +* from 1st stage regressions +* 1.0.10: 1st stage regressions also report partial R2 of excluded exogenous +* 1.0.11: complete rewrite of collinearity approach - no longer uses calls to +* _rmcoll, does not track specific variables dropped; prunelist removed +* 1.0.12: reorganised display code and saved results to enable -replay()- +* 1.0.13: -robust- and -cluster- now imply -small- +* 1.0.14: fixed hascons bug; removed ivreg predict fn (it didn't work); allowed +* robust and cluster with z stats and correct dofs +* 1.0.15: implemented robust Sargan stat; changed to only F-stat, removed chi-sq; +* removed exog option (only orthog works) +* 1.0.16: added clusterised Sargan stat; robust Sargan handles collinearities; +* predict now works with standard SE options plus resids; fixed orthog() +* so it accepts time series operators etc. +* 1.0.17: fixed handling of weights. fw, aw, pw & iw all accepted. +* 1.0.18: fixed bug in robust Sargan code relating to time series variables. 
+* 1.0.19: fixed bugs in reporting ranks of X'X and Z'Z +* fixed bug in reporting presence of constant +* 1.0.20: added GMM option and replaced robust Sargan with (equivalent) J; +* added saved statistics of 1st stage regressions +* 1.0.21: added Cragg HOLS estimator, including allowing empty endog list; +* -regress- syntax now not allowed; revised code searching for "_cons" +* 1.0.22: modified cluster output message; fixed bug in replay for Sargan/Hansen stat; +* exactly identified Sargan/Hansen now exactly zero and p-value not saved as e(); +* cluster multiplier changed to 1 (from buggy multiplier), in keeping with +* eg Wooldridge 2002 p. 193. +* 1.0.23: fixed orthog option to prevent abort when restricted equation is underid. +* 1.0.24: fixed bug if 1st stage regressions yielded missing values for saving in e(). +* 1.0.25: Added Shea version of partial R2 +* 1.0.26: Replaced Shea algorithm with Godfrey algorithm +* 1.0.27: Main call to regress is OLS form if OLS or HOLS is specified; error variance +* in Sargan and C statistics use small-sample adjustment if -small- option is +* specified; dfn of S matrix now correctly divided by sample size +* 1.0.28: HAC covariance estimation implemented +* Symmetrize all matrices before calling syminv +* Added hack to catch F stats that ought to be missing but actually have a +* huge-but-not-missing value +* Fixed dof of F-stat - was using rank of ZZ, should have used rank of XX (couldn't use df_r +* because it isn't always saved. This is because saving df_r triggers small stats +* (t and F) even when -post- is called without dof() option, hence df_r saved only +* with -small- option and hence a separate saved macro Fdf2 is needed. 
+* Added rankS to saved macros +* Fixed trap for "no regressors specified" +* Added trap to catch gmm option with no excluded instruments +* Allow OLS syntax (no endog or excluded IVs specified) +* Fixed error messages and traps for rank-deficient robust cov matrix; includes +* singleton dummy possibility +* Capture error if posting estimated VCV that isn't pos def and report slightly +* more informative error message +* Checks 3 variable lists (endo, inexog, exexog) separately for collinearities +* Added AC (autocorrelation-consistent but conditionally-homoskedastic) option +* Sargan no longer has small-sample correction if -small- option +* robust, cluster, AC, HAC all passed on to first-stage F-stat +* bw must be < T +* 1.0.29 -orthog- also displays Hansen-Sargan of unrestricted equation +* Fixed collinearity check to include nocons as well as hascons +* Fixed small bug in Godfrey-Shea code - macros were global rather than local +* Fixed larger bug in Godfrey-Shea code - was using mixture of sigma-squares from IV and OLS +* with and without small-sample corrections +* Added liml and kclass +* 1.0.30 Changed order of insts macro to match saved matrices S and W +* 2.0.00 Collinearities no longer -qui- +* List of instruments tested in -orthog- option prettified +* 2.0.01 Fixed handling of nocons with no included exogenous, including LIML code +* 2.0.02 Allow C-test if unrestricted equation is just-identified. Implemented by +* saving Hansen-Sargan dof as = 0 in e() if just-identified. 
+* 2.0.03 Added score() option per latest revision to official ivreg +* 2.0.04 Changed score() option to pscore() per new official ivreg +* 2.0.05 Fixed est hold bug in first-stage regressions +* Fixed F-stat finite sample adjustment with cluster option to match official Stata +* Fixed F-stat so that it works with hascons (collinearity with constant is removed) +* Fixed bug in F-stat code - wasn't handling failed posting of vcv +* No longer allows/ignores nonsense options +* 2.0.06 Modified lsStop to sync with official ivreg 5.1.3 +* 2.0.07a Working version of CUE option +* Added sortpreserve, ivar and tvar options +* Fixed smalls bug in calculation of T for AC/HAC - wasn't using the last ob +* in QS kernel, and didn't take account of possible dropped observations +* 2.0.07b Fixed macro bug that truncated long varlists +* 2.0.07c Added dof option. +* Changed display of RMSE so that more digits are displayed (was %8.1g) +* Fixed small bug where cstat was local macro and should have been scalar +* Fixed bug where C stat failed with cluster. NB: wmatrix option and cluster are not compatible! +* 2.0.7d Fixed bug in dof option +* 2.1.0 Added first-stage identification, weak instruments, and redundancy stats +* 2.1.01 Tidying up cue option checks, reporting of cue in output header, etc. +* 2.1.02 Used Poskitt-Skeels (2002) result that C-D eval = cceval / (1-cceval) +* 2.1.03 Added saved lists of separate included and excluded exogenous IVs +* 2.1.04 Added Anderson-Rubin test of signif of endog regressors +* 2.1.05 Fix minor bugs relating to cluster and new first-stage stats +* 2.1.06 Fix bug in cue: capture estimates hold without corresponding capture on estimates unhold +* 2.1.07 Minor fix to ereturn local wexp, promote to version 8.2 +* 2.1.08 Added dofminus option, removed dof option. Added A-R test p-values to e(). +* Minor bug fix to A-R chi2 test - was N chi2, should have been N-L chi2. +* Changed output to remove potentially misleading refs to N-L etc. 
+* Bug fix to rhs count - sometimes regressors could have exact zero coeffs +* Bug fix related to cluster - if user omitted -robust-, orthog would use Sargan and not J +* Changed output of Shea R2 to make clearer that F and p-values do not refer to it +* Improved handling of collinearites to check across inexog, exexog and endo lists +* Total weight statement moved to follow summ command +* Added traps to catch errors if no room to save temporary estimations with _est hold +* Added -savefirst- option. Removed -hascons-, now synonymous with -nocons-. +* 2.1.09 Fixes to dof option with cluster so it no longer mimics incorrect areg behavior +* Local ivreg2cmd to allow testing under name ivreg2 +* If wmatrix supplied, used (previously not used if non-robust sargan stat generated) +* Allowed OLS using (=) syntax (empty endo and exexog lists) +* Clarified error message when S matrix is not of full rank +* cdchi2p, ardf, ardf_r added to saved macros +* first and ffirst replay() options; DispFirst and DispFFirst separately codes 1st stage output +* Added savefprefix, macro with saved first-stage equation names. +* Added version option. 
+* Added check for duplicate variables to collinearity checks +* Rewrote/simplified Godfrey-Shea partial r2 code +* 2.1.10 Added NOOUTput option +* Fixed rf bug so that first does not trigger unnecessary saved rf +* Fixed cue bug - was not starting with robust 2-step gmm if robust/cluster +* 2.1.11 Dropped incorrect/misleading dofminus adjustments in first-stage output summary +* 2.1.12 Collinearity check now checks across inexog/exexog/endog simultaneously +* 2.1.13 Added check to catch failed first-stage regressions +* Fixed misleading failed C-stat message +* 2.1.14 Fixed mishandling of missing values in AC (non-robust) block +* 2.1.15 Fixed bug in RF - was ignoring weights +* Added -endog- option +* Save W matrix for all cases; ensured copy is posted with wmatrix option so original isn't zapped +* Fixed cue bug - with robust, was entering IV block and overwriting correct VCV +* 2.1.16 Added -fwl- option +* Saved S is now robust cov matrix of orthog conditions if robust, whereas W is possibly non-robust +* weighting matrix used by estmator. inv(S)=W if estimator is efficient GMM. +* Removed pscore option (dropped by official ivreg). +* Fixed bug where -post- would fail because of missing values in vcv +* Remove hascons as synonym for nocons +* OLS now outputs 2nd footer with variable lists +* 2.1.17 Reorganization of code +* Added ll() macro +* Fixed N bug where weights meant a non-integer ob count that was rounded down +* Fixed -fwl- option so it correctly handles weights (must include when partialling-out) +* smatrix option takes over from wmatrix option. Consistent treatment of both. +* Saved smatrix and wmatrix now differ in case of inefficient GMM. +* Added title() and subtitle() options. +* b0 option returns a value for the Sargan/J stat even if exactly id'd. +* (Useful for S-stat = value of GMM objective function.) +* HAC and AC now allowed with LIML and k-class. 
+* Collinearity improvements: bug fixed because collinearity was mistakenly checked across +* inexog/exexog/endog simultaneously; endog predicted exactly by IVs => reclassified as inexog; +* _rmcollright enforces inexog>endo>exexog priority for collinearities, if Stata 9.2 or later. +* K-class, LIML now report Sargan and J. C-stat based on Sargan/J. LIML reports AR if homosked. +* nb: can always easily get a C-stat for LIML based on diff of two AR stats. +* Always save Sargan-Hansen as e(j); also save as e(sargan) if homoskedastic. +* Added Stock-Watson robust SEs options sw() +* 2.1.18 Added Cragg-Donald-Stock-Yogo weak ID statistic critical values to main output +* Save exexog_ct, inexog_ct and endog_ct as macros +* Stock-Watson robust SEs now assume ivar is group variable +* Option -sw- is standard SW. Option -swpsd- is PSD version a la page 6 point 10. +* Added -noid- option. Suppresses all first-stage and identification statistics. +* Internal calls to ivreg2 use noid option. +* Added hyperlinks to ivreg2.hlp and helpfile argument to display routines to enable this. 
+* 2.1.19 Added matrix rearrangement and checks for smatrix and wmatrix options +* Recursive calls to cstat simplified - no matrix rearrangement or separate robust/nonrobust needed +* Reintroduced weak ID stats to ffirst output +* Added robust ID stats to ffirst output for case of single endogenous regressor +* Fixed obscure bug in reporting 1st stage partial r2 - would report zero if no included exogenous vars +* Removed "HOLS" in main output (misleading if, e.g., estimation is AC but not HAC) +* Removed "ML" in main output if no endogenous regressors - now all ML is labelled LIML +* model=gmm is now model=gmm2s; wmatrix estimation is model=gmm +* wmatrix relates to gmm estimator; smatrix relates to gmm var-cov matrix; b0 behavior equiv to wmatrix +* b0 option implies nooutput and noid options +* Added nocollin option to skip collinearity checks +* Fixed minor display bug in ffirst output for endog vars with varnames > 12 characters +* Fixed bug in saved rf and first-stage results for vars with long varnames; uses permname +* Fixed bug in model df - had counted RHS, now calculates rank(V) since latter may be rank-deficient +* Rank of V now saved as macro rankV +* fwl() now allows partialling-out of just constant with _cons +* Added Stock-Wright S statistic (but adds overhead - calls preserve) +* Properties now include svyj. +* Noted only: fwl bug doesn't allow time-series operators. +* 2.1.20 Fixed Stock-Wright S stat bug - didn't allow time-series operators +* 2.1.21 Fixed Stock-Wright S stat to allow for no exog regressors cases +* 2.2.00 CUE partials out exog regressors, estimates endog coeffs, then exog regressors separately - faster +* gmm2s becomes standard option, gmm supported as legacy option +* 2.2.01 Added explanatory messages if gmm2s used. +* States if estimates efficient for/stats consistent for het, AC, etc. +* Fixed small bug that prevented "{help `helpfile'##fwl:fwl}" from displaying when -capture-d. 
+* Error message in footer about insuff rank of S changed to warning message with more informative message. +* Fixed bug in CUE with weights. +* 2.2.02 Removed CUE partialling-out; still available with fwl +* smatrix and wmatrix become documented options. e(model)="gmmw" means GMM with arbitrary W +* 2.2.03 Fixed bug in AC with aweights; was weighting zi'zi but not ei'ei. +* 2.2.04 Added abw code for bw(), removed properties(svyj) +* 2.2.05 Fixed bug in AC; need to clear variable vt1 at start of loop +* If iweights, N (#obs with precision) rounded to nearest integer to mimic official Stata treatment +* and therefore don't need N scalar at all - will be same as N +* Saves fwl_ct as macro. +* -ffirst- output, weak id stat, etc. now adjust for number of partialled-out variables. +* Related changes: df_m, df_r include adjustments for partialled-out variables. +* Option nofwlsmall introduced - suppresses above adjustments. Undocumented in ivreg2.hlp. +* Replaced ID tests based on canon corr with Kleibergen-Paap rk-based stats if not homoskedastic +* Replaced LR ID test stats with LM test stats. +* Checks that -ranktest- is installed. +* 2.2.06 Fixed bug with missing F df when cue called; updated required version of ranktest +* 2.2.07 Modified redundancy test statistic to match standard regression-based LM tests +* Change name of -fwl- option to -partial-. +* Use of b0 means e(model)=CUE. Added informative b0 option titles. b0 generates output but noid. +* Removed check for integer bandwidth if auto option used. +* 2.2.08 Add -nocollin- to internal calls and to -ivreg2_cue- to speed performance. +* 2.2.09 Per msg from Brian Poi, Alastair Hall verifies that Newey-West cited constant of 1.1447 +* is correct. Corrected mata abw() function. Require -ranktest- 1.1.03. +* 2.2.10 Added Angrist-Pischke multivariate f stats. Rewrite of first and ffirst output. +* Added Cragg-Donald to weak ID output even when non-iid. 
+* Fixed small bug in non-robust HAC code whereby extra obs could be used even if dep var missing. +* (required addition of L`tau'.(`s1resid') in creation of second touse variable) +* Fixed bugs that zapped varnames with "_cons" in them +* Changed tvar and ivar setup so that data must be tsset or xtset. +* Fixed bug in redundancy test stat when called by xtivreg2+cluster - no dofminus adj needed in this case +* Changed reporting so that gaps between panels are not reported as such. +* Added check that weight variable is not transformed by partialling out. +* Changed Stock-Wright S statistic so that it uses straight partialling-out of exog regressors +* (had been, in effect, doing 2SGMM partialling-out) +* Fixed bug where dropped collinear endogenous didn't get a warning or listing +* Removed N*CDEV Wald chi-sq statistic from ffirst output (LM stat enough) +* 3.0.00 Fully rewritten and Mata-ized code. Require min Stata 10.1 and ranktest 1.2.00. +* Mata support for Stock-Watson SEs for fixed effects estimator; doesn't support fweights. +* Changed handling of iweights yielding non-integer N so that (unlike official -regress-) all calcs +* for RMSE etc. use non-integer N and N is rounded down only at the end. +* Added support for Thompson/Cameron-Gelbach-Miller 2-level cluster-robust vcvs. +* 3.0.01 Now exits more gracefully if no regressors survive after collinearity checks +* 3.0.02 -capture- instead of -qui- before reduced form to suppress not-full-rank error warning +* Modified Stock-Wright code to partial out all incl Xs first, to reduce possibility of not-full-rank +* omega and missing sstat. Added check within Stock-Wright code to catch not-full-rank omega. +* Fixed bug where detailed first-stage stats with cluster were disrupted if data had been tsset +* using a different variables. +* Fixed bug that didn't allow regression on just a constant. +* Added trap for no observations. +* Added trap for auto bw with panel data - not allowed. 
+* 3.0.03 Fixed bug in m_omega that always used Stock-Watson spectral decomp to create invertible shat +* instead of only when (undocumented) spsd option is called. +* Fixed bug where, if matsize too small, exited with wrong error (mistakenly detected as collinearities) +* Removed inefficient call to -ranktest- that unnecessarily requested stats for all ranks, not just full. +* 3.0.04 Fixed coding error in m_omega for cluster+kernel. Was *vcvo.e[tmatrix[.,1]], should have been (*vcvo.e)[tmatrix[.,1]]. +* Fixed bug whereby clusters defined by strings were not handled correctly. +* Updated ranktest version check +* 3.0.05 Added check to catch unwanted transformations of time or panel variables by partial option. +* 3.0.06 Fixed partial bug - partialcons macro saved =0 unless _cons explicitly in partial() varlist +* 3.0.07 kclass was defaulting to LIML - fixed. +* Renamed spsd option to psda (a=abs) following Stock-Watson 2008. Added psd0 option following Politis 2007. +* Fixed bug that would prevent RF and first-stage with cluster and TS operators if cluster code changed sort order. +* Modified action if S matrix is not full rank and 2-step GMM chosen. Now continue but report problem in footer +* and do not report J stat etc. +* 3.0.08 Fixed cluster+bw; was not using all observations of all panel units if panel was unbalanced. +* Fixed inconsequential bug in m_omega that caused kernel loop to be entered (with no impact) even if kernel=="" +* Fixed small bug that compared bw to T instead of (correctly) to T/delta when checking that bw can't be too long. +* Added dkraay option = cluster on t var + kernel-robust +* Added kiefer option = truncated kernel, bw=T (max), and no robust +* Fixed minor reporting bug that reported time-series gaps in entire panel dataset rather than just portion touse-d. +* Recoded bw and kernel checks into subroutine vkernel. Allow non-integer bandwidth within check as in ranktest. 
+* 3.1.01 First ivreg2 version with accompanying Mata library (shared with -ranktest-). Mata library includes +* struct ms_vcvorthog, m_omega, m_calckw, s_vkernel. +* Fixed bug in 2-way cluster code (now in m_omega in Mata library) - would crash if K>1 (relevant for -ranktest- only). +* 3.1.02 Converted cdsy to Mata code and moved to Mata library. Standardized spelling/caps/etc. of QS as "Quadratic Spectral". +* 3.1.03 Improved partialling out in s_sstat and s_ffirst: replaced qrsolve with invsym. +* 3.1.04 Fixed minor bug in s_crossprod - would crash with L1=0 K1>0, and also with K=0 +* 3.1.05 Fixed minor bug in orthog - wasn't saving est results if eqn w/o suspect instruments did not execute properly +* Fixed minor bug in s_cccollin() - didn't catch perverse case of K1>0 (endog regressors) and L1=0 (no excl IVs) +* 3.1.06 Spelling fix for Danielle kernel, correct error check for bw vs T-1 +* 3.1.07 Fixed bug that would prevent save of e(sample) when partialling out just a constant +* 3.1.08 01Jan14. Fixed reporting bug with 2-way clustering and kernel-robust that would give wrong count for 2nd cluster variable. +* 3.1.09 13July14. _rmcollright under version control has serious bug for v10 and earlier. Replaced with canon corr approach. +* Fixed obscure bug in estimation sample - was not using obs when tsset tvar is missing, even if TS operators not used. +* Fixed bug in auto bw code so now ivreg2 and ivregress agree. Also, ivreg2 auto bw code handles gaps in TS correctly. +* 4.0.00 25Jan15. Promote to require Stata version 11.2 +* Rewrite of s_gmm1s, s_iegmm, s_egmm etc. to use matrix solvers rather than inversion. +* rankS and rankV now calculated along with estimators; rankS now always saved. +* Returned to use of _rmcollright to detect collinearities since bug was in Stata 10's _rmcollright and now not relevant. +* Added reporting of collinearities and duplicates in replay mode. +* Rewrite of legacy support for previous ivreg2x version. 
Main program calls ivreg2x depending on _caller().
+* Estimation and replay moved to ivreg211 subroutine above.
+* 4.0.01 8Feb15. Fixed bug in default name and command used for saved first and RF equations
+* Fixed bug in saved command line (was ivreg211, should be ivreg2).
+* 4.0.02 9Feb15. Changed forced exit at Stata <11 before continuing loading to forced exit pre-Mata code at Stata <9.
+* 4.1.00 Substantial rewrite to allow factor variables. Now also accepts TS ops as well as FV ops in partial varlist.
+* Rewrite included code for dropped/collinear/reclassified.
+* Saved RF and 1st-stage estimations have "if e(sample)" instead of "if `touse'" in e(cmdline).
+* Rewrite of s_gmm1s etc. to use qrsolve if weighting matrix not full rank or cholsolve fails
+* Fixed bug in display subroutines that would display hyperlink to wrong (nonexistent) help file.
+* 4.1.01 15Jun15. Fixed bug that did not allow dropped variables to be in partial(.) varlist.
+* Major rewrite of parsing code and collinearity/dropped/reclassified code.
+* Added support for display options noomitted, vsquish, noemptycells, baselevels, allbaselevels.
+* Changed from _rmcoll/_rmcollright/_rmcoll2list to internal ivreg2_rmcollright2
+* Changed failure of ranktest to obtain id stats to non-fatal so that estimation proceeds.
+* Removed recount via _rmcoll if noid option specified
+* Added partial(_all) option.
+* Improved checks of smatrix, wmatrix, b0 options
+* Rewrite of first-stage and reduced form code; rewrite of replay(.) functionality
+* Added option for displaying system of first-stage/reduced form eqns.
+* Replaced AP first-stage test stats with SW (Sanderson-Windmeijer) first-stage stats
+* Corrected S LM stat option; now calculated in effect as J stat for case of no endog (i.e. b=0)
+* with inexog partialled out i.e. LM version of AR stat; now matches weakiv
+* Undocumented FV-related options: fvsep (expand endo, inexog, exexog separately) fvall (expand together)
+* 4.1.02 17Jun15.
Fixed bug in collinearity check - was ignoring weights. +* More informative error message if invalid matrix provided to smatrix(.) or wmatrix(.) options. +* Caught error if depvar was FV or TS var that expanded to >1 variable. +* 4.1.03 18Jun15. Fixed bug with robust + rf option. +* 4.1.04 18Jun15. Fixed bug in AR stat with dofminus option + cluster (was subtracting dof, shouldn't). +* 4.1.05 18Jun15. Added rmse, df_m, df_r to saved RF and first-stage equation results. +* 4.1.06 4July15. Replaced mvreg with Mata code for partialling out (big speed gains with many vars). +* Rewrote AddOmitted to avoid inefficient loop; replaced with Mata subscripting. +* Failure of id stats because of collinearities triggers error message only; estimation continues. +* Calculation of dofs etc. uses rankS and rankV instead of iv1_ct and rhs1_ct; +* counts are therefore correct even in presence of collinearities and use of nocollin option. +* nocollin options triggers use of QR instead of default Cholesky. +* rankxx and rankzz now based on diag0cnt of (XX)^-1 and (ZZ)^-1. +* CUE fails if either S or V not full rank; can happen if nocollin option used. +* Added undocumented useqr option to force use of QR instead of Cholesky. +* Misc other code tweaks to make results more robust to nocollin option. +* 4.1.07 12July15. Fixed bugs in calculation of rank(V) (had miscounted in some cases if omega not full rank) +* Changed calc of dofs etc. from rankS and rankV to rankzz and rankxx (had miscounted in some cases etc.). +* Restored warning message for all exog regressors case if S not full rank. +* 4.1.08 27July15. Replaced wordcount(.) function with word count macro in AddOmitted; +* AddOmitted called only if any omitted regressors to add. +* Added center option for centering moments. +* 4.1.09 20Aug15. Expanded error message for failure to save first-stage estimations (var name too long). +* Fixed bug when weighting used with new partial-out code (see 4.1.06 4July15). 
+* Tweaked code so that if called under Stata version < 11, main ivreg2.ado is exited immediately after +* loading parent ivreg2 program. Removed automatic use of QR solver when nocollin option used. +* Added saved condition numbers for XX and ZZ. +* e(cmdline) now saves original string including any "s (i.e., saves `0' instead of `*'). +* 4.1.10 Fixed bug with posting first-stage results if sort had been disrupted by Mata code. +* Fixed bug which mean endog(.) and orthog(.) varlists weren't saved or displayed. +* 4.1.11 22Nov19. Added caller(.) option to ivreg211 subroutine to pass version of parent Stata _caller(.). +* Local macro with this parent Stata version is `caller'. +* Changed calls to ranktest so that if parent Stata is less than version 16, +* ranktest is called under version control as version 11.2: ranktest ..., +* otherwise it is called as version `caller': ranktest ... . +* Added macro e(ranktestcmd); will be ranktest, or ranktest11, or .... diff --git a/110/replication_package/replication/ado/plus/i/ivreg2.sthlp b/110/replication_package/replication/ado/plus/i/ivreg2.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..870be3d0ce72591a4f7e1a6b301fc5cebf7b2172 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg2.sthlp @@ -0,0 +1,1787 @@ +{smcl} +{* 30July2015}{...} +{hline} +help for {hi:ivreg2} +{hline} + +{title:Extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression} + +{p 4}Full syntax + +{p 8 14}{cmd:ivreg2} {it:depvar} [{it:varlist1}] +{cmd:(}{it:varlist2}{cmd:=}{it:varlist_iv}{cmd:)} [{it:weight}] +[{cmd:if} {it:exp}] [{cmd:in} {it:range}] +{bind:[{cmd:,} {cmd:gmm2s}} +{cmd:bw(}{it:#}{cmd:)} +{cmd:kernel(}{it:string}{cmd:)} +{cmd:dkraay(}{it:integer}{cmd:)} +{cmd:kiefer} +{cmd:liml} +{cmd:fuller(}{it:#}{cmd:)} +{cmd:kclass(}{it:#}{cmd:)} +{cmd:coviv} +{cmd:cue} +{cmd:b0}{cmd:(}{it:matrix}{cmd:)} +{cmdab:r:obust} +{cmdab:cl:uster}{cmd:(}{it:varlist}{cmd:)} 
+{cmd:orthog(}{it:varlist_ex}{cmd:)} +{cmd:endog(}{it:varlist_en}{cmd:)} +{cmdab:red:undant(}{it:varlist_ex}{cmd:)} +{cmd:partial(}{it:varlist}{cmd:)} +{cmdab:sm:all} +{cmdab:noc:onstant} +{cmd:center} +{cmd:smatrix}{cmd:(}{it:matrix}{cmd:)} +{cmd:wmatrix}{cmd:(}{it:matrix}{cmd:)} +{cmd:first} {cmd:ffirst} {cmd:savefirst} {cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:sfirst} {cmd:savesfirst} {cmdab:savesfp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:rf} {cmd:saverf} {cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:nocollin} {cmd:noid} +{cmdab:l:evel}{cmd:(}{it:#}{cmd:)} +{cmd:bvclean} +{cmdab:nohe:ader} +{cmdab:nofo:oter} +{cmdab:ef:orm}{cmd:(}{it:string}{cmd:)} +{cmdab:dep:name}{cmd:(}{it:varname}{cmd:)} +{bind:{cmd:plus} ]} + +{p 4}Replay syntax + +{p 8 14}{cmd:ivreg2} +{bind:[{cmd:,} {cmd:first}} {cmd:sfirst} +{cmd:ffirst} {cmd:rf} +{cmdab:l:evel}{cmd:(}{it:#}{cmd:)} +{cmdab:nohe:ader} +{cmdab:nofo:oter} +{cmdab:ef:orm}{cmd:(}{it:string}{cmd:)} +{cmdab:dep:name}{cmd:(}{it:varname}{cmd:)} +{cmd:plus} ]} + +{p 4}Version syntax + +{p 8 14}{cmd:ivreg2}, {cmd:version} + +{p}{cmd:ivreg2} is compatible with Stata version 8 or later. +The most-up-to-date implementation of {cmd:ivreg2} requires +Stata version 11 or later. +If {cmd:ivreg2} is called under earlier versions of Stata, +it will run a legacy version {cmd:ivreg2x}. +See below under {help ivreg2##s_versions:Running ivreg2 under earlier versions of Stata} +for details. + +{p}{cmd:ivreg2} may be used with time-series or panel data, +in which case the data must be {cmd:tsset} +before using {cmd:ivreg2}; see help {help tsset}. + +{p}All {it:varlists} may contain time-series operators or factor variables; +see help {help varlist}. + +{p}{cmd:by}, {cmd:rolling}, {cmd:statsby}, {cmd:xi}, +{cmd:bootstrap} and {cmd:jackknife} are allowed; see help {help prefix}. + +{p}{cmd:aweight}s, {cmd:fweight}s, {cmd:iweight}s and {cmd:pweight}s +are allowed; see help {help weights}. 
+ +{p}The syntax of {help predict} following {cmd:ivreg2} is + +{p 8 16}{cmd:predict} [{it:type}] {it:newvarname} [{cmd:if} {it:exp}] +[{cmd:in} {it:range}] [{cmd:,} {it:statistic}] + +{p}where {it:statistic} is + +{p 8 23}{cmd:xb}{space 11}fitted values; the default{p_end} +{p 8 23}{cmdab:r:esiduals}{space 4}residuals{p_end} +{p 8 23}{cmd:stdp}{space 9}standard error of the prediction{p_end} + +{p}These statistics are available both in and out of sample; +type "{cmd:predict} {it:...} {cmd:if e(sample)} {it:...}" +if wanted only for the estimation sample. + +{title:Contents} +{p 2}{help ivreg2##s_description:Description}{p_end} +{p 2}{help ivreg2##s_robust:Robust, cluster and 2-way cluster, AC, HAC, and cluster+HAC SEs and statistics}{p_end} +{p 2}{help ivreg2##s_gmm:GMM estimation}{p_end} +{p 2}{help ivreg2##s_liml:LIML, k-class and GMM-CUE estimation}{p_end} +{p 2}{help ivreg2##s_sumopt:Summary of robust, HAC, AC, GMM, LIML and CUE options}{p_end} +{p 2}{help ivreg2##s_overid:Testing overidentifying restrictions}{p_end} +{p 2}{help ivreg2##s_endog:Testing subsets of regressors and instruments for endogeneity}{p_end} +{p 2}{help ivreg2##s_relevance:Tests of under- and weak identification}{p_end} +{p 2}{help ivreg2##s_redundancy:Testing instrument redundancy}{p_end} +{p 2}{help ivreg2##s_first:First-stage regressions, identification, and weak-id-robust inference}{p_end} +{p 2}{help ivreg2##s_rf:Reduced form estimates}{p_end} +{p 2}{help ivreg2##s_partial:Partialling-out exogenous regressors}{p_end} +{p 2}{help ivreg2##s_ols:OLS and Heteroskedastic OLS (HOLS) estimation}{p_end} +{p 2}{help ivreg2##s_collin:Collinearities}{p_end} +{p 2}{help ivreg2##s_speed:Speed options: nocollin and noid}{p_end} +{p 2}{help ivreg2##s_small:Small sample corrections}{p_end} +{p 2}{help ivreg2##s_options:Options summary}{p_end} +{p 2}{help ivreg2##s_versions:Running ivreg2 under earlier versions of Stata}{p_end} +{p 2}{help ivreg2##s_macros:Remarks and saved results}{p_end} +{p 
2}{help ivreg2##s_examples:Examples}{p_end} +{p 2}{help ivreg2##s_refs:References}{p_end} +{p 2}{help ivreg2##s_acknow:Acknowledgements}{p_end} +{p 2}{help ivreg2##s_citation:Authors}{p_end} +{p 2}{help ivreg2##s_citation:Citation of ivreg2}{p_end} + +{marker s_description}{title:Description} + +{p}{cmd:ivreg2} implements a range of single-equation estimation methods +for the linear regression model: OLS, instrumental +variables (IV, also known as two-stage least squares, 2SLS), +the generalized method of moments (GMM), +limited-information maximum likelihood (LIML), and k-class estimators. +In the language of IV/GMM, {it:varlist1} are the exogenous +regressors or "included instruments", +{it:varlist_iv} are the exogenous variables excluded +from the regression or "excluded instruments", +and {it:varlist2} the endogenous regressors that are being "instrumented". + +{p}{cmd:ivreg2} will also estimate linear regression models using +robust (heteroskedastic-consistent), +autocorrelation-consistent (AC), +heteroskedastic and autocorrelation-consistent (HAC) +and cluster-robust variance estimates. + +{p}{cmd:ivreg2} is an alternative to Stata's official {cmd:ivregress}. 
+Its features include: +two-step feasible GMM estimation ({cmd:gmm2s} option) +and continuously-updated GMM estimation ({cmd:cue} option); +LIML and k-class estimation; +automatic output of overidentification and underidentification test statistics; +C statistic test of exogeneity of subsets of instruments +({cmd:orthog()} option); +endogeneity tests of endogenous regressors +({cmd:endog()} option); +test of instrument redundancy +({cmd:redundant()} option); +kernel-based autocorrelation-consistent (AC) +and heteroskedastic and autocorrelation consistent (HAC) standard errors +and covariance estimation ({cmd:bw(}{it:#}{cmd:)} option), +with user-specified choice of kernel ({cmd:kernel()} option); +two-level {cmd:cluster}-robust standard errors and statistics; +default reporting of large-sample statistics +(z and chi-squared rather than t and F); +{cmd:small} option to report small-sample statistics; +first-stage regressions reported with various tests and statistics for +identification and instrument relevance; +{cmd:ffirst} option to report only these identification statistics +and not the first-stage regression results themselves. +{cmd:ivreg2} can also be used for ordinary least squares (OLS) estimation +using the same command syntax as official {cmd:regress} and {cmd:newey}. + +{marker s_robust}{dlgtab:Robust, cluster and 2-level cluster, AC, HAC, and cluster+HAC SEs and statistics} + +{p}The standard errors and test statistics reported by {cmd:ivreg2} can be made consistent +to a variety of violations of the assumption of i.i.d. errors. +When these options are combined with +either the {cmd:gmm2s} or {cmd:cue} options (see below), +the parameter estimators reported are also efficient +in the presence of the same violation of i.i.d. errors. 
+
+{p}The options for SEs and statistics are:{break}
+{bind:(1) {cmd:robust}} causes {cmd:ivreg2} to report SEs and statistics that are
+robust to the presence of arbitrary heteroskedasticity.{break}
+{bind:(2) {cmd:cluster}({it:varname})} SEs and statistics are robust to both
+arbitrary heteroskedasticity and arbitrary intra-group correlation,
+where {it:varname} identifies the group.
+See the relevant Stata manual entries on obtaining robust covariance estimates
+for further details.{break}
+{bind:(3) {cmd:cluster}({it:varname1 varname2})} provides 2-way clustered SEs
+and statistics (Cameron et al. 2006, Thompson 2009)
+that are robust to arbitrary heteroskedasticity and intra-group correlation
+with respect to 2 non-nested categories defined by {it:varname1} and {it:varname2}.
+See below for a detailed description.{break}
+{bind:(4) {cmd:bw(}{it:#}{cmd:)}} requests AC SEs and statistics that are
+robust to arbitrary autocorrelation.{break}
+{bind:(5) {cmd:bw(}{it:#}{cmd:)}} combined with {cmd:robust}
+requests HAC SEs and statistics that are
+robust to both arbitrary heteroskedasticity and arbitrary autocorrelation.{break}
+{bind:(6) {cmd:bw(}{it:#}{cmd:)}} combined with {cmd:cluster}({it:varname})
+is allowed with either 1- or 2-level clustering if the data are panel data
+that are {cmd:tsset} on the time variable {it:varname}.
+Following Driscoll and Kraay (1998),
+the SEs and statistics reported will be robust to disturbances
+that are common to panel units and that are persistent, i.e., autocorrelated.{break}
+{bind:(7) {cmd:dkraay(}{it:#}{cmd:)}} is a shortcut for the Driscoll-Kraay SEs
+for panel data in (6).
+It is equivalent to clustering on the {cmd:tsset} time variable
+and the bandwidth supplied as {it:#}.
+The default Bartlett kernel can be overridden with the {cmd:kernel} option.{break}
+{bind:(8) {cmd:kiefer}} implements SEs and statistics for panel data
+that are robust to arbitrary intra-group autocorrelation
+(but {it:not} heteroskedasticity) as per Kiefer (1980).
+It is equivalent to specifying the truncated kernel with {cmd:kernel(tru)}
+and {cmd:bw(}{it:#}{cmd:)} where {it:#} is the full length of the panel.
+
+{p}Details:
+
+{p}{cmd:cluster}({it:varname1 varname2}) provides 2-way cluster-robust SEs
+and statistics as proposed by Cameron, Gelbach and Miller (2006) and Thompson (2009).
+"Two-way cluster-robust" means the SEs and statistics
+are robust to arbitrary within-group correlation in two distinct non-nested categories
+defined by {it:varname1} and {it:varname2}.
+A typical application would be panel data where one "category" is the panel
+and the other "category" is time;
+the resulting SEs are robust
+to arbitrary within-panel autocorrelation (clustering on panel id)
+and to arbitrary contemporaneous cross-panel correlation (clustering on time).
+There is no point in using 2-way cluster-robust SEs if the categories are nested,
+because the resulting SEs are equivalent to clustering on the larger category.
+{it:varname1} and {it:varname2} do not have to
+uniquely identify observations.
+The order of {it:varname1} and {it:varname2} does not matter for the results,
+but processing may be faster if the category with the larger number of categories
+(typically the panel dimension) is listed first.
+
+{p}Cameron, Gelbach and Miller (2006) show how this approach can accommodate
+multi-way clustering, where the number of different non-nested categories is arbitrary.
+Their Stata command {cmd:cgmreg} implements 2-way and multi-way clustering
+for OLS estimation.
+The two-way clustered variance-covariance estimator +is calculated using 3 different VCEs: one clustered on {it:varname1}, +the second clustered on {it:varname2}, and the third clustered on the +intersection of {it:varname1} and {it:varname2}. +Cameron et al. (2006, pp. 8-9) discuss two possible small-sample adjustments +using the number of clusters in each category. +{cmd:cgmreg} uses one method (adjusting the 3 VCEs separately based on +the number of clusters in the categories VCE clusters on); +{cmd:ivreg2} uses the second (adjusting the final 2-way cluster-robust VCE +using the smaller of the two numbers of clusters). +For this reason, {cmd:ivreg2} and {cmd:cgmreg} will produce slightly different SEs. +See also {help ivreg2##s_small:small sample corrections} below. + +{p}{cmd:ivreg2} allows a variety of options for kernel-based HAC and AC estimation. +The {cmd:bw(}{it:#}{cmd:)} option sets the bandwidth used in the estimation +and {cmd:kernel(}{it:string}{cmd:)} is the kernel used; +the default kernel is the Bartlett kernel, +also known in econometrics as Newey-West (see help {help newey}). +The full list of kernels available is (abbreviations in parentheses): +Bartlett (bar); Truncated (tru); Parzen (par); Tukey-Hanning (thann); +Tukey-Hamming (thamm); Daniell (dan); Tent (ten); and Quadratic-Spectral (qua or qs). +When using the Bartlett, Parzen, or Quadratic spectral kernels, the automatic +bandwidth selection procedure of Newey and West (1994) can be chosen +by specifying {cmd:bw(}{it:auto}{cmd:)}. +{cmd:ivreg2} can also be used for kernel-based estimation +with panel data, i.e., a cross-section of time series. +Before using {cmd:ivreg2} for kernel-based estimation +of time series or panel data, +the data must be {cmd:tsset}; see help {help tsset}. 
+
+{p}Following Driscoll and Kraay (1998),
+{cmd:bw(}{it:#}{cmd:)} combined with {cmd:cluster}({it:varname})
+and applied to panel data produces SEs that are
+robust to arbitrary common autocorrelated disturbances.
+The data must be {cmd:tsset} with the time variable specified as {it:varname}.
+Driscoll-Kraay SEs also can be specified using the {cmd:dkraay(}{it:#}{cmd:)} option,
+where {it:#} is the bandwidth.
+The default Bartlett kernel can be overridden with the {cmd:kernel} option.
+Note that the Driscoll-Kraay variance-covariance estimator is a large-T estimator,
+i.e., the panel should have a long-ish time-series dimension.
+
+{p}Used with 2-way clustering as per Thompson (2009),
+{cmd:bw(}{it:#}{cmd:)} combined with {cmd:cluster}({it:varname})
+provides SEs and statistics that are robust
+to autocorrelated within-panel disturbances (clustering on panel id)
+and to autocorrelated across-panel disturbances (clustering on time
+combined with kernel-based HAC).
+The approach proposed by Thompson (2009) can be implemented in {cmd:ivreg2}
+by choosing the truncated kernel {cmd:kernel(}{it:tru}{cmd:)}
+and {cmd:bw(}{it:#}{cmd:)}, where the researcher knows or assumes
+that the common autocorrelated disturbances can be ignored after {it:#} periods.
+
+{p}{cmd:Important:} Users should be aware of the asymptotic requirements
+for the consistency of the chosen VCE.
+In particular: consistency of the 1-way cluster-robust VCE requires
+the number of clusters to go off to infinity;
+consistency of the 2-way cluster-robust VCE requires the numbers of
+clusters in both categories to go off to infinity;
+consistency of kernel-robust VCEs requires the numbers of
+observations in the time dimension to go off to infinity.
+See Angrist and Pischke (2009), Cameron et al. (2006) and Thompson (2009)
+for detailed discussions of the performance of the cluster-robust VCE
+when the numbers of clusters is small.
+ +{marker s_gmm}{dlgtab:GMM estimation} + +{p}When combined with the above options, the {cmd:gmm2s} option generates +efficient estimates of the coefficients as well as consistent +estimates of the standard errors. +The {cmd:gmm2s} option implements the two-step efficient +generalized method of moments (GMM) estimator. +The efficient GMM estimator minimizes the GMM criterion function +J=N*g'*W*g, where N is the sample size, +g are the orthogonality or moment conditions +(specifying that all the exogenous variables, or instruments, +in the equation are uncorrelated with the error term) +and W is a weighting matrix. +In two-step efficient GMM, the efficient or optimal weighting matrix +is the inverse of an estimate of the covariance matrix of orthogonality conditions. +The efficiency gains of this estimator relative to the +traditional IV/2SLS estimator derive from the use of the optimal +weighting matrix, the overidentifying restrictions of the model, +and the relaxation of the i.i.d. assumption. +For an exactly-identified model, +the efficient GMM and traditional IV/2SLS estimators coincide, +and under the assumptions of conditional homoskedasticity and independence, +the efficient GMM estimator is the traditional IV/2SLS estimator. +For further details, see Hayashi (2000), pp. 206-13 and 226-27. + +{p}The {cmd:center} option specifies that the moments in the GMM weighting matrix +are centered so that they have mean zero. +There is some evidence that the use of centered moments leads to better +finite-sample performance; see e.g. Hall (2005), pp. 131-8 and 145-8. + +{p}The {cmd:wmatrix} option allows the user to specify a weighting matrix +rather than computing the optimal weighting matrix. +Estimation with the {cmd:wmatrix} option yields a possibly inefficient GMM estimator. 
+{cmd:ivreg2} will use this inefficient estimator as the first-step GMM estimator
+in two-step efficient GMM when combined with the {cmd:gmm2s} option;
+otherwise, {cmd:ivreg2} reports the regression results
+using this inefficient GMM estimator.
+
+{p}The {cmd:smatrix} option allows the user to directly
+specify the matrix S, the covariance matrix of orthogonality conditions.
+{cmd:ivreg2} will use this matrix in the calculation of the variance-covariance
+matrix of the estimator, the J statistic,
+and, if the {cmd:gmm2s} option is specified,
+the two-step efficient GMM coefficients.
+The {cmd:smatrix} can be useful for guaranteeing a positive test statistic
+in user-specified "GMM-distance tests" (see {help ivreg2##s_endog:below}).
+For further details, see Hayashi (2000), pp. 220-24.
+
+{marker s_liml}{dlgtab:LIML, k-class and GMM-CUE estimation}
+
+{marker liml}{p} Maximum-likelihood estimation of a single equation of this form
+(endogenous RHS variables and excluded instruments)
+is known as limited-information maximum likelihood or LIML.
+The overidentifying restrictions test
+reported after LIML estimation is the Anderson-Rubin (1950) overidentification
+statistic in a homoskedastic context.
+LIML, OLS and IV/2SLS are examples of k-class estimators.
+LIML is a k-class estimator with k=the LIML eigenvalue lambda;
+2SLS is a k-class estimator with k=1;
+OLS is a k-class estimator with k=0.
+Estimators based on other values of k have been proposed.
+Fuller's modified LIML (available with the {cmd:fuller(}{it:#}{cmd:)} option)
+sets k = lambda - alpha/(N-L), where lambda is the LIML eigenvalue,
+L = number of instruments (L1 excluded and L2 included),
+and the Fuller parameter alpha is a user-specified positive constant.
+Nagar's bias-adjusted 2SLS estimator can be obtained with the +{cmd:kclass(}{it:#}{cmd:)} option by setting +k = 1 + (L-K)/N, where L-K = number of overidentifying restrictions, +K = number of regressors (K1 endogenous and K2=L2 exogenous) +and N = the sample size. +For a discussion of LIML and k-class estimators, +see Davidson and MacKinnon (1993, pp. 644-51). + +{p} The GMM generalization of the LIML estimator +to the case of possibly heteroskedastic +and autocorrelated disturbances +is the "continuously-updated" GMM estimator or CUE +of Hansen, Heaton and Yaron (1996). +The CUE estimator directly maximizes the GMM objective function +J=N*g'*W(b_cue)*g, where W(b_cue) is an optimal weighting matrix +that depends on the estimated coefficients b_cue. +{cmd:cue}, combined with {cmd:robust}, {cmd:cluster}, and/or {cmd:bw}, +generates coefficient estimates that are efficient in the presence +of the corresponding deviations from homoskedasticity. +Specifying {cmd:cue} with no other options +is equivalent to the combination of the options {cmd:liml} and {cmd:coviv}. +The CUE estimator requires numerical optimization methods, +and the implementation here uses Mata's {cmd:optimize} routine. +The starting values are either IV or two-step efficient GMM +coefficient estimates. +If the user wants to evaluate the CUE objective function at +an arbitrary user-defined coefficient vector instead of having {cmd:ivreg2} +find the coefficient vector that minimizes the objective function, +the {cmd:b0(}{it:matrix}{cmd:)} option can be used. +The value of the CUE objective function at {cmd:b0} +is the Sargan or Hansen J statistic reported in the output. 
+
+{marker s_sumopt}{dlgtab:Summary of robust, HAC, AC, GMM, LIML and CUE options}
+
+
+
+Estimator {col 20}No VCE option specified {col 65}VCE option
+ option {col 60}{cmd:robust}, {cmd:cluster}, {cmd:bw}, {cmd:kernel}
+{hline}
+(none){col 15}IV/2SLS{col 60}IV/2SLS with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:liml}{col 15}LIML{col 60}LIML with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:gmm2s}{col 15}IV/2SLS{col 60}Two-step GMM with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:cue}{col 15}LIML{col 60}CUE GMM with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:kclass}{col 15}k-class estimator{col 60}k-class estimator with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:wmatrix}{col 15}Possibly inefficient GMM{col 60}Ineff GMM with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:gmm2s} + {col 15}Two-step GMM{col 60}Two-step GMM with
+{cmd:wmatrix}{col 15}with user-specified first step{col 60}robust SEs
+{col 15}SEs consistent under homoskedasticity
+
+
+{p}With the {cmd:bw} or {cmd:bw} and {cmd:kernel} VCE options,
+SEs are autocorrelation-robust (AC).
+Combining the {cmd:robust} option with {cmd:bw}, SEs are heteroskedasticity- and
+autocorrelation-robust (HAC).
+
+{p}For further details, see Hayashi (2000), pp. 206-13 and 226-27
+(on GMM estimation), Wooldridge (2002), p. 193 (on cluster-robust GMM),
+and Hayashi (2000), pp. 406-10 or Cushing and McGarvey (1999)
+(on kernel-based covariance estimation).
+
+{marker s_overid}{marker overidtests}{dlgtab:Testing overidentifying restrictions}
+
+{p}The Sargan-Hansen test is a test of overidentifying restrictions.
+The joint null hypothesis is that the instruments are valid
+instruments, i.e., uncorrelated with the error term,
+and that the excluded instruments are correctly excluded from the estimated equation.
+Under the null, the test statistic is distributed as chi-squared +in the number of (L-K) overidentifying restrictions. +A rejection casts doubt on the validity of the instruments. +For the efficient GMM estimator, the test statistic is +Hansen's J statistic, the minimized value of the GMM criterion function. +For the 2SLS estimator, the test statistic is Sargan's statistic, +typically calculated as N*R-squared from a regression of the IV residuals +on the full set of instruments. +Under the assumption of conditional homoskedasticity, +Hansen's J statistic becomes Sargan's statistic. +The J statistic is consistent in the presence of heteroskedasticity +and (for HAC-consistent estimation) autocorrelation; +Sargan's statistic is consistent if the disturbance is homoskedastic +and (for AC-consistent estimation) if it is also autocorrelated. +With {cmd:robust}, {cmd:bw} and/or {cmd:cluster}, +Hansen's J statistic is reported. +In the latter case the statistic allows observations +to be correlated within groups. +For further discussion see e.g. Hayashi (2000, pp. 227-8, 407, 417). + +{p}The Sargan statistic can also be calculated after +{cmd:ivreg} or {cmd:ivreg2} by the command {cmd:overid}. +The features of {cmd:ivreg2} that are unavailable in {cmd:overid} +are the J statistic and the C statistic; +the {cmd:overid} options unavailable in {cmd:ivreg2} +are various small-sample and pseudo-F versions of Sargan's statistic +and its close relative, Basmann's statistic. +See help {help overid} (if installed). + +{marker s_endog}{dlgtab:Testing subsets of regressors and instruments for endogeneity} + +{marker ctest}{p}The C statistic +(also known as a "GMM distance" +or "difference-in-Sargan" statistic) +implemented using the {cmd:orthog} option, +allows a test of a subset of the orthogonality conditions, i.e., +it is a test of the exogeneity of one or more instruments. 
+It is defined as
+the difference of the Sargan-Hansen statistic
+of the equation with the smaller set of instruments
+(valid under both the null and alternative hypotheses)
+and the equation with the full set of instruments,
+i.e., including the instruments whose validity is suspect.
+Under the null hypothesis that
+both the smaller set of instruments
+and the additional, suspect instruments are valid,
+the C statistic is distributed as chi-squared
+in the number of instruments tested.
+Note that failure to reject the null hypothesis
+requires that the full set of orthogonality conditions be valid;
+the C statistic and the Sargan-Hansen test statistics
+for the equations with both the smaller and full set of instruments
+should all be small.
+The instruments tested may be either excluded or included exogenous variables.
+If excluded exogenous variables are being tested,
+the equation that does not use these orthogonality conditions
+omits the suspect instruments from the excluded instruments.
+If included exogenous variables are being tested,
+the equation that does not use these orthogonality conditions
+treats the suspect instruments as included endogenous variables.
+To guarantee that the C statistic is non-negative in finite samples,
+the estimated covariance matrix of the full set orthogonality conditions
+is used to calculate both Sargan-Hansen statistics
+(in the case of simple IV/2SLS, this amounts to using the MSE
+from the unrestricted equation to calculate both Sargan statistics).
+If estimation is by LIML, the C statistic reported
+is now based on the Sargan-Hansen test statistics from
+the restricted and unrestricted equation.
+For further discussion, see Hayashi (2000), pp. 218-22 and pp. 232-34.
+
+{marker endogtest}{p}Endogeneity tests of one or more endogenous regressors
+can be implemented using the {cmd:endog} option.
+Under the null hypothesis that the specified endogenous regressors +can actually be treated as exogenous, the test statistic is distributed +as chi-squared with degrees of freedom equal to the number of regressors tested. +The endogeneity test implemented by {cmd:ivreg2}, is, like the C statistic, +defined as the difference of two Sargan-Hansen statistics: +one for the equation with the smaller set of instruments, +where the suspect regressor(s) are treated as endogenous, +and one for the equation with the larger set of instruments, +where the suspect regressors are treated as exogenous. +Also like the C statistic, the estimated covariance matrix used +guarantees a non-negative test statistic. +Under conditional homoskedasticity, +this endogeneity test statistic is numerically equal to +a Hausman test statistic; see Hayashi (2000, pp. 233-34). +The endogeneity test statistic can also be calculated after +{cmd:ivreg} or {cmd:ivreg2} by the command {cmd:ivendog}. +Unlike the Durbin-Wu-Hausman tests reported by {cmd:ivendog}, +the {cmd:endog} option of {cmd:ivreg2} can report test statistics +that are robust to various violations of conditional homoskedasticity; +the {cmd:ivendog} option unavailable in {cmd:ivreg2} +is the Wu-Hausman F-test version of the endogeneity test. +See help {help ivendog} (if installed). + +{marker s_relevance}{dlgtab:Tests of under- and weak identification} + +{marker idtest}{p}{cmd:ivreg2} automatically reports tests of +both underidentification and weak identification. +The underidentification test is an LM test of whether the equation is identified, +i.e., that the excluded instruments are "relevant", +meaning correlated with the endogenous regressors. +The test is essentially the test of the rank of a matrix: +under the null hypothesis that the equation is underidentified, +the matrix of reduced form coefficients on the L1 excluded instruments +has rank=K1-1 where K1=number of endogenous regressors. 
+Under the null,
+the statistic is distributed as chi-squared
+with degrees of freedom=(L1-K1+1).
+A rejection of the null indicates that the matrix is full column rank,
+i.e., the model is identified.
+
+{p}For a test of whether a particular endogenous regressor alone is identified,
+see the discussion {help ivreg2##swstats:below} of the
+Sanderson-Windmeijer (2015) and Angrist-Pischke (2009) procedures.
+
+{p}When errors are assumed to be i.i.d.,
+{cmd:ivreg2} automatically reports an LM version of
+the Anderson (1951) canonical correlations test.
+Denoting the minimum eigenvalue of the canonical correlations as CCEV,
+the smallest canonical correlation between the K1 endogenous regressors
+and the L1 excluded instruments
+(after partialling out the K2=L2 exogenous regressors)
+is sqrt(CCEV),
+and the Anderson LM test statistic is N*CCEV,
+i.e., N times the square of the smallest canonical correlation.
+With the {cmd:first} or {cmd:ffirst} options,
+{cmd:ivreg2} also reports the closely-related
+Cragg-Donald (1993) Wald test statistic.
+Again assuming i.i.d. errors,
+and denoting the minimum eigenvalue of the Cragg-Donald statistic as CDEV,
+CDEV=CCEV/(1-CCEV),
+and the Cragg-Donald Wald statistic is N*CDEV.
+Like the Anderson LM statistic, the Cragg-Donald Wald statistic
+is distributed as chi-squared with (L1-K1+1) degrees of freedom.
+Note that a result of rejection of the null
+should be treated with caution,
+because weak instrument problems may still be present.
+See Hall et al. (1996) for a discussion of this test,
+and below for discussion of testing for the presence of weak instruments.
+
+{p}When the i.i.d. assumption is dropped
+and {cmd:ivreg2} reports heteroskedastic, AC, HAC
+or cluster-robust statistics,
+the Anderson LM and Cragg-Donald Wald statistics are no longer valid.
+In these cases, {cmd:ivreg2} reports the LM and Wald versions
+of the Kleibergen-Paap (2006) rk statistic,
+also distributed as chi-squared with (L1-K1+1) degrees of freedom. 
+The rk statistic can be seen as a generalization of these tests +to the case of non-i.i.d. errors; +see Kleibergen and Paap (2006) for discussion, +and Kleibergen and Schaffer (2007) for a Stata implementation, {cmd:ranktest}. +{cmd:ivreg2} requires {cmd:ranktest} to be installed, +and will prompt the user to install it if necessary. +If {cmd:ivreg2} is invoked with the {cmd:robust} option, +the rk underidentification test statistics will be heteroskedastic-robust, +and similarly with {cmd:bw} and {cmd:cluster}. + +{marker widtest}{p}"Weak identification" arises when the excluded instruments are correlated +with the endogenous regressors, but only weakly. +Estimators can perform poorly when instruments are weak, +and different estimators are more robust to weak instruments (e.g., LIML) +than others (e.g., IV); +see, e.g., Stock and Yogo (2002, 2005) for further discussion. +When errors are assumed to be i.i.d., +the test for weak identification automatically reported +by {cmd:ivreg2} is an F version of the Cragg-Donald Wald statistic, (N-L)/L1*CDEV, +where L is the number of instruments and L1 is the number of excluded instruments. +Stock and Yogo (2005) have compiled critical values +for the Cragg-Donald F statistic for +several different estimators (IV, LIML, Fuller-LIML), +several different definitions of "perform poorly" (based on bias and test size), +and a range of configurations (up to 100 excluded instruments +and up to 2 or 3 endogenous regressors, +depending on the estimator). +{cmd:ivreg2} will report the Stock-Yogo critical values +if these are available; +missing values mean that the critical values +haven't been tabulated or aren't applicable. +See Stock and Yogo (2002, 2005) for details. + +{p}When the i.i.d. assumption is dropped +and {cmd:ivreg2} is invoked with the {cmd:robust}, {cmd:bw} or {cmd:cluster} options, +the Cragg-Donald-based weak instruments test is no longer valid. 
+{cmd:ivreg2} instead reports a correspondingly-robust +Kleibergen-Paap Wald rk F statistic. +The degrees of freedom adjustment for the rk statistic is (N-L)/L1, +as with the Cragg-Donald F statistic, +except in the cluster-robust case, +when the adjustment is N/(N-1) * (N_clust-1)/N_clust, +following the standard Stata small-sample adjustment for cluster-robust. In the case of two-way clustering, N_clust is the minimum of N_clust1 and N_clust2. +The critical values reported by {cmd:ivreg2} for the Kleibergen-Paap statistic +are the Stock-Yogo critical values for the Cragg-Donald i.i.d. case. +The critical values reported with 2-step GMM +are the Stock-Yogo IV critical values, +and the critical values reported with CUE +are the LIML critical values. + +{marker s_redundancy}{dlgtab:Testing instrument redundancy} + +{marker redtest}{p}The {cmd:redundant} option allows a test of +whether a subset of excluded instruments is "redundant". +Excluded instruments are redundant if the asymptotic efficiency +of the estimation is not improved by using them. +Breusch et al. (1999) show that the condition for the redundancy of a set of instruments +can be stated in several equivalent ways: +e.g., in the reduced form regressions of the endogenous regressors +on the full set of instruments, +the redundant instruments have statistically insignificant coefficients; +or the partial correlations between the endogenous regressors +and the instruments in question are zero. +{cmd:ivreg2} uses a formulation based on testing the rank +of the matrix cross-product between the endogenous regressors +and the possibly-redundant instruments after both have +all other instruments partialled-out; +{cmd:ranktest} is used to test whether the matrix has zero rank. +The test statistic is an LM test +and numerically equivalent to a regression-based LM test. 
+Under the null that the specified instruments are redundant,
+the statistic is distributed as chi-squared
+with degrees of freedom=(#endogenous regressors)*(#instruments tested).
+Rejection of the null indicates that
+the instruments are not redundant.
+When the i.i.d. assumption is dropped
+and {cmd:ivreg2} reports heteroskedastic, AC, HAC
+or cluster-robust statistics,
+the redundancy test statistic is similarly robust.
+See Baum et al. (2007) for further discussion.
+
+{p}Calculation and reporting of all underidentification
+and weak identification statistics
+can be suppressed with the {cmd:noid} option.
+
+{marker s_first}{dlgtab:First-stage regressions, identification, and weak-id-robust inference}
+
+{p}The {cmd:first}, {cmd:sfirst} and {cmd:ffirst} options report
+various first-stage results and identification statistics.
+The {cmd:first} option reports the individual first-stage regressions separately.
+The {cmd:sfirst} option reports all the first-stage regressions jointly
+in a single estimation table along with the reduced form equation
+for the dependent variable (see {help ivreg2##s_rf:below});
+the output is similar in appearance and usage (e.g., in testing)
+to that generated by Stata's {cmd:mvreg}.
+
+{marker swstats}{p}Tests of both underidentification and weak identification are reported
+for each endogenous regressor separately,
+using the method of Sanderson-Windmeijer (2015)
+(a modification and improvement of the method described by
+Angrist and Pischke (2009), pp. 217-18, and implemented
+in previous versions of {cmd:ivreg2};
+the AP test statistics remain available in the {cmd:e(first)} matrix).
+
+{p}The Sanderson-Windmeijer (SW) first-stage chi-squared and F statistics
+are tests of underidentification and weak identification, respectively,
+of individual endogenous regressors.
+They are constructed by "partialling-out" linear projections of the
+remaining endogenous regressors. 
+The SW chi-squared Wald statistic is distributed as chi2(L1-K1+1)
+under the null that the particular endogenous regressor
+in question is unidentified.
+In the special case of a single endogenous regressor,
+the SW statistic reported is identical to underidentification statistics reported
+in the {cmd:ffirst} output,
+namely the Cragg-Donald Wald statistic (if i.i.d.)
+or the Kleibergen-Paap rk Wald statistic (if robust, cluster-robust, AC or HAC
+statistics have been requested);
+see {help ivreg2##idtest:above}.
+The SW first-stage F statistic is the F form of the same test statistic.
+It can be used as a diagnostic for whether a particular endogenous regressor
+is "weakly identified" (see {help ivreg2##widtest:above}).
+For further details and discussion, see Sanderson and Windmeijer (2015).
+
+{p}The first-stage results are always reported with small-sample statistics,
+to be consistent with the recommended use of the first-stage F-test as a diagnostic.
+If the estimated equation is reported with robust standard errors,
+the first-stage F-test is also robust.
+
+{p}A full set of first-stage statistics for each of the K1 endogenous regressors
+is saved in the matrix e(first).
+These include (a) the SW and AP F and chi-squared statistics; (b) the "partial R-squared"
+(squared partial correlation) corresponding to the SW and AP statistics;
+(c) Shea's (1997) partial R-squared measure (closely related to the SW and AP statistics,
+but not amenable to formal testing); (d) the simple F and partial R-squared
+statistics for each of the first-stage equations,
+with no adjustments if there is more than one endogenous regressor.
+In the special case of a single endogenous regressor,
+these F statistics and partial R-squareds are identical.
+
+{marker wirobust}{p}The first-stage output also includes
+two statistics that provide weak-instrument robust inference
+for testing the significance of the endogenous regressors in the structural equation being estimated. 
+The first statistic is the Anderson-Rubin (1949) test
+(not to be confused with the Anderson-Rubin overidentification test for LIML estimation;
+see {help ivreg2##s_liml:above}).
+The second is the closely related Stock-Wright (2000) S statistic.
+The null hypothesis tested in both cases is that
+the coefficients of the endogenous regressors in the structural equation are jointly equal to zero,
+and, in addition, that the overidentifying restrictions are valid.
+Both tests are robust to the presence of weak instruments.
+The tests are equivalent to estimating the reduced form of the equation
+(with the full set of instruments as regressors)
+and testing that the coefficients of the excluded instruments are jointly equal to zero.
+In the form reported by {cmd:ivreg2}, the Anderson-Rubin statistic is a Wald test
+and the Stock-Wright S statistic is an LM test.
+Both statistics are distributed as chi-squared with L1 degrees of freedom,
+where L1=number of excluded instruments.
+The traditional F-stat version of the Anderson-Rubin test is also reported.
+See Stock and Wright (2000), Dufour (2003), Chernozhukov and Hansen (2005) and Kleibergen (2007)
+for further discussion.
+For related alternative test statistics that are also robust to weak instruments,
+see {help condivreg} and {help weakiv},
+and the corresponding discussions
+in Moreira and Poi (2003) and Mikusheva and Poi (2006),
+and in Finlay and Magnusson (2009), respectively.
+
+{p}The {cmd:savefirst} option requests that the individual first-stage regressions
+be saved for later access using the {cmd:estimates} command.
+If saved, they can also be displayed using {cmd:first} or {cmd:ffirst} and the {cmd:ivreg2} replay syntax.
+The regressions are saved with the prefix "_ivreg2_",
+unless the user specifies an alternative prefix with the
+{cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} option. 
+The {cmd:savesfirst} and {cmdab:savesfp:refix}{cmd:(}{it:prefix}{cmd:)} options +work similarly for the {cmd:sfirst} option if the user wishes to save +the first-stage and reduced form estimations as a single estimated system. + +{marker s_rf}{dlgtab:Reduced form estimates} + +{p}The {cmd:rf} option requests that the reduced form estimation of the equation be displayed. +The {cmd:saverf} option requests that the reduced form estimation is saved +for later access using the {cmd:estimates} command. +If saved, it can also be displayed using the {cmd:rf} and the {cmd:ivreg2} replay syntax. +The regression is saved with the prefix "_ivreg2_", +unless the user specifies an alternative prefix with the +{cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} option. + +{marker s_partial}{dlgtab:Partialling-out exogenous regressors} + +{marker partial}{p}The {cmd:partial(}{it:varlist}{cmd:)} option requests that +the exogenous regressors in {it:varlist} are "partialled out" +from all the other variables (other regressors and excluded instruments) in the estimation. +If the equation includes a constant, it is also automatically partialled out as well. +The coefficients corresponding to the regressors in {it:varlist} are not calculated. +By the Frisch-Waugh-Lovell (FWL) theorem, in IV, +two-step GMM and LIML estimation the coefficients for the remaining regressors +are the same as those that would be obtained if the variables were not partialled out. +(NB: this does not hold for CUE or GMM iterated more than two steps.) +The {cmd:partial} option is most useful when using {cmd:cluster} +and #clusters < (#exogenous regressors + #excluded instruments). +In these circumstances, +the covariance matrix of orthogonality conditions S is not of full rank, +and efficient GMM and overidentification tests are infeasible +since the optimal weighting matrix W = {bind:S^-1} +cannot be calculated. 
+The problem can be addressed by using {cmd:partial} +to partial out enough exogenous regressors for S to have full rank. +A similar problem arises when the regressors include a variable that is a singleton dummy, +i.e., a variable with one 1 and N-1 zeros or vice versa, +if a robust covariance matrix is requested. +The singleton dummy causes the robust covariance matrix estimator +to be less than full rank. +In this case, partialling-out the variable with the singleton dummy solves the problem. +Specifying {cmd:partial(_cons)} will cause just the constant to be partialled-out, +i.e., the equation will be estimated in deviations-from-means form. +When {cmd:ivreg2} is invoked with {cmd:partial}, +it reports test statistics with the same small-sample adjustments +as if estimating without {cmd:partial}, +with the exception of the information in the output header +(the model F, R-sqs and total sums-of-squares +refer to the model after the variables are partialled-out). +Note that after estimation using the {cmd:partial} option, +the post-estimation {cmd:predict} can be used only to generate residuals. + +{marker s_ols}{dlgtab:OLS and Heteroskedastic OLS (HOLS) estimation} + +{p}{cmd:ivreg2} also allows straightforward OLS estimation +by using the same syntax as {cmd:regress}, i.e., +{it:ivreg2 depvar varlist1}. +This can be useful if the user wishes to use one of the +features of {cmd:ivreg2} in OLS regression, e.g., AC or +HAC standard errors. + +{p}If the list of endogenous variables {it:varlist2} is empty +but the list of excluded instruments {it:varlist_iv} is not, +and the option {cmd:gmm2s} is specified, +{cmd:ivreg2} calculates Cragg's "heteroskedastic OLS" (HOLS) estimator, +an estimator that is more efficient than OLS +in the presence of heteroskedasticity of unknown form +(see Davidson and MacKinnon (1993), pp. 599-600). 
+If the option {cmd:bw(}{it:#}{cmd:)} is specified, +the HOLS estimator is efficient in the presence of +arbitrary autocorrelation; +if both {cmd:bw(}{it:#}{cmd:)} and {cmd:robust} are specified +the HOLS estimator is efficient in the presence of +arbitrary heteroskedasticity and autocorrelation; +and if {cmd:cluster(}{it:varlist}{cmd:)} is used, +the HOLS estimator is efficient in the presence of +arbitrary heteroskedasticity and within-group correlation. +The efficiency gains of HOLS derive from the orthogonality conditions +of the excluded instruments listed in {it:varlist_iv}. +If no endogenous variables are specified and {cmd:gmm2s} is not specified, +{cmd:ivreg2} reports standard OLS coefficients. +The Sargan-Hansen statistic reported +when the list of endogenous variables {it:varlist2} is empty +is a Lagrange multiplier (LM) test +of the hypothesis that the excluded instruments {it:varlist_iv} are +correctly excluded from the restricted model. +If the estimation is LIML, the LM statistic reported +is now based on the Sargan-Hansen test statistics from +the restricted and unrestricted equation. +For more on LM tests, see e.g. Wooldridge (2002), pp. 58-60. +Note that because the approach of the HOLS estimator +has applications beyond heteroskedastic disturbances, +and to avoid confusion concerning the robustness of the estimates, +the estimators presented above as "HOLS" +are described in the output of {cmd:ivreg2} +as "2-Step GMM", "CUE", etc., as appropriate. + +{marker s_collin}{dlgtab:Collinearities} + +{p}{cmd:ivreg2} checks the lists of included instruments, +excluded instruments, and endogenous regressors +for collinearities and duplicates. If an endogenous regressor is +collinear with the instruments, it is reclassified as exogenous. If any +endogenous regressors are collinear with each other, some are dropped. +If there are any collinearities among the instruments, some are dropped; +excluded instruments are dropped before included instruments. 
+If any variables are dropped, a list of their names is saved
+in the macros {cmd:e(collin)} and/or {cmd:e(dups)}.
+
+{p}Starting with {cmd:ivreg2} v4.1,
+the Stata 11+ convention is followed and
+omitted variables are reported in the regression output
+and saved in the {cmd:e(b)} and {cmd:e(V)} macros.
+These omitted variables, as well as other omitted variables
+(e.g., empty factor variables) can be suppressed
+by use of the {cmd:bvclean} option.
+The Stata display options
+{cmd:noomitted}, {cmd:vsquish}, {cmd:noemptycells}, {cmd:baselevels} and {cmd:allbaselevels}
+are also supported; see {helpb ereturn##display_options:ereturn}.
+Variable lists with collinear variables, duplicates marked with Stata's "o." operator,
+and factor variable base variables
+are saved in macros with a "0" appended to the corresponding macro names;
+lists with these variables removed are saved in macros with a "1" appended.
+
+{p}Collinearity checks can be suppressed with the {cmd:nocollin} option.
+
+{marker s_speed}{dlgtab:Speed options: nocollin and noid}
+
+{p}Two options are available for speeding execution.
+{cmd:nocollin} specifies that the collinearity checks not be performed.
+{cmd:noid} suspends calculation and reporting of
+the underidentification and weak identification statistics
+in the main output.
+
+{marker s_small}{dlgtab:Small sample corrections}
+
+{p}Mean square error = sqrt(RSS/(N-K)) if {cmd:small}, = sqrt(RSS/N) otherwise.
+
+{p}If {cmd:robust} is chosen, the finite sample adjustment
+(see {hi:[R] regress}) to the robust variance-covariance matrix
+qc = N/(N-K) if {cmd:small}, qc = 1 otherwise.
+
+{p}If {cmd:cluster} is chosen, the finite sample adjustment
+qc = (N-1)/(N-K)*M/(M-1) if {cmd:small}, where M=number of clusters,
+qc = 1 otherwise.
+If 2-way clustering is used, M=min(M1,M2),
+where M1=number of clusters in group 1
+and M2=number of clusters in group 2. 
+ +{p}If the {cmd:partial(}{it:varlist}{cmd:)} option is used, +the partialled-out exogenous regressors are included in K. + +{p}The Sargan and C (difference-in-Sargan) statistics use +error variance = RSS/N, i.e., there is no small sample correction. + +{p}A full discussion of these computations and related topics +can be found in Baum, Schaffer, and Stillman (2003) and Baum, Schaffer and +Stillman (2007). Some features of the program postdate the former article and are described in the latter paper. +Some features, such as two-way clustering, postdate the latter article as well. + + +{marker s_options}{title:Options summary} + +{p 0 4}{cmd:gmm2s} requests the two-step efficient GMM estimator. +If no endogenous variables are specified, the estimator is Cragg's HOLS estimator. + +{p 0 4}{cmd:liml} requests the limited-information maximum likelihood estimator. + +{p 0 4}{cmd:fuller(}{it:#}{cmd:)} specifies that Fuller's modified LIML estimator +is calculated using the user-supplied Fuller parameter alpha, +a non-negative number. +Alpha=1 has been suggested as a good choice. + +{p 0 4}{cmd:kclass(}{it:#}{cmd:)} specifies that a general k-class estimator is calculated +using the user-supplied #, a non-negative number. + +{p 0 4}{cmd:coviv} specifies that the matrix used to calculate the +covariance matrix for the LIML or k-class estimator +is based on the 2SLS matrix, i.e., with k=1. +In this case the covariance matrix will differ from that calculated for the 2SLS +estimator only because the estimate of the error variance will differ. +The default is for the covariance matrix to be based on the LIML or k-class matrix. + +{p 0 4}{cmd:cue} requests the GMM continuously-updated estimator (CUE). + +{p 0 4}{cmd:b0(}{it:matrix}{cmd:)} specifies that the J statistic +(i.e., the value of the CUE objective function) +should be calculated for an arbitrary coefficient vector {cmd:b0}. +That vector must be provided as a matrix with appropriate row and column names. 
+Under- and weak-identification statistics are not reported
+in the output.
+
+{p 0 4}{cmd:robust} specifies that the Eicker/Huber/White/sandwich estimator of
+variance is to be used in place of the traditional calculation. {cmd:robust}
+combined with {cmd:cluster()} further allows residuals which are not
+independent within cluster (although they must be independent between
+clusters). See {hi:[U] Obtaining robust variance estimates}.
+
+{p 0 4}{cmd:cluster}{cmd:(}{it:varlist}{cmd:)} specifies that the observations
+are independent across groups (clusters) but not necessarily independent
+within groups.
+With 1-way clustering, {cmd:cluster}{cmd:(}{it:varname}{cmd:)}
+specifies to which group each observation
+belongs; e.g., {cmd:cluster(personid)} in data with repeated observations on
+individuals.
+With 2-way clustering, {cmd:cluster}{cmd:(}{it:varname1 varname2}{cmd:)}
+specifies the two (non-nested) groups to which each observation belongs.
+Specifying {cmd:cluster()} implies {cmd:robust}.
+
+{p 0 4}{cmd:bw(}{it:#}{cmd:)} implements AC or HAC covariance estimation
+with bandwidth equal to {it:#}, where {it:#} is an integer greater than zero.
+Specifying {cmd:robust} implements HAC covariance estimation;
+omitting it implements AC covariance estimation.
+If the Bartlett (default), Parzen or Quadratic Spectral kernels are selected,
+the value {cmd:auto} may be given (rather than an integer)
+to invoke Newey and West's (1994) automatic bandwidth selection procedure.
+
+{p 0 4}{cmd:kernel(}{it:string}{cmd:)} specifies the kernel
+to be used for AC and HAC covariance estimation;
+the default kernel is Bartlett (also known in econometrics
+as Newey-West).
+The full list of kernels available is (abbreviations in parentheses):
+Bartlett (bar); Truncated (tru); Parzen (par); Tukey-Hanning (thann);
+Tukey-Hamming (thamm); Daniell (dan); Tent (ten); and Quadratic-Spectral (qua or qs). 
+ +{p 4 4}Note: in the cases of the Bartlett, Parzen, +and Tukey-Hanning/Hamming kernels, the number of lags used +to construct the kernel estimate equals the bandwidth minus one. +Stata's official {cmd:newey} implements +HAC standard errors based on the Bartlett kernel, +and requires the user to specify +the maximum number of lags used and not the bandwidth; +see help {help newey}. +If these kernels are used with {cmd:bw(1)}, +no lags are used and {cmd:ivreg2} will report the usual +Eicker/Huber/White/sandwich variance estimates. + +{p 0 4}{cmd:center} specifies that the moments used to construct +the efficient GMM weighting matrix are centered. +If used with an inefficient 1-step estimator, +the estimated coefficients and their standard errors are unaffected but +centered moments will be used in the reported Hansen J statistic. + +{p 0 4}{cmd:wmatrix(}{it:matrix}{cmd:)} specifies a user-supplied weighting matrix +in place of the computed optimal weighting matrix. +The matrix must be positive definite. +The user-supplied matrix must have the same row and column names +as the instrument variables in the regression model (or a subset thereof). + +{p 0 4}{cmd:smatrix(}{it:matrix}{cmd:)} specifies a user-supplied covariance matrix +of the orthogonality conditions to be used in calculating the covariance matrix of the estimator. +The matrix must be positive definite. +The user-supplied matrix must have the same row and column names +as the instrument variables in the regression model (or a subset thereof). + +{p 0 4}{cmd:orthog}{cmd:(}{it:varlist_ex}{cmd:)} requests that a C-statistic +be calculated as a test of the exogeneity of the instruments in {it:varlist_ex}. +These may be either included or excluded exogenous variables. +The standard order condition for identification applies: +the restricted equation that does not use these variables +as exogenous instruments must still be identified. 
+
+{p 0 4}{cmd:endog}{cmd:(}{it:varlist_en}{cmd:)} requests that a C-statistic
+be calculated as a test of the endogeneity
+of the endogenous regressors in {it:varlist_en}.
+
+{p 0 4}{cmd:redundant}{cmd:(}{it:varlist_ex}{cmd:)} requests an LM test
+of the redundancy of the instruments in {it:varlist_ex}.
+These must be excluded exogenous variables.
+The standard order condition for identification applies:
+the restricted equation that does not use these variables
+as exogenous instruments must still be identified.
+
+{p 0 4}{cmd:small} requests that small-sample statistics (F and t-statistics)
+be reported instead of large-sample statistics (chi-squared and z-statistics).
+Large-sample statistics are the default.
+The exception is the statistic for the significance of the regression,
+which is always reported as a small-sample F statistic.
+
+{p 0 4}{cmd:noconstant} suppresses the constant term (intercept) in the
+regression. If {cmd:noconstant} is specified, the constant term is excluded
+from both the final regression and the first-stage regression. To include a
+constant in the first-stage when {cmd:noconstant} is specified, explicitly
+include a variable containing all 1's in {it:varlist_iv}.
+
+{p 0 4}{cmd:first} requests that the full first-stage regression results be displayed,
+along with the associated diagnostic and identification statistics.
+
+{p 0 4}{cmd:sfirst} requests that the first-stage and reduced form regressions
+are reported as a single system of equations (i.e., in a single regression output table).
+
+{p 0 4}{cmd:ffirst} requests the first-stage diagnostic and identification statistics.
+The results are saved in various e() macros.
+
+{p 0 4}{cmd:nocollin} suppresses the checks for collinearities
+and duplicate variables.
+
+{p 0 4}{cmd:noid} suppresses the calculation and reporting
+of underidentification and weak identification statistics. 
+
+{p 0 4}{cmd:savefirst} requests that the first-stage regression results
+are saved for later access using the {cmd:estimates} command.
+The names under which the first-stage regressions are saved
+are the names of the endogenous regressors prefixed by "_ivreg2_".
+If these use Stata's time-series operators,
+the "." is replaced by a "_".
+
+{p 0 4}{cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} requests that
+the first-stage regression results be saved using the user-specified prefix
+instead of the default "_ivreg2_".
+
+{p 0 4}{cmd:rf} requests that the reduced-form estimation of the equation
+be displayed.
+
+{p 0 4}{cmd:saverf} requests that the reduced-form estimation of the equation
+be saved for later access using the {cmd:estimates} command.
+The estimation is stored under the name of the dependent variable
+prefixed by "_ivreg2_".
+If this uses Stata's time-series operators,
+the "." is replaced by a "_".
+
+{p 0 4}{cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} requests that
+the reduced-form estimation be saved using the user-specified prefix
+instead of the default "_ivreg2_".
+
+{p 0 4}{cmd:sfirst} requests that the first-stage and reduced form equations
+are estimated and displayed as a single system of equations.
+
+{p 0 4}{cmd:savesfirst} requests that the system of first-stage
+and reduced form estimations be saved for later access
+using the {cmd:estimates} command.
+The estimation is stored under the name of the dependent variable
+prefixed by "_ivreg2_sfirst_".
+If this uses Stata's time-series operators,
+the "." is replaced by a "_".
+
+{p 0 4}{cmdab:savesfp:refix}{cmd:(}{it:prefix}{cmd:)} requests that
+the system of first-stage and reduced form estimations be saved using the user-specified prefix
+instead of the default "_ivreg2_sfirst_".
+
+{p 0 4}{cmd:partial(}{it:varlist}{cmd:)} requests that
+the exogenous regressors in {it:varlist} be partialled out
+from the other variables in the equation. 
+If the equation includes a constant,
+it is automatically partialled out as well.
+The coefficients corresponding to the regressors in {it:varlist}
+are not calculated.
+
+{p 0 4}{cmd:level(}{it:#}{cmd:)} specifies the confidence level, in percent,
+for confidence intervals of the coefficients; see help {help level}.
+
+{p 0 4}{cmd:bvclean} specifies that omitted variables (including factor base variables)
+are not reported in the estimation output and are not saved in the
+{cmd:e(b)} and {cmd:e(V)} macros.
+
+{p 0 4}{cmd:noheader}, {cmd:eform()}, {cmd:depname()} and {cmd:plus}
+are for ado-file writers; see {hi:[R] ivreg} and {hi:[R] regress}.
+
+{p 0 4}{cmd:nofooter} suppresses the display of the footer containing
+identification and overidentification statistics,
+exogeneity and endogeneity tests,
+lists of endogenous variables and instruments, etc.
+
+{p 0 4}{cmd:version} causes {cmd:ivreg2} to display its current version number
+and to leave it in the macro {cmd:e(version)}.
+It cannot be used with any other options,
+and will clear any existing {cmd:e()} saved results.
+
+
+{marker s_versions}{title:Running ivreg2 under earlier versions of Stata}
+
+{p}The most-up-to-date implementation of {cmd:ivreg2} requires Stata version 11 or later.
+If {cmd:ivreg2} is called under earlier versions of Stata,
+it will automatically run a legacy version {cmd:ivreg2x},
+where "x" denotes the required Stata version.
+These versions of {cmd:ivreg2} - {cmd:ivreg28}, {cmd:ivreg29} and {cmd:ivreg210} -
+are self-contained and require a minimum of Stata version 8/9/10, respectively.
+"Self-contained" means these legacy versions (unlike the main up-to-date {cmd:ivreg2} code)
+do not require access to any external Mata library or user-written Stata routines.
+These legacy versions are installed with the {cmd:ivreg2} package,
+can also be called directly from the Stata command line or in do files,
+and come with their own help files. 
+
+{p}For example, if a user has Stata 8 installed and calls {cmd:ivreg2},
+it will invoke the legacy version {cmd:ivreg28}.
+If a user has a later version of Stata
+but wants to run the Stata 8 legacy version of {cmd:ivreg2},
+s/he can estimate either by calling {cmd:ivreg28} directly
+or by calling {cmd:ivreg2} under version control (i.e., "{cmd:version 8: ivreg2}").
+To see what options are/aren't available with this particular legacy version,
+the user can see {helpb ivreg28}.
+
+
+{marker s_macros}{title:Remarks and saved results}
+
+{p}{cmd:ivreg2} does not report an ANOVA table.
+Instead, it reports the RSS and both the centered and uncentered TSS.
+It also reports both the centered and uncentered R-squared.
+NB: the TSS and R-squared reported by official {cmd:ivreg} is centered
+if a constant is included in the regression, and uncentered otherwise.
+
+
+{p}{cmd:ivreg2} saves the following results in {cmd:e()}:
+
+Scalars
+{col 4}{cmd:e(N)}{col 18}Number of observations
+{col 4}{cmd:e(yy)}{col 18}Total sum of squares (SS), uncentered (y'y)
+{col 4}{cmd:e(yyc)}{col 18}Total SS, centered (y'y - ((1'y)^2)/n)
+{col 4}{cmd:e(rss)}{col 18}Residual SS
+{col 4}{cmd:e(mss)}{col 18}Model SS =yyc-rss if the eqn has a constant, =yy-rss otherwise
+{col 4}{cmd:e(df_m)}{col 18}Model degrees of freedom
+{col 4}{cmd:e(df_r)}{col 18}Residual degrees of freedom
+{col 4}{cmd:e(r2u)}{col 18}Uncentered R-squared, 1-rss/yy
+{col 4}{cmd:e(r2c)}{col 18}Centered R-squared, 1-rss/yyc
+{col 4}{cmd:e(r2)}{col 18}Centered R-squared if the eqn has a constant, uncentered otherwise
+{col 4}{cmd:e(r2_a)}{col 18}Adjusted R-squared
+{col 4}{cmd:e(ll)}{col 18}Log likelihood
+{col 4}{cmd:e(rankxx)}{col 18}Rank of the matrix of observations on rhs variables=K
+{col 4}{cmd:e(rankzz)}{col 18}Rank of the matrix of observations on instruments=L
+{col 4}{cmd:e(rankV)}{col 18}Rank of covariance matrix V of coefficients
+{col 4}{cmd:e(rankS)}{col 18}Rank of covariance matrix S of orthogonality 
conditions +{col 4}{cmd:e(rmse)}{col 18}root mean square error=sqrt(rss/(N-K)) if -small-, =sqrt(rss/N) if not +{col 4}{cmd:e(F)}{col 18}F statistic +{col 4}{cmd:e(N_clust)}{col 18}Number of clusters (or min(N_clust1,N_clust2) if 2-way clustering) +{col 4}{cmd:e(N_clust1)}{col 18}Number of clusters in dimension 1 (if 2-way clustering) +{col 4}{cmd:e(N_clust2)}{col 18}Number of clusters in dimension 2 (if 2-way clustering) +{col 4}{cmd:e(bw)}{col 18}Bandwidth +{col 4}{cmd:e(lambda)}{col 18}LIML eigenvalue +{col 4}{cmd:e(kclass)}{col 18}k in k-class estimation +{col 4}{cmd:e(fuller)}{col 18}Fuller parameter alpha +{col 4}{cmd:e(sargan)}{col 18}Sargan statistic +{col 4}{cmd:e(sarganp)}{col 18}p-value of Sargan statistic +{col 4}{cmd:e(sargandf)}{col 18}dof of Sargan statistic = degree of overidentification = L-K +{col 4}{cmd:e(j)}{col 18}Hansen J statistic +{col 4}{cmd:e(jp)}{col 18}p-value of Hansen J statistic +{col 4}{cmd:e(jdf)}{col 18}dof of Hansen J statistic = degree of overidentification = L-K +{col 4}{cmd:e(arubin)}{col 18}Anderson-Rubin overidentification LR statistic N*ln(lambda) +{col 4}{cmd:e(arubinp)}{col 18}p-value of Anderson-Rubin overidentification LR statistic +{col 4}{cmd:e(arubin_lin)}{col 18}Anderson-Rubin linearized overidentification statistic N*(lambda-1) +{col 4}{cmd:e(arubin_linp)}{col 18}p-value of Anderson-Rubin linearized overidentification statistic +{col 4}{cmd:e(arubindf)}{col 18}dof of A-R overid statistic = degree of overidentification = L-K +{col 4}{cmd:e(idstat)}{col 18}LM test statistic for underidentification (Anderson or Kleibergen-Paap) +{col 4}{cmd:e(idp)}{col 18}p-value of underidentification LM statistic +{col 4}{cmd:e(iddf)}{col 18}dof of underidentification LM statistic +{col 4}{cmd:e(widstat)}{col 18}F statistic for weak identification (Cragg-Donald or Kleibergen-Paap) +{col 4}{cmd:e(arf)}{col 18}Anderson-Rubin F-test of significance of endogenous regressors +{col 4}{cmd:e(arfp)}{col 18}p-value of Anderson-Rubin F-test of 
endogenous regressors +{col 4}{cmd:e(archi2)}{col 18}Anderson-Rubin chi-sq test of significance of endogenous regressors +{col 4}{cmd:e(archi2p)}{col 18}p-value of Anderson-Rubin chi-sq test of endogenous regressors +{col 4}{cmd:e(ardf)}{col 18}degrees of freedom of Anderson-Rubin tests of endogenous regressors +{col 4}{cmd:e(ardf_r)}{col 18}denominator degrees of freedom of AR F-test of endogenous regressors +{col 4}{cmd:e(redstat)}{col 18}LM statistic for instrument redundancy +{col 4}{cmd:e(redp)}{col 18}p-value of LM statistic for instrument redundancy +{col 4}{cmd:e(reddf)}{col 18}dof of LM statistic for instrument redundancy +{col 4}{cmd:e(cstat)}{col 18}GMM distance test statistic of exogeneity +{col 4}{cmd:e(cstatp)}{col 18}p-value of GMM distance test statistic of exogeneity +{col 4}{cmd:e(cstatdf)}{col 18}Degrees of freedom of GMM distance test statistic of exogeneity +{col 4}{cmd:e(estat)}{col 18}GMM distance test statistic of endogeneity +{col 4}{cmd:e(estatp)}{col 18}p-value of GMM distance test statistic of endogeneity +{col 4}{cmd:e(estatdf)}{col 18}Degrees of freedom of GMM distance test statistic of endogeneity +{col 4}{cmd:e(cons)}{col 18}1 when equation has a Stata-supplied constant; 0 otherwise +{col 4}{cmd:e(center)}{col 18}1 when moments are mean-centered; 0 otherwise +{col 4}{cmd:e(partialcons)}{col 18}as above but prior to partialling-out (see {cmd:e(partial)}) +{col 4}{cmd:e(partial_ct)}{col 18}Number of partialled-out variables (see {cmd:e(partial)}) + +Macros +{col 4}{cmd:e(cmd)}{col 18}ivreg2 +{col 4}{cmd:e(cmdline)}{col 18}Command line invoking ivreg2 +{col 4}{cmd:e(ivreg2cmd)}{col 18}Version of ivreg2 (ivreg2, ivreg28, ivreg29, etc.) 
+{col 4}{cmd:e(version)}{col 18}Version number of ivreg2 +{col 4}{cmd:e(model)}{col 18}ols, iv, gmm, liml, or kclass +{col 4}{cmd:e(depvar)}{col 18}Name of dependent variable +{col 4}{cmd:e(instd)}{col 18}Instrumented (RHS endogenous) variables +{col 4}{cmd:e(insts)}{col 18}Instruments +{col 4}{cmd:e(inexog)}{col 18}Included instruments (regressors) +{col 4}{cmd:e(exexog)}{col 18}Excluded instruments +{col 4}{cmd:e(collin)}{col 18}Variables dropped because of collinearities +{col 4}{cmd:e(dups)}{col 18}Duplicate variables +{col 4}{cmd:e(ecollin)}{col 18}Endogenous variables reclassified as exogenous because of +{col 20}collinearities with instruments +{col 4}{cmd:e(clist)}{col 18}Instruments tested for orthogonality +{col 4}{cmd:e(redlist)}{col 18}Instruments tested for redundancy +{col 4}{cmd:e(partial)}{col 18}Partialled-out exogenous regressors +{col 4}{cmd:e(small)}{col 18}small +{col 4}{cmd:e(wtype)}{col 18}weight type +{col 4}{cmd:e(wexp)}{col 18}weight expression +{col 4}{cmd:e(clustvar)}{col 18}Name of cluster variable +{col 4}{cmd:e(vcetype)}{col 18}Covariance estimation method +{col 4}{cmd:e(kernel)}{col 18}Kernel +{col 4}{cmd:e(tvar)}{col 18}Time variable +{col 4}{cmd:e(ivar)}{col 18}Panel variable +{col 4}{cmd:e(firsteqs)}{col 18}Names of stored first-stage equations +{col 4}{cmd:e(rfeq)}{col 18}Name of stored reduced form equation +{col 4}{cmd:e(sfirsteq)}{col 18}Name of stored system of first-stage and reduced form equations +{col 4}{cmd:e(predict)}{col 18}Program used to implement predict + +Matrices +{col 4}{cmd:e(b)}{col 18}Coefficient vector +{col 4}{cmd:e(V)}{col 18}Variance-covariance matrix of the estimators +{col 4}{cmd:e(S)}{col 18}Covariance matrix of orthogonality conditions +{col 4}{cmd:e(W)}{col 18}GMM weighting matrix (=inverse of S if efficient GMM estimator) +{col 4}{cmd:e(first)}{col 18}First-stage regression results +{col 4}{cmd:e(ccev)}{col 18}Eigenvalues corresponding to the Anderson canonical correlations test +{col 
4}{cmd:e(cdev)}{col 18}Eigenvalues corresponding to the Cragg-Donald test + +Functions +{col 4}{cmd:e(sample)}{col 18}Marks estimation sample + + + +{marker s_examples}{title:Examples} + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/hayashi/griliches76.dta" : . use http://fmwww.bc.edu/ec-p/data/hayashi/griliches76.dta }{p_end} +{p 8 12}(Wages of Very Young Men, Zvi Griliches, J.Pol.Ec. 1976) + +{col 0}(Instrumental variables. Examples follow Hayashi 2000, p. 255.) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt)" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt)} + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), small ffirst" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), small ffirst} + +{col 0}(Testing for the presence of heteroskedasticity in IV/GMM estimation) + +{p 8 12}{stata "ivhettest, fitlev" : . ivhettest, fitlev} + +{col 0}(Two-step GMM efficient in the presence of arbitrary heteroskedasticity) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s robust" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s robust} + +{p 0}(GMM with user-specified first-step weighting matrix or matrix of orthogonality conditions) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), robust" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), robust} + +{p 8 12}{stata "predict double uhat if e(sample), resid" : . predict double uhat if e(sample), resid} + +{p 8 12}{stata "mat accum S = `e(insts)' [iw=uhat^2]" : . mat accum S = `e(insts)' [iw=uhat^2]} + +{p 8 12}{stata "mat S = 1/`e(N)' * S" : . mat S = 1/`e(N)' * S} + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s robust smatrix(S)" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s robust smatrix(S)} + +{p 8 12}{stata "mat W = invsym(S)" : . 
mat W = invsym(S)} + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s robust wmatrix(W)" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s robust wmatrix(W)} + +{p 0}(Equivalence of J statistic and Wald tests of included regressors, irrespective of instrument choice (Ahn, 1997)) + +{p 8 12}{stata "ivreg2 lw (iq=med kww age), gmm2s" : . ivreg2 lw (iq=med kww age), gmm2s} + +{p 8 12}{stata "mat S0 = e(S)" : . mat S0 = e(S)} + +{p 8 12}{stata "qui ivreg2 lw (iq=kww) med age, gmm2s smatrix(S0)" : . qui ivreg2 lw (iq=kww) med age, gmm2s smatrix(S0)} + +{p 8 12}{stata "test med age" : . test med age} + +{p 8 12}{stata "qui ivreg2 lw (iq=med) kww age, gmm2s smatrix(S0)" : . qui ivreg2 lw (iq=med) kww age, gmm2s smatrix(S0)} + +{p 8 12}{stata "test kww age" : . test kww age} + +{p 8 12}{stata "qui ivreg2 lw (iq=age) med kww, gmm2s smatrix(S0)" : . qui ivreg2 lw (iq=age) med kww, gmm2s smatrix(S0)} + +{p 8 12}{stata "test med kww" : . test med kww} + +{p 0}(Continuously-updated GMM (CUE) efficient in the presence of arbitrary heteroskedasticity. NB: may require 30+ iterations.) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), cue robust" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), cue robust} + +{col 0}(Sargan-Basmann tests of overidentifying restrictions for IV estimation) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt)" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt)} + +{p 8 12}{stata "overid, all" : . overid, all} + +{col 0}(Tests of exogeneity and endogeneity) + +{col 0}(Test the exogeneity of one regressor) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s orthog(s)" : . 
ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s orthog(s)} + +{col 0}(Test the exogeneity of two excluded instruments) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s orthog(age mrt)" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age mrt), gmm2s orthog(age mrt)} + +{col 0}(Frisch-Waugh-Lovell (FWL): equivalence of estimations with and without partialling-out) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns i.year (iq=kww age), cluster(year)" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age), cluster(year)} + +{p 8 12}{stata "ivreg2 lw s expr tenure rns i.year (iq=kww age), cluster(year) partial(i.year)" : . ivreg2 lw s expr tenure rns smsa i.year (iq=med kww age), cluster(year) partial(i.year)} + +{col 0}({cmd:partial()}: efficient GMM with #clusters<#instruments feasible after partialling-out) + +{p 8 12}{stata "ivreg2 lw s expr tenure rns i.year (iq=kww age), cluster(year) partial(i.year) gmm2s" : . ivreg2 lw s expr tenure rns smsa (iq=med kww age), cluster(year) partial(i.year) gmm2s} + +{col 0}(Examples following Wooldridge 2002, pp.59, 61) + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta" : . use http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta } + +{col 0}(Equivalence of DWH endogeneity test when regressor is endogenous...) + +{p 8 12}{stata "ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6)" : . ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6)} + +{p 8 12}{stata "ivendog educ" :. ivendog educ} + +{col 0}(... endogeneity test using the {cmd:endog} option) + +{p 8 12}{stata "ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), endog(educ)" : . ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), endog(educ)} + +{col 0}(...and C-test of exogeneity when regressor is exogenous, using the {cmd:orthog} option) + +{p 8 12}{stata "ivreg2 lwage exper expersq educ (=age kidslt6 kidsge6), orthog(educ)" : . 
ivreg2 lwage exper expersq educ (=age kidslt6 kidsge6), orthog(educ)} + +{col 0}(Heteroskedastic Ordinary Least Squares, HOLS) + +{p 8 12}{stata "ivreg2 lwage exper expersq educ (=age kidslt6 kidsge6), gmm2s" : . ivreg2 lwage exper expersq educ (=age kidslt6 kidsge6), gmm2s} + +{col 0}(Equivalence of Cragg-Donald Wald F statistic and F-test from first-stage regression +{col 0}in special case of single endogenous regressor. Also illustrates {cmd:first}, {cmd:sfirst} +{col 0}and {cmd:savefirst} options.) + +{p 8 12}{stata "ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), first sfirst savefirst" : . ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), first sfirst savefirst} + +{p 8 12}{stata "di e(widstat)" : . di e(widstat)} + +{p 8 12}{stata "estimates restore _ivreg2_educ" : . estimates restore _ivreg2_educ} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(F)" : . di r(F)} + +{col 0}(Equivalence of Kleibergen-Paap robust rk Wald F statistic and F-test from first-stage +{col 0}regression in special case of single endogenous regressor.) + +{p 8 12}{stata "ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust savefirst" : . ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust savefirst} + +{p 8 12}{stata "di e(widstat)" : . di e(widstat)} + +{p 8 12}{stata "estimates restore _ivreg2_educ" : . estimates restore _ivreg2_educ} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(F)" : . di r(F)} + +{col 0}(Equivalence of Kleibergen-Paap robust rk LM statistic for identification and LM test +{col 0}of joint significance of excluded instruments in first-stage regression in special +{col 0}case of single endogenous regressor. Also illustrates use of {cmd:ivreg2} to perform an +{col 0}LM test in OLS estimation.) + +{p 8 12}{stata "ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust" : . 
ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust} + +{p 8 12}{stata "di e(idstat)" : . di e(idstat)} + +{p 8 12}{stata "ivreg2 educ exper expersq (=age kidslt6 kidsge6) if e(sample), robust" : . ivreg2 educ exper expersq (=age kidslt6 kidsge6) if e(sample), robust} + +{p 8 12}{stata "di e(j)" : . di e(j)} + +{col 0}(Equivalence of an LM test of an excluded instrument for redundancy and an LM test of +{col 0}significance from first-stage regression in special case of single endogenous regressor.) + +{p 8 12}{stata "ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust redundant(age)" : . ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust redundant(age)} + +{p 8 12}{stata "di e(redstat)" : . di e(redstat)} + +{p 8 12}{stata "ivreg2 educ exper expersq kidslt6 kidsge6 (=age) if e(sample), robust" : . ivreg2 educ exper expersq kidslt6 kidsge6 (=age) if e(sample), robust} + +{p 8 12}{stata "di e(j)" : . di e(j)} + +{col 0}(Weak-instrument robust inference: Anderson-Rubin Wald F and chi-sq and +{col 0}Stock-Wright S statistics. Also illusrates use of {cmd:saverf} option.) + +{p 8 12}{stata "ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust ffirst saverf" : . ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust ffirst saverf} + +{p 8 12}{stata "di e(arf)" : . di e(arf)} + +{p 8 12}{stata "di e(archi2)" : . di e(archi2)} + +{p 8 12}{stata "di e(sstat)" : . di e(sstat)} + +{col 0}(Obtaining the Anderson-Rubin Wald F statistic from the reduced-form estimation) + +{p 8 12}{stata "estimates restore _ivreg2_lwage" : . estimates restore _ivreg2_lwage} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(F)" : . di r(F)} + +{col 0}(Obtaining the Anderson-Rubin Wald chi-sq statistic from the reduced-form estimation. +{col 0}Use {cmd:ivreg2} without {cmd:small} to obtain large-sample test statistic.) + +{p 8 12}{stata "ivreg2 lwage exper expersq age kidslt6 kidsge6, robust" : . 
ivreg2 lwage exper expersq age kidslt6 kidsge6, robust} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(chi2)" : . di r(chi2)} + +{col 0}(Obtaining the Stock-Wright S statistic as the value of the GMM CUE objective function. +{col 0}Also illustrates use of {cmd:b0} option. Coefficients on included exogenous regressors +{col 0}are OLS coefficients, which is equivalent to partialling them out before obtaining +{col 0}the value of the CUE objective function.) + +{p 8 12}{stata "mat b = 0" : . mat b = 0} + +{p 8 12}{stata "mat colnames b = educ" : . mat colnames b = educ} + +{p 8 12}{stata "qui ivreg2 lwage exper expersq" : . qui ivreg2 lwage exper expersq} + +{p 8 12}{stata "mat b = b, e(b)" : . mat b = b, e(b)} + +{p 8 12}{stata "ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust b0(b)" : . ivreg2 lwage exper expersq (educ=age kidslt6 kidsge6), robust b0(b)} + +{p 8 12}{stata "di e(j)" : . di e(j)} + +{col 0}(LIML and k-class estimation using Klein data) + +{col 9}{stata "webuse klein" :. webuse klein} +{col 9}{stata "tsset yr" :. tsset yr} + +{col 0}(LIML estimates of Klein's consumption function) + +{p 8 12}{stata "ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), liml" :. ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), liml} + +{col 0}(Equivalence of LIML and CUE+homoskedasticity+independence) + +{p 8 12}{stata "ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), liml coviv" :. ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), liml coviv} + +{p 8 12}{stata "ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), cue" :. 
ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), cue} + +{col 0}(Fuller's modified LIML with alpha=1) + +{p 8 12}{stata "ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), fuller(1)" :. ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), fuller(1)} + +{col 0}(k-class estimation with Nagar's bias-adjusted IV, k=1+(L-K)/N=1+4/21=1.19) + +{p 8 12}{stata "ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), kclass(1.19)" :. ivreg2 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), kclass(1.19)} + +{col 0}(Kernel-based covariance estimation using time-series data) + +{col 9}{stata "use http://fmwww.bc.edu/ec-p/data/wooldridge/phillips.dta" :. use http://fmwww.bc.edu/ec-p/data/wooldridge/phillips.dta} +{col 9}{stata "tsset year, yearly" :. tsset year, yearly} + +{col 0}(Autocorrelation-consistent (AC) inference in an OLS Regression) + +{p 8 12}{stata "ivreg2 cinf unem, bw(3)" :. ivreg2 cinf unem, bw(3)} + +{p 8 12}{stata "ivreg2 cinf unem, kernel(qs) bw(auto)" :. ivreg2 cinf unem, kernel(qs) bw(auto)} + +{col 0}(Heteroskedastic and autocorrelation-consistent (HAC) inference in an OLS regression) + +{p 8 12}{stata "ivreg2 cinf unem, bw(3) kernel(bartlett) robust small" :. ivreg2 cinf unem, bw(3) kernel(bartlett) robust small} + +{p 8 12}{stata "newey cinf unem, lag(2)" :. newey cinf unem, lag(2)} + +{col 0}(AC and HAC in IV and GMM estimation) + +{p 8 12}{stata "ivreg2 cinf (unem = l(1/3).unem), bw(3)" :. ivreg2 cinf (unem = l(1/3).unem), bw(3)} + +{p 8 12}{stata "ivreg2 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(thann)" :. ivreg2 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(thann)} + +{p 8 12}{stata "ivreg2 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(qs) robust orthog(l1.unem)" :. 
ivreg2 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(qs) robust orthog(l1.unem)} + +{col 0}(Examples using Large N, Small T Panel Data) + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/macro/abdata.dta" : . use http://fmwww.bc.edu/ec-p/data/macro/abdata.dta }{p_end} +{p 8 12}{stata "tsset id year" :. tsset id year} + +{col 0}(Two-step effic. GMM in the presence of arbitrary heteroskedasticity and autocorrelation) + +{p 8 12}{stata "ivreg2 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), gmm2s cluster(id)": . ivreg2 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), gmm2s cluster(id)} + +{col 0}(Kiefer (1980) SEs - robust to arbitrary serial correlation but not heteroskedasticity) + +{p 8 12}{stata "ivreg2 n w k, kiefer": . ivreg2 n w k, kiefer} + +{p 8 12}{stata "ivreg2 n w k, bw(8) kernel(tru)": . ivreg2 n w k, bw(8) kernel(tru)} + +{col 0}(Equivalence of cluster-robust and kernel-robust with truncated kernel and max bandwidth) + +{p 8 12}{stata "ivreg2 n w k, cluster(id)": . ivreg2 n w k, cluster(id)} + +{p 8 12}{stata "ivreg2 n w k, bw(8) kernel(tru) robust": . ivreg2 n w k, bw(8) kernel(tru) robust} + +{col 0}(Examples using factor variables) + +{p 8 12}{stata "sysuse auto" : . sysuse auto }{p_end} + +{p 8 12}{stata "ivreg2 price i.foreign i.rep78": . ivreg2 price i.foreign i.rep78} + +{p 8 12}{stata "ivreg2 price i.rep78 (foreign = weight turn trunk)": . ivreg2 price i.rep78 (foreign = weight turn trunk) } + +{p 8 12}{stata "ivreg2 price i.rep78 (c.mpg#c.mpg = weight length turn)": . ivreg2 price i.rep78 (c.mpg#c.mpg = weight length turn)} + +{col 0}(Examples using Small N, Large T Panel Data. NB: T is actually not very large - only +{col 0}20 - so results should be interpreted with caution) + +{p 8 12}{stata "webuse grunfeld" : . webuse grunfeld }{p_end} +{p 8 12}{stata "tsset" : . tsset }{p_end} + +{col 0}(Autocorrelation-consistent (AC) inference) + +{p 8 12}{stata "ivreg2 invest mvalue kstock, bw(1) kernel(tru)": . 
ivreg2 invest mvalue kstock, bw(1) kernel(tru)} + +{col 0}(Heteroskedastic and autocorrelation-consistent (HAC) inference) + +{p 8 12}{stata "ivreg2 invest mvalue kstock, robust bw(1) kernel(tru)": . ivreg2 invest mvalue kstock, robust bw(1) kernel(tru)} + +{col 0}(HAC inference, SEs also robust to disturbances correlated across panels) + +{p 8 12}{stata "ivreg2 invest mvalue kstock, robust cluster(year) bw(1) kernel(tru)": . ivreg2 invest mvalue kstock, robust cluster(year) bw(1) kernel(tru)} + +{col 0}(Equivalence of Driscoll-Kraay SEs as implemented by {cmd:ivreg2} and {cmd:xtscc}) +{col 0}(See Hoeschle (2007) for discussion of {cmd:xtscc}) + +{p 8 12}{stata "ivreg2 invest mvalue kstock, dkraay(2) small": . ivreg2 invest mvalue kstock, dkraay(2) small} + +{p 8 12}{stata "ivreg2 invest mvalue kstock, cluster(year) bw(2) small": . ivreg2 invest mvalue kstock, cluster(year) bw(2) small} + +{p 8 12}{stata "xtscc invest mvalue kstock, lag(1)": . xtscc invest mvalue kstock, lag(1)} + +{col 0}(Examples using Large N, Large T Panel Data. NB: T is again not very large - only +{col 0}20 - so results should be interpreted with caution) + +{p 8 12}{stata "webuse nlswork" : . webuse nlswork }{p_end} +{p 8 12}{stata "tsset" : . tsset }{p_end} + +{col 0}(One-way cluster-robust: SEs robust to arbitrary heteroskedasticity and within-panel +{col 0}autocorrelation) + +{p 8 12}{stata "ivreg2 ln_w grade age ttl_exp tenure, cluster(idcode)": . ivreg2 ln_w grade age ttl_exp tenure, cluster(idcode) }{p_end} + +{col 0}(Two-way cluster-robust: SEs robust to arbitrary heteroskedasticity and within-panel +{col 0}autocorrelation, and contemporaneous cross-panel correlation, i.e., the cross-panel +{col 0}correlation is not autocorrelated) + +{p 8 12}{stata "ivreg2 ln_w grade age ttl_exp tenure, cluster(idcode year)": . 
ivreg2 ln_w grade age ttl_exp tenure, cluster(idcode year) }{p_end} + +{col 0}(Two-way cluster-robust: SEs robust to arbitrary heteroskedasticity and within-panel +{col 0}autocorrelation and cross-panel autocorrelated disturbances that disappear after 2 lags) + +{p 8 12}{stata "ivreg2 ln_w grade age ttl_exp tenure, cluster(idcode year) bw(2) kernel(tru) ": . ivreg2 ln_w grade age ttl_exp tenure, cluster(idcode year) bw(2) kernel(tru) }{p_end} + + + +{marker s_refs}{title:References} + +{p 0 4}Ahn, Seung C. 1997. Orthogonality tests in linear models. Oxford Bulletin +of Economics and Statistics, Vol. 59, pp. 183-186. + +{p 0 4}Anderson, T.W. 1951. Estimating linear restrictions on regression coefficients +for multivariate normal distributions. Annals of Mathematical Statistics, Vol. 22, pp. 327-51. + +{p 0 4}Anderson, T. W. and H. Rubin. 1949. Estimation of the parameters of a single equation +in a complete system of stochastic equations. Annals of Mathematical Statistics, Vol. 20, +pp. 46-63. + +{p 0 4}Anderson, T. W. and H. Rubin. 1950. The asymptotic properties of estimates of the parameters of a single +equation in a complete system of stochastic equations. Annals of Mathematical Statistics, +Vol. 21, pp. 570-82. + +{p 0 4}Angrist, J.D. and Pischke, J.-S. 2009. Mostly Harmless Econometrics: An Empiricist's Companion. +Princeton: Princeton University Press. + +{p 0 4}Baum, C.F., Schaffer, M.E., and Stillman, S. 2003. Instrumental Variables and GMM: +Estimation and Testing. The Stata Journal, Vol. 3, No. 1, pp. 1-31. +{browse "http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html":http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html}. +Working paper version: Boston College Department of Economics Working Paper No. 545. +{browse "http://ideas.repec.org/p/boc/bocoec/545.html":http://ideas.repec.org/p/boc/bocoec/545.html}. +Citations in {browse "http://scholar.google.com/scholar?oi=bibs&hl=en&cites=9432785573549481148":published work}. + +{p 0 4}Baum, C. 
F., Schaffer, M.E., and Stillman, S. 2007. Enhanced routines for instrumental variables/GMM estimation and testing. +The Stata Journal, Vol. 7, No. 4, pp. 465-506. +{browse "http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html":http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html}. +Working paper version: Boston College Department of Economics Working Paper No. 667. +{browse "http://ideas.repec.org/p/boc/bocoec/667.html":http://ideas.repec.org/p/boc/bocoec/667.html}. +Citations in {browse "http://scholar.google.com/scholar?oi=bibs&hl=en&cites=1691909976816211536":published work}. + +{p 0 4}Breusch, T., Qian, H., Schmidt, P. and Wyhowski, D. 1999. +Redundancy of moment conditions. +Journal of Econometrics, Vol. 9, pp. 89-111. + +{p 0 4}Cameron, A.C., Gelbach, J.B. and Miller, D.L. 2006. +Robust Inference with Multi-Way Clustering. +NBER Technical Working paper 327. +{browse "http://www.nber.org/papers/t0327":http://www.nber.org/papers/t0327}. +Forthcoming in the Journal of Business and Economic Statistics. +{cmd:cgmreg} is available at +{browse "http://www.econ.ucdavis.edu/faculty/dlmiller/statafiles":http://www.econ.ucdavis.edu/faculty/dlmiller/statafiles}. + +{p 0 4}Chernozhukov, V. and Hansen, C. 2005. The Reduced Form: +A Simple Approach to Inference with Weak Instruments. +Working paper, University of Chicago, Graduate School of Business. + +{p 0 4}Cragg, J.G. and Donald, S.G. 1993. Testing Identfiability and Specification in +Instrumental Variables Models. Econometric Theory, Vol. 9, pp. 222-240. + +{p 0 4}Cushing, M.J. and McGarvey, M.G. 1999. Covariance Matrix Estimation. +In L. Matyas (ed.), Generalized Methods of Moments Estimation. +Cambridge: Cambridge University Press. + +{p 0 4}Davidson, R. and MacKinnon, J. 1993. Estimation and Inference in Econometrics. +1993. New York: Oxford University Press. + +{p 0 4}Driscoll, J.C. and Kraay, A. 1998. Consistent Covariance Matrix Estimation With Spatially Dependent Panel Data. 
+Review of Economics and Statistics. Vol. 80, No. 4, pp. 549-560. + +{p 0 4}Dufour, J.M. 2003. Identification, Weak Instruments and Statistical Inference +in Econometrics. Canadian Journal of Economics, Vol. 36, No. 4, pp. 767-808. +Working paper version: CIRANO Working Paper 2003s-49. +{browse "http://www.cirano.qc.ca/pdf/publication/2003s-49.pdf":http://www.cirano.qc.ca/pdf/publication/2003s-49.pdf}. + +{p 0 4}Finlay, K., and Magnusson, L.M. 2009. Implementing Weak-Instrument Robust Tests +for a General Class of Instrumental-Variables Models. +The Stata Journal, Vol. 9, No. 3, pp. 398-421. +{browse "http://www.stata-journal.com/article.html?article=st0171":http://www.stata-journal.com/article.html?article=st0171}. + +{p 0 4}Hall, A.R. Generalized Method of Moments. 2005. Oxford: Oxford University Press. + +{p 0 4}Hall, A.R., Rudebusch, G.D. and Wilcox, D.W. 1996. Judging Instrument Relevance in +Instrumental Variables Estimation. International Economic Review, Vol. 37, No. 2, pp. 283-298. + +{p 0 4}Hayashi, F. Econometrics. 2000. Princeton: Princeton University Press. + +{p 0 4}Hansen, L.P., Heaton, J., and Yaron, A. 1996. Finite Sample Properties +of Some Alternative GMM Estimators. Journal of Business and Economic Statistics, Vol. 14, No. 3, pp. 262-280. + +{p 0 4}Hoechle, D. 2007. Robust Standard Errors for Panel Regressions with Cross�sectional Dependence. +Stata Journal, Vol. 7, No. 3, pp. 281-312. +{browse "http://www.stata-journal.com/article.html?article=st0128":http://www.stata-journal.com/article.html?article=st0128}. + +{p 0 4}Kiefer, N.M. 1980. Estimation of Fixed Effect Models for Time Series of Cross-Sections with +Arbitrary Intertemporal Covariance. Journal of Econometrics, Vol. 14, No. 2, pp. 195-202. + +{p 0 4}Kleibergen, F. 2007. Generalizing Weak Instrument Robust Statistics Towards Multiple Parameters, Unrestricted Covariance Matrices and Identification Statistics. Journal of Econometrics, forthcoming. + +{p 0 4}Kleibergen, F. and Paap, R. 
2006. Generalized Reduced Rank Tests Using the Singular Value Decomposition. +Journal of Econometrics, Vol. 133, pp. 97-126. + +{p 0 4}Kleibergen, F. and Schaffer, M.E. 2007. ranktest: Stata module for testing the rank +of a matrix using the Kleibergen-Paap rk statistic. +{browse "http://ideas.repec.org/c/boc/bocode/s456865.html":http://ideas.repec.org/c/boc/bocode/s456865.html}. + +{p 0 4}Mikusheva, A. and Poi, B.P. 2006. +Tests and Confidence Sets with Correct Size When Instruments are Potentially Weak. The Stata Journal, Vol. 6, No. 3, pp. 335-347. + +{p 0 4}Moreira, M.J. and Poi, B.P. 2003. Implementing Tests with the Correct Size in the Simultaneous Equations Model. The Stata Journal, Vol. 3, No. 1, pp. 57-70. + +{p 0 4}Newey, W.K. and K.D. West, 1994. Automatic Lag Selection in Covariance Matrix Estimation. Review of Economic Studies, Vol. 61, No. 4, pp. 631-653. + +{p 0 4}Sanderson, E. and F. Windmeijer, 2015. A Weak Instrument F-Test in Linear IV Models with Multiple Endogenous Variables. +Journal of Econometrics (forthcoming). +Working paper version: University of Bristol Discussion Paper 14/644. +{browse "http://ideas.repec.org/p/bri/uobdis/14-644.html":http://ideas.repec.org/p/bri/uobdis/14-644.html}. + +{p 0 4}Shea, J. 1997. Instrument Relevance in Multivariate Linear Models: A Simple Measure. +Review of Economics and Statistics, Vol. 49, No. 2, pp. 348-352. + +{p 0 4}Stock, J.H. and Wright, J.H. 2000. GMM with Weak Identification. +Econometrica, Vol. 68, No. 5, September, pp. 1055-1096. + +{p 0 4}Stock, J.H. and Yogo, M. 2005. Testing for Weak Instruments in Linear IV Regression. In D.W.K. Andrews and J.H. Stock, eds. Identification and Inference for Econometric Models: Essays in Honor of Thomas Rothenberg. Cambridge: Cambridge University Press, 2005, pp. 80�108. +Working paper version: NBER Technical Working Paper 284. +{browse "http://www.nber.org/papers/T0284":http://www.nber.org/papers/T0284}. + +{p 0 4}Thompson, S.B. 2009. 
Simple Formulas for Standard Errors that Cluster by Both Firm and Time. +{browse "http://ssrn.com/abstract=914002":http://ssrn.com/abstract=914002}. + +{p 0 4}Wooldridge, J.M. 2002. Econometric Analysis of Cross Section and Panel Data. Cambridge, MA: MIT Press. + + +{marker s_acknow}{title:Acknowledgements} + +{p}We would like to thanks various colleagues who helped us along the way, including +David Drukker, +Frank Kleibergen, +Austin Nichols, +Brian Poi, +Vince Wiggins, +and, not least, the users of {cmd:ivreg2} +who have provided suggestions, +spotted bugs, +and helped test the package. +We are also grateful to Jim Stock and Moto Yogo for permission to reproduce +their critical values for the Cragg-Donald statistic. + +{marker s_citation}{title:Citation of ivreg2} + +{p}{cmd:ivreg2} is not an official Stata command. It is a free contribution +to the research community, like a paper. Please cite it as such: {p_end} + +{phang}Baum, C.F., Schaffer, M.E., Stillman, S. 2010. +ivreg2: Stata module for extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression. 
+{browse "http://ideas.repec.org/c/boc/bocode/s425401.html":http://ideas.repec.org/c/boc/bocode/s425401.html}{p_end} + +{title:Authors} + + Christopher F Baum, Boston College, USA + baum@bc.edu + + Mark E Schaffer, Heriot-Watt University, UK + m.e.schaffer@hw.ac.uk + + Steven Stillman, Motu Economic and Public Policy Research + stillman@motu.org.nz + + +{title:Also see} + +{p 1 14}Articles:{it:Stata Journal}, volume 3, number 1: {browse "http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html":st0030}{p_end} +{p 10 14}{it:Stata Journal}, volume 7, number 4: {browse "http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html":st0030_3}{p_end} + +{p 1 14}Manual: {hi:[U] 23 Estimation and post-estimation commands}{p_end} +{p 10 14}{hi:[U] 29 Overview of model estimation in Stata}{p_end} +{p 10 14}{hi:[R] ivreg}{p_end} + +{p 1 10}On-line: help for {help ivregress}, {help ivreg}, {help newey}; +{help overid}, {help ivendog}, {help ivhettest}, {help ivreset}, +{help xtivreg2}, {help xtoverid}, {help ranktest}, +{help condivreg} (if installed); +{help weakiv} (if installed); +{help cgmreg} (if installed); +{help xtscc} (if installed); +{help est}, {help postest}; +{help regress}{p_end} diff --git a/110/replication_package/replication/ado/plus/i/ivreg210.ado b/110/replication_package/replication/ado/plus/i/ivreg210.ado new file mode 100644 index 0000000000000000000000000000000000000000..8e7b331a57ad627e4f461534d283d52c461e1ce5 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg210.ado @@ -0,0 +1,6951 @@ +*! ivreg210 3.1.10 19Jan2015 +*! authors cfb & mes +*! 
see end of file for version comments + +* Variable naming: +* lhs = LHS endogenous +* endo = X1, RHS endogenous (instrumented) = #K1 +* inexog = X2 = Z2 = included exogenous (instruments) = #K2 = #L2 +* exexog = Z1 = excluded exogenous (instruments) = #L1 +* iv = {inexog exexog} = all instruments +* rhs = {endo inexog} = RHS regressors +* 1 at the end of the name means the varlist after duplicates and collinearities removed +* ..1_ct at the end means a straight count of the list +* .._ct at the end means ..1_ct with any additional detected cnts removed +* dofminus is large-sample adjustment (e.g., #fixed effects) +* sdofminus is small-sample adjustment (e.g., #partialled-out regressors) + +*************************************** START **************************************** +********************************* livreg2.mlib CODE ********************************** +* Code from: +* livreg2 1.1.07 13july2014 +* authors cfb & mes +* compiled in Stata 9.2 +* Mata library for ivreg2 and ranktest. +* Introduced with ivreg2 v 3.1.01 and ranktest v 1.3.01. +* Imported into ivreg210 so that ivreg210 is free-standing. +* See end of file for version notes. + +version 9.2 +mata: + +// ********* struct ms_ivreg210_vcvorthog - shared by ivreg2 and ranktest ******************* // +struct ms_ivreg210_vcvorthog { + string scalar ename, Znames, touse, weight, wvarname + string scalar robust, clustvarname, clustvarname2, clustvarname3, kernel + string scalar sw, psd, ivarname, tvarname, tindexname + real scalar wf, N, bw, tdelta, dofminus + real matrix ZZ + pointer matrix e + pointer matrix Z + pointer matrix wvar +} + +// ********* s_ivreg210_vkernel - shared by ivreg2 and ranktest ******************* // +// Program checks whether kernel and bw choices are valid. +// s_ivreg210_vkernel is called from Stata. +// Arguments are the kernel name (req), bandwidth (req) and ivar name (opt). +// All 3 are strings. +// Returns results in r() macros. 
+// r(kernel) - name of kernel (string) +// r(bw) - bandwidth (scalar) + +void s_ivreg210_vkernel( string scalar kernel, + string scalar bwstring, + string scalar ivar + ) +{ + +// Check bandwidth + if (bwstring=="auto") { + bw=-1 + } + else { + bw=strtoreal(bwstring) + if (bw==.) { + printf("{err}bandwidth option bw() required for HAC-robust estimation\n") + exit(102) + } + if (bw<=0) { + printf("{err}invalid bandwidth in option bw() - must be real > 0\n") + exit(198) + } + } + +// Check ivar + if (bwstring=="auto" & ivar~="") { + printf("{err}Automatic bandwidth selection not available for panel data\n") + exit(198) + } + +// Check kernel +// Valid kernel list is abbrev, full name, whether special case if bw=1 +// First in list is default kernel = Barlett + vklist = ( ("", "bartlett", "0") + \ ("bar", "bartlett", "0") + \ ("bartlett", "bartlett", "0") + \ ("par", "parzen", "0") + \ ("parzen", "parzen", "0") + \ ("tru", "truncated", "1") + \ ("truncated", "truncated", "1") + \ ("thann", "tukey-hanning", "0") + \ ("tukey-hanning", "tukey-hanning", "0") + \ ("thamm", "tukey-hamming", "0") + \ ("tukey-hamming", "tukey-hamming", "0") + \ ("qua", "quadratic spectral", "1") + \ ("qs", "quadratic spectral", "1") + \ ("quadratic-spectral", "quadratic spectral", "1") + \ ("quadratic spectral", "quadratic spectral", "1") + \ ("dan", "danielle", "1") + \ ("danielle", "danielle", "1") + \ ("ten", "tent", "1") + \ ("tent", "tent", "1") + ) + kname=strltrim(strlower(kernel)) + pos = (vklist[.,1] :== kname) + +// Exit with error if not in list + if (sum(pos)==0) { + printf("{err}invalid kernel\n") + exit(198) + } + + vkname=strproper(select(vklist[.,2],pos)) + st_global("r(kernel)", vkname) + st_numscalar("r(bw)",bw) + +// Warn if kernel is type where bw=1 means no lags are used + if (bw==1 & select(vklist[.,3],pos)=="0") { + printf("{result}Note: kernel=%s", vkname) + printf("{result} and bw=1 implies zero lags used. 
Standard errors and\n") + printf("{result} test statistics are not autocorrelation-consistent.\n") + } +} // end of program s_ivreg210_vkernel + +// ********* m_ivreg210_omega - shared by ivreg2 and ranktest ********************* // + +// NB: ivreg2 always calls m_ivreg210_omega with e as column vector, i.e., K=1 // +// ranktest can call m_ivreg210_omega with e as matrix, i.e., K>=1 // + +real matrix m_ivreg210_omega(struct ms_ivreg210_vcvorthog scalar vcvo) +{ + if (vcvo.clustvarname~="") { + st_view(clustvar, ., vcvo.clustvarname, vcvo.touse) + info = panelsetup(clustvar, 1) + N_clust=rows(info) + if (vcvo.clustvarname2~="") { + st_view(clustvar2, ., vcvo.clustvarname2, vcvo.touse) + if (vcvo.kernel=="") { + st_view(clustvar3, ., vcvo.clustvarname3, vcvo.touse) // needed only if not panel tsset + } + } + } + + if (vcvo.kernel~="") { + st_view(t, ., st_tsrevar(vcvo.tvarname), vcvo.touse) + T=max(t)-min(t)+1 + } + + if ((vcvo.kernel=="Bartlett") | (vcvo.kernel=="Parzen") | (vcvo.kernel=="Truncated") /// + | (vcvo.kernel=="Tukey-Hanning")| (vcvo.kernel=="Tukey-Hamming")) { + window="lag" + } + else if ((vcvo.kernel=="Quadratic Spectral") | (vcvo.kernel=="Danielle") | (vcvo.kernel=="Tent")) { + window="spectral" + } + else if (vcvo.kernel~="") { +// Should never reach this point +printf("\n{error:Error: invalid kernel}\n") + exit(error(3351)) + } + + L=cols(*vcvo.Z) + K=cols(*vcvo.e) // ivreg2 always calls with K=1; ranktest may call with K>=1. + +// Covariance matrices +// shat * 1/N is same as estimated S matrix of orthog conditions + +// Block for homoskedastic and AC. dof correction if any incorporated into sigma estimates. 
+ if ((vcvo.robust=="") & (vcvo.clustvarname=="")) { +// ZZ is already calculated as an external + ee = quadcross(*vcvo.e, vcvo.wf*(*vcvo.wvar), *vcvo.e) + sigma2=ee/(vcvo.N-vcvo.dofminus) + shat=sigma2#vcvo.ZZ + if (vcvo.kernel~="") { + if (window=="spectral") { + TAU=T/vcvo.tdelta-1 + } + else { + TAU=vcvo.bw + } + tnow=st_data(., vcvo.tindexname) + for (tau=1; tau<=TAU; tau++) { + kw = m_ivreg210_calckw(tau, vcvo.bw, vcvo.kernel) + if (kw~=0) { // zero weight possible with some kernels + // save an unnecessary loop if kw=0 + // remember, kw<0 possible with some kernels! + lstau = "L"+strofreal(tau) + tlag=st_data(., lstau+"."+vcvo.tindexname) + tmatrix = tnow, tlag + svar=(tnow:<.):*(tlag:<.) // multiply column vectors of 1s and 0s + tmatrix=select(tmatrix,svar) // to get intersection, and replace tmatrix +// if no lags exist, tmatrix has zero rows. + if (rows(tmatrix)>0) { +// col 1 of tmatrix has row numbers of all rows of data with this time period that have a corresponding lag +// col 2 of tmatrix has row numbers of all rows of data with lag tau that have a corresponding ob this time period. +// Should never happen that fweights or iweights make it here, +// but if they did the next line would be sqrt(wvari)*sqrt(wvari1) [with no wf since not needed for fw or iw] + wv = (*vcvo.wvar)[tmatrix[.,1]] /// + :* (*vcvo.wvar)[tmatrix[.,2]]*(vcvo.wf^2) // inner weighting matrix for quadcross + sigmahat = quadcross((*vcvo.e)[tmatrix[.,1],.], wv ,(*vcvo.e)[tmatrix[.,2],.]) /// + / (vcvo.N-vcvo.dofminus) // large dof correction + ZZhat = quadcross((*vcvo.Z)[tmatrix[.,1],.], wv, (*vcvo.Z)[tmatrix[.,2],.]) + ghat = sigmahat#ZZhat + shat=shat+kw*(ghat+ghat') + } + } // end non-zero kernel weight block + } // end tau loop + } // end kernel code +// Note large dof correction (if there is one) has already been incorporated + shat=shat/vcvo.N + } // end homoskedastic, AC code + +// Block for robust HC and HAC but not Stock-Watson and single clustering. 
+// Need to enter for double-clustering if one cluster is time. + if ( (vcvo.robust~="") & (vcvo.sw=="") & ((vcvo.clustvarname=="") /// + | ((vcvo.clustvarname2~="") & (vcvo.kernel~=""))) ) { + if (K==1) { // simple/fast where e is a column vector + if ((vcvo.weight=="fweight") | (vcvo.weight=="iweight")) { + wv = (*vcvo.e:^2) :* *vcvo.wvar + } + else { + wv = (*vcvo.e :* *vcvo.wvar * vcvo.wf):^2 // wf needed for aweights and pweights + } + shat=quadcross(*vcvo.Z, wv, *vcvo.Z) // basic Eicker-Huber-White-sandwich-robust vce + } + else { // e is a matrix so must loop + shat=J(L*K,L*K,0) + for (i=1; i<=rows(*vcvo.e); i++) { + eZi=((*vcvo.e)[i,.])#((*vcvo.Z)[i,.]) + if ((vcvo.weight=="fweight") | (vcvo.weight=="iweight")) { +// wvar is a column vector. wf not needed for fw and iw (=1 by dfn so redundant). + shat=shat+quadcross(eZi,eZi)*((*vcvo.wvar)[i]) + } + else { + shat=shat+quadcross(eZi,eZi)*((*vcvo.wvar)[i] * vcvo.wf)^2 // **** ADDED *vcvo.wf + } + } + } + if (vcvo.kernel~="") { +// Spectral windows require looping through all T-1 autocovariances + if (window=="spectral") { + TAU=T/vcvo.tdelta-1 + } + else { + TAU=vcvo.bw + } + tnow=st_data(., vcvo.tindexname) + for (tau=1; tau<=TAU; tau++) { + kw = m_ivreg210_calckw(tau, vcvo.bw, vcvo.kernel) + if (kw~=0) { // zero weight possible with some kernels + // save an unnecessary loop if kw=0 + // remember, kw<0 possible with some kernels! + lstau = "L"+strofreal(tau) + tlag=st_data(., lstau+"."+vcvo.tindexname) + tmatrix = tnow, tlag + svar=(tnow:<.):*(tlag:<.) // multiply column vectors of 1s and 0s + tmatrix=select(tmatrix,svar) // to get intersection, and replace tmatrix + +// col 1 of tmatrix has row numbers of all rows of data with this time period that have a corresponding lag +// col 2 of tmatrix has row numbers of all rows of data with lag tau that have a corresponding ob this time period. 
+// if no lags exist, tmatrix has zero rows + if (rows(tmatrix)>0) { + if (K==1) { // simple/fast where e is a column vector +// wv is inner weighting matrix for quadcross + wv = (*vcvo.e)[tmatrix[.,1]] :* (*vcvo.e)[tmatrix[.,2]] /// + :* (*vcvo.wvar)[tmatrix[.,1]] :* (*vcvo.wvar)[tmatrix[.,2]] * (vcvo.wf^2) + ghat = quadcross((*vcvo.Z)[tmatrix[.,1],.], wv, (*vcvo.Z)[tmatrix[.,2],.]) + } + else { // e is a matrix so must loop + ghat=J(L*K,L*K,0) + for (i=1; i<=rows(tmatrix); i++) { + wvari =(*vcvo.wvar)[tmatrix[i,1]] + wvari1=(*vcvo.wvar)[tmatrix[i,2]] + ei =(*vcvo.e)[tmatrix[i,1],.] + ei1 =(*vcvo.e)[tmatrix[i,2],.] + Zi =(*vcvo.Z)[tmatrix[i,1],.] + Zi1 =(*vcvo.Z)[tmatrix[i,2],.] + eZi =ei#Zi + eZi1=ei1#Zi1 +// Should never happen that fweights or iweights make it here, but if they did +// the next line would be ghat=ghat+eZi'*eZi1*sqrt(wvari)*sqrt(wvari1) +// [without *vcvo.wf since wf=1 for fw and iw] + ghat=ghat+quadcross(eZi,eZi1)*wvari*wvari1 * (vcvo.wf^2) // ADDED * (vcvo.wf^2) + } + } + shat=shat+kw*(ghat+ghat') + } // end non-zero-obs accumulation block + } // end non-zero kernel weight block + } // end tau loop + } // end kernel code +// Incorporate large dof correction if there is one + shat=shat/(vcvo.N-vcvo.dofminus) + } // end HC/HAC code + + if (vcvo.clustvarname~="") { +// Block for cluster-robust +// 2-level clustering: S = S(level 1) + S(level 2) - S(level 3 = intersection of levels 1 & 2) +// Prepare shat3 if 2-level clustering + if (vcvo.clustvarname2~="") { + if (vcvo.kernel~="") { // second cluster variable is time + // shat3 was already calculated above as shat + shat3=shat*(vcvo.N-vcvo.dofminus) + } + else { // calculate shat3 + // data were sorted on clustvar3-clustvar1 so + // clustvar3 is nested in clustvar1 and Mata panel functions + // work for both. 
+ info3 = panelsetup(clustvar3, 1) + if (rows(info3)==rows(*vcvo.e)) { // intersection of levels 1 & 2 are all single obs + // so no need to loop through row by row + if (K==1) { // simple/fast where e is a column vector + wv = (*vcvo.e :* *vcvo.wvar * vcvo.wf):^2 + shat3=quadcross(*vcvo.Z, wv, *vcvo.Z) // basic Eicker-Huber-White-sandwich-robust vce + } + else { // e is a matrix so must loop + shat3=J(L*K,L*K,0) + for (i=1; i<=rows(*vcvo.e); i++) { + eZi=((*vcvo.e)[i,.])#((*vcvo.Z)[i,.]) + shat3=shat3+quadcross(eZi,eZi)*((*vcvo.wvar)[i] * vcvo.wf)^2 // **** ADDED *vcvo.wf + } + } + } + else { // intersection of levels 1 & 2 includes some groups of obs + N_clust3=rows(info3) + shat3=J(L*K,L*K,0) + for (i=1; i<=N_clust3; i++) { + esub=panelsubmatrix(*vcvo.e,i,info3) + Zsub=panelsubmatrix(*vcvo.Z,i,info3) + wsub=panelsubmatrix(*vcvo.wvar,i,info3) + wv = esub :* wsub * vcvo.wf + if (K==1) { // simple/fast where e is a column vector + eZ = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + } + else { + eZ = J(1,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZ=eZ+(esub[j,.]#Zsub[j,.])*wsub[j,.] 
* vcvo.wf // **** ADDED *vcvo.wf + } + } + shat3=shat3+quadcross(eZ,eZ) + } + } + } + } + +// 1st level of clustering, no kernel-robust +// Entered unless 1-level clustering and kernel-robust + if (!((vcvo.kernel~="") & (vcvo.clustvarname2==""))) { + shat=J(L*K,L*K,0) + for (i=1; i<=N_clust; i++) { // loop through clusters, adding Z'ee'Z + // for indiv cluster in each loop + esub=panelsubmatrix(*vcvo.e,i,info) + Zsub=panelsubmatrix(*vcvo.Z,i,info) + wsub=panelsubmatrix(*vcvo.wvar,i,info) + if (K==1) { // simple/fast if e is a column vector + wv = esub :* wsub * vcvo.wf + eZ = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + } + else { + eZ=J(1,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZ=eZ+(esub[j,.]#Zsub[j,.])*wsub[j,.]*vcvo.wf // **** ADDED *vcvo.wf + } + } + shat=shat+quadcross(eZ,eZ) + } // end loop through clusters + } + +// 2-level clustering, no kernel-robust + if ((vcvo.clustvarname2~="") & (vcvo.kernel=="")) { + imax=max(clustvar2) // clustvar2 is numbered 1..N_clust2 + shat2=J(L*K,L*K,0) + for (i=1; i<=imax; i++) { // loop through clusters, adding Z'ee'Z + // for indiv cluster in each loop + svar=(clustvar2:==i) // mimics panelsubmatrix but doesn't require sorted data + esub=select(*vcvo.e,svar) // it is, however, noticably slower. + Zsub=select(*vcvo.Z,svar) + wsub=select(*vcvo.wvar,svar) + if (K==1) { // simple/fast if e is a column vector + wv = esub :* wsub * vcvo.wf + eZ = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + } + else { + eZ=J(1,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZ=eZ+(esub[j,.]#Zsub[j,.])*wsub[j,.]*vcvo.wf // **** ADDED *vcvo.wf + } + } + shat2=shat2+quadcross(eZ,eZ) + } + } + +// 1st level of cluster, kernel-robust OR +// 2-level clustering, kernel-robust and time is 2nd cluster variable + if (vcvo.kernel~="") { + shat2=J(L*K,L*K,0) +// First, standard cluster-robust, i.e., no lags. 
+ i=min(t) + while (i<=max(t)) { // loop through all T clusters, adding Z'ee'Z + // for indiv cluster in each loop + eZ=J(1,L*K,0) + svar=(t:==i) // select obs with t=i + if (colsum(svar)>0) { // there are obs with t=i + esub=select(*vcvo.e,svar) + Zsub=select(*vcvo.Z,svar) + wsub=select(*vcvo.wvar,svar) + if (K==1) { // simple/fast if e is a column vector + wv = esub :* wsub * vcvo.wf + eZ = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + } + else { +// MISSING LINE IS NEXT + eZ=J(1,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZ=eZ+(esub[j,.]#Zsub[j,.])*wsub[j,.]*vcvo.wf // **** ADDED *vcvo.wf + } + } + shat2=shat2+quadcross(eZ,eZ) + } + i=i+vcvo.tdelta + } // end i loop through all T clusters + +// Spectral windows require looping through all T-1 autocovariances + if (window=="spectral") { + TAU=T/vcvo.tdelta-1 + } + else { + TAU=vcvo.bw + } + + for (tau=1; tau<=TAU; tau++) { + kw = m_ivreg210_calckw(tau, vcvo.bw, vcvo.kernel) // zero weight possible with some kernels + // save an unnecessary loop if kw=0 + // remember, kw<0 possible with some kernels! + if (kw~=0) { + i=min(t)+tau*vcvo.tdelta // Loop through all possible ts (time clusters) + while (i<=max(t)) { // Start at earliest possible t + svar=t[.,]:==i // svar is current, svar1 is tau-th lag + svar1=t[.,]:==(i-tau*vcvo.tdelta) // tau*vcvo.tdelta is usually just tau + if ((colsum(svar)>0) // there are current & lagged obs + & (colsum(svar1)>0)) { + wv = select((*vcvo.e),svar) :* select((*vcvo.wvar),svar) * vcvo.wf + wv1 = select((*vcvo.e),svar1) :* select((*vcvo.wvar),svar1) * vcvo.wf + Zsub =select((*vcvo.Z),svar) + Zsub1=select((*vcvo.Z),svar1) + if (K==1) { // simple/fast, e is column vector + eZsub = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + eZsub1= quadcross(1, wv1, Zsub1) // equivalent to colsum(wv :* Zsub) + } + else { + eZsub=J(1,L*K,0) + for (j=1; j<=rows(Zsub); j++) { + wvj =wv[j,.] + Zj =Zsub[j,.] 
+ eZsub=eZsub+(wvj#Zj) + } + eZsub1=J(1,L*K,0) + for (j=1; j<=rows(Zsub1); j++) { + wv1j =wv1[j,.] + Z1j =Zsub1[j,.] + eZsub1=eZsub1+(wv1j#Z1j) + } + } + ghat=quadcross(eZsub,eZsub1) + shat2=shat2+kw*(ghat+ghat') + } + i=i+vcvo.tdelta + } + } // end non-zero kernel weight block + } // end tau loop + +// If 1-level clustering, shat2 just calculated above is actually the desired shat + if (vcvo.clustvarname2=="") { + shat=shat2 + } + } + +// 2-level clustering, completion +// Cameron-Gelbach-Miller/Thompson method: +// Add 2 cluster variance matrices and subtract 3rd + if (vcvo.clustvarname2~="") { + shat = shat+shat2-shat3 + } + +// Note no dof correction required for cluster-robust + shat=shat/vcvo.N + } // end cluster-robust code + + if (vcvo.sw~="") { +// Stock-Watson adjustment. Calculate Bhat in their equation (6). Also need T=panel length. +// They define for balanced panels. Since T is not constant for unbalanced panels, need +// to incorporate panel-varying 1/T, 1/(T-1) and 1/(T-2) as weights in summation. + + st_view(ivar, ., st_tsrevar(vcvo.ivarname), vcvo.touse) + info_ivar = panelsetup(ivar, 1) + + shat=J(L*K,L*K,0) + bhat=J(L*K,L*K,0) + N_panels=0 + for (i=1; i<=rows(info_ivar); i++) { + esub=panelsubmatrix(*vcvo.e,i,info_ivar) + Zsub=panelsubmatrix(*vcvo.Z,i,info_ivar) + wsub=panelsubmatrix(*vcvo.wvar,i,info_ivar) + Tsub=rows(esub) + if (Tsub>2) { // SW cov estimator defined only for T>2 + N_panels=N_panels+1 + sigmahatsub=J(K,K,0) + ZZsub=J(L*K,L*K,0) + shatsub=J(L*K,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZi=esub[j,1]#Zsub[j,.] 
+ if ((vcvo.weight=="fweight") | (vcvo.weight=="iweight")) { + shatsub=shatsub+quadcross(eZi,eZi)*wsub[j]*vcvo.wf + sigmahatsub=sigmahatsub + quadcross(esub[j,1],esub[j,1])*wsub[j]*vcvo.wf + ZZsub=ZZsub+quadcross(Zsub[j,.],Zsub[j,.])*wsub[j]*vcvo.wf + } + else { + shatsub=shatsub+quadcross(eZi,eZi)*((wsub[j]*vcvo.wf)^2) + sigmahatsub=sigmahatsub + quadcross(esub[j,1],esub[j,1])*((wsub[j]*vcvo.wf)^2) + ZZsub=ZZsub+quadcross(Zsub[j,.],Zsub[j,.])*((wsub[j]*vcvo.wf)^2) + } + } // end loop through j obs of panel i + shat=shat + shatsub*(Tsub-1)/(Tsub-2) + bhat=bhat + ZZsub/Tsub#sigmahatsub/(Tsub-1)/(Tsub-2) + } + } // end loop through i panels + +// Note that Stock-Watson incorporate an N-n-k degrees of freedom correction in their eqn 4 +// for what we call shat. We use only an N-n degrees of freedom correction, i.e., we ignore +// the k regressors. This is because this is an estimate of S, the VCV of orthogonality conditions, +// independently of its use to obtain an estimate of the variance of beta. Makes no diff aysmptotically. +// Ignore dofminus correction since this is explicitly handled here. +// Use number of valid panels in denominator (SW cov estimator defined only for panels with T>2). + shat=shat/(vcvo.N-N_panels) + bhat=bhat/N_panels + shat=shat-bhat + } // end Stock-Watson block + + _makesymmetric(shat) + +// shat may not be positive-definite. Use spectral decomposition to obtain an invertable version. +// Extract Eigenvector and Eigenvalues, replace EVs, and reassemble shat. +// psda option: Stock-Watson 2008 Econometrica, Remark 8, say replace neg EVs with abs(EVs). +// psd0 option: Politis (2007) says replace neg EVs with zeros. 
+ if (vcvo.psd~="") { + symeigensystem(shat,Evec,Eval) + if (vcvo.psd=="psda") { + Eval = abs(Eval) + } + else { + Eval = Eval + (abs(Eval) - Eval)/2 + } + shat = Evec*diag(Eval)*Evec' + _makesymmetric(shat) + } + + return(shat) + +} // end of program m_ivreg210_omega + +// *********************************************************************** // +// ********* m_ivreg210_calckw - shared by ivreg2 and ranktest ********************* // +// *********************************************************************** // + +real scalar m_ivreg210_calckw( real scalar tau, + real scalar bw, + string scalar kernel) + { + karg = tau / bw + if (kernel=="Truncated") { + kw=1 + } + if (kernel=="Bartlett") { + kw=(1-karg) + } + if (kernel=="Parzen") { + if (karg <= 0.5) { + kw = 1-6*karg^2+6*karg^3 + } + else { + kw = 2*(1-karg)^3 + } + } + if (kernel=="Tukey-Hanning") { + kw=0.5+0.5*cos(pi()*karg) + } + if (kernel=="Tukey-Hamming") { + kw=0.54+0.46*cos(pi()*karg) + } + if (kernel=="Tent") { + kw=2*(1-cos(tau*karg)) / (karg^2) + } + if (kernel=="Danielle") { + kw=sin(pi()*karg) / (pi()*karg) + } + if (kernel=="Quadratic Spectral") { + kw=25/(12*pi()^2*karg^2) /* + */ * ( sin(6*pi()*karg/5)/(6*pi()*karg/5) /* + */ - cos(6*pi()*karg/5) ) + } + return(kw) + } // end kw + +// *********************************************************************** // +// ********* END CODE SHARED BY ivreg2 AND ranktest ******************** // +// *********************************************************************** // + +// cdsy: used by ivreg2 + +void s_ivreg210_cdsy( string scalar temp, scalar choice) +{ +string scalar s_ivbias5, s_ivbias10, s_ivbias20, s_ivbias30 +string scalar s_ivsize10, s_ivsize15, s_ivsize20, s_ivsize25 +string scalar s_fullrel5, s_fullrel10, s_fullrel20, s_fullrel30 +string scalar s_fullmax5, s_fullmax10, s_fullmax20, s_fullmax30 +string scalar s_limlsize10, s_limlsize15, s_limlsize20, s_limlsize25 + +s_ivbias5 = +". , . , . \ . , . , . \ 13.91 , . , . \ 16.85 , 11.04 , . 
\ 18.37 , 13.97 , 9.53 \ 19.28 , 15.72 , 12.20 \ 19.86 , 16.88 , 13.95 \ 20.25 , 17.70 , 15.18 \ 20.53 , 18.30 , 16.10 \ 20.74 , 18.76 , 16.80 \ 20.90 , 19.12 , 17.35 \ 21.01 , 19.40 , 17.80 \ 21.10 , 19.64 , 18.17 \ 21.18 , 19.83 , 18.47 \ 21.23 , 19.98 , 18.73 \ 21.28 , 20.12 , 18.94 \ 21.31 , 20.23 , 19.13 \ 21.34 , 20.33 , 19.29 \ 21.36 , 20.41 , 19.44 \ 21.38 , 20.48 , 19.56 \ 21.39 , 20.54 , 19.67 \ 21.40 , 20.60 , 19.77 \ 21.41 , 20.65 , 19.86 \ 21.41 , 20.69 , 19.94 \ 21.42 , 20.73 , 20.01 \ 21.42 , 20.76 , 20.07 \ 21.42 , 20.79 , 20.13 \ 21.42 , 20.82 , 20.18 \ 21.42 , 20.84 , 20.23 \ 21.42 , 20.86 , 20.27 \ 21.41 , 20.88 , 20.31 \ 21.41 , 20.90 , 20.35 \ 21.41 , 20.91 , 20.38 \ 21.40 , 20.93 , 20.41 \ 21.40 , 20.94 , 20.44 \ 21.39 , 20.95 , 20.47 \ 21.39 , 20.96 , 20.49 \ 21.38 , 20.97 , 20.51 \ 21.38 , 20.98 , 20.54 \ 21.37 , 20.99 , 20.56 \ 21.37 , 20.99 , 20.57 \ 21.36 , 21.00 , 20.59 \ 21.35 , 21.00 , 20.61 \ 21.35 , 21.01 , 20.62 \ 21.34 , 21.01 , 20.64 \ 21.34 , 21.02 , 20.65 \ 21.33 , 21.02 , 20.66 \ 21.32 , 21.02 , 20.67 \ 21.32 , 21.03 , 20.68 \ 21.31 , 21.03 , 20.69 \ 21.31 , 21.03 , 20.70 \ 21.30 , 21.03 , 20.71 \ 21.30 , 21.03 , 20.72 \ 21.29 , 21.03 , 20.73 \ 21.28 , 21.03 , 20.73 \ 21.28 , 21.04 , 20.74 \ 21.27 , 21.04 , 20.75 \ 21.27 , 21.04 , 20.75 \ 21.26 , 21.04 , 20.76 \ 21.26 , 21.04 , 20.76 \ 21.25 , 21.04 , 20.77 \ 21.24 , 21.04 , 20.77 \ 21.24 , 21.04 , 20.78 \ 21.23 , 21.04 , 20.78 \ 21.23 , 21.03 , 20.79 \ 21.22 , 21.03 , 20.79 \ 21.22 , 21.03 , 20.79 \ 21.21 , 21.03 , 20.80 \ 21.21 , 21.03 , 20.80 \ 21.20 , 21.03 , 20.80 \ 21.20 , 21.03 , 20.80 \ 21.19 , 21.03 , 20.81 \ 21.19 , 21.03 , 20.81 \ 21.18 , 21.03 , 20.81 \ 21.18 , 21.02 , 20.81 \ 21.17 , 21.02 , 20.82 \ 21.17 , 21.02 , 20.82 \ 21.16 , 21.02 , 20.82 \ 21.16 , 21.02 , 20.82 \ 21.15 , 21.02 , 20.82 \ 21.15 , 21.02 , 20.82 \ 21.15 , 21.02 , 20.83 \ 21.14 , 21.01 , 20.83 \ 21.14 , 21.01 , 20.83 \ 21.13 , 21.01 , 20.83 \ 21.13 , 21.01 , 20.83 \ 21.12 , 21.01 , 20.84 \ 21.12 
, 21.01 , 20.84 \ 21.11 , 21.01 , 20.84 \ 21.11 , 21.01 , 20.84 \ 21.10 , 21.00 , 20.84 \ 21.10 , 21.00 , 20.84 \ 21.09 , 21.00 , 20.85 \ 21.09 , 21.00 , 20.85 \ 21.08 , 21.00 , 20.85 \ 21.08 , 21.00 , 20.85 \ 21.07 , 21.00 , 20.85 \ 21.07 , 20.99 , 20.86 \ 21.06 , 20.99 , 20.86 \ 21.06 , 20.99 , 20.86 \" +ivbias5 = strtoreal(colshape(colshape(tokens(s_ivbias5), 2)[.,1], 3)) + +s_ivbias10 = +". , . , . \ . , . , . \ 9.08 , . , . \ 10.27 , 7.56 , . \ 10.83 , 8.78 , 6.61 \ 11.12 , 9.48 , 7.77 \ 11.29 , 9.92 , 8.5 \ 11.39 , 10.22 , 9.01 \ 11.46 , 10.43 , 9.37 \ 11.49 , 10.58 , 9.64 \ 11.51 , 10.69 , 9.85 \ 11.52 , 10.78 , 10.01 \ 11.52 , 10.84 , 10.14 \ 11.52 , 10.89 , 10.25 \ 11.51 , 10.93 , 10.33 \ 11.5 , 10.96 , 10.41 \ 11.49 , 10.99 , 10.47 \ 11.48 , 11 , 10.52 \ 11.46 , 11.02 , 10.56 \ 11.45 , 11.03 , 10.6 \ 11.44 , 11.04 , 10.63 \ 11.42 , 11.05 , 10.65 \ 11.41 , 11.05 , 10.68 \ 11.4 , 11.05 , 10.7 \ 11.38 , 11.06 , 10.71 \ 11.37 , 11.06 , 10.73 \ 11.36 , 11.06 , 10.74 \ 11.34 , 11.05 , 10.75 \ 11.33 , 11.05 , 10.76 \ 11.32 , 11.05 , 10.77 \ 11.3 , 11.05 , 10.78 \ 11.29 , 11.05 , 10.79 \ 11.28 , 11.04 , 10.79 \ 11.27 , 11.04 , 10.8 \ 11.26 , 11.04 , 10.8 \ 11.25 , 11.03 , 10.8 \ 11.24 , 11.03 , 10.81 \ 11.23 , 11.02 , 10.81 \ 11.22 , 11.02 , 10.81 \ 11.21 , 11.02 , 10.81 \ 11.2 , 11.01 , 10.81 \ 11.19 , 11.01 , 10.81 \ 11.18 , 11 , 10.81 \ 11.17 , 11 , 10.81 \ 11.16 , 10.99 , 10.81 \ 11.15 , 10.99 , 10.81 \ 11.14 , 10.98 , 10.81 \ 11.13 , 10.98 , 10.81 \ 11.13 , 10.98 , 10.81 \ 11.12 , 10.97 , 10.81 \ 11.11 , 10.97 , 10.81 \ 11.1 , 10.96 , 10.81 \ 11.1 , 10.96 , 10.81 \ 11.09 , 10.95 , 10.81 \ 11.08 , 10.95 , 10.81 \ 11.07 , 10.94 , 10.8 \ 11.07 , 10.94 , 10.8 \ 11.06 , 10.94 , 10.8 \ 11.05 , 10.93 , 10.8 \ 11.05 , 10.93 , 10.8 \ 11.04 , 10.92 , 10.8 \ 11.03 , 10.92 , 10.79 \ 11.03 , 10.92 , 10.79 \ 11.02 , 10.91 , 10.79 \ 11.02 , 10.91 , 10.79 \ 11.01 , 10.9 , 10.79 \ 11 , 10.9 , 10.79 \ 11 , 10.9 , 10.78 \ 10.99 , 10.89 , 10.78 \ 10.99 , 10.89 , 10.78 \ 10.98 , 
10.89 , 10.78 \ 10.98 , 10.88 , 10.78 \ 10.97 , 10.88 , 10.77 \ 10.97 , 10.88 , 10.77 \ 10.96 , 10.87 , 10.77 \ 10.96 , 10.87 , 10.77 \ 10.95 , 10.86 , 10.77 \ 10.95 , 10.86 , 10.76 \ 10.94 , 10.86 , 10.76 \ 10.94 , 10.85 , 10.76 \ 10.93 , 10.85 , 10.76 \ 10.93 , 10.85 , 10.76 \ 10.92 , 10.84 , 10.75 \ 10.92 , 10.84 , 10.75 \ 10.91 , 10.84 , 10.75 \ 10.91 , 10.84 , 10.75 \ 10.91 , 10.83 , 10.75 \ 10.9 , 10.83 , 10.74 \ 10.9 , 10.83 , 10.74 \ 10.89 , 10.82 , 10.74 \ 10.89 , 10.82 , 10.74 \ 10.89 , 10.82 , 10.74 \ 10.88 , 10.81 , 10.74 \ 10.88 , 10.81 , 10.73 \ 10.87 , 10.81 , 10.73 \ 10.87 , 10.81 , 10.73 \ 10.87 , 10.8 , 10.73 \ 10.86 , 10.8 , 10.73 \ 10.86 , 10.8 , 10.72 \ 10.86 , 10.8 , 10.72 \" +ivbias10 = strtoreal(colshape(colshape(tokens(s_ivbias10), 2)[.,1], 3)) + +s_ivbias20 = +" . , . , . \ . , . , . \ 6.46 , . , . \ 6.71 , 5.57 , . \ 6.77 , 5.91 , 4.99 \ 6.76 , 6.08 , 5.35 \ 6.73 , 6.16 , 5.56 \ 6.69 , 6.20 , 5.69 \ 6.65 , 6.22 , 5.78 \ 6.61 , 6.23 , 5.83 \ 6.56 , 6.23 , 5.87 \ 6.53 , 6.22 , 5.90 \ 6.49 , 6.21 , 5.92 \ 6.45 , 6.20 , 5.93 \ 6.42 , 6.19 , 5.94 \ 6.39 , 6.17 , 5.94 \ 6.36 , 6.16 , 5.94 \ 6.33 , 6.14 , 5.94 \ 6.31 , 6.13 , 5.94 \ 6.28 , 6.11 , 5.93 \ 6.26 , 6.10 , 5.93 \ 6.24 , 6.08 , 5.92 \ 6.22 , 6.07 , 5.92 \ 6.20 , 6.06 , 5.91 \ 6.18 , 6.05 , 5.90 \ 6.16 , 6.03 , 5.90 \ 6.14 , 6.02 , 5.89 \ 6.13 , 6.01 , 5.88 \ 6.11 , 6.00 , 5.88 \ 6.09 , 5.99 , 5.87 \ 6.08 , 5.98 , 5.87 \ 6.07 , 5.97 , 5.86 \ 6.05 , 5.96 , 5.85 \ 6.04 , 5.95 , 5.85 \ 6.03 , 5.94 , 5.84 \ 6.01 , 5.93 , 5.83 \ 6.00 , 5.92 , 5.83 \ 5.99 , 5.91 , 5.82 \ 5.98 , 5.90 , 5.82 \ 5.97 , 5.89 , 5.81 \ 5.96 , 5.89 , 5.80 \ 5.95 , 5.88 , 5.80 \ 5.94 , 5.87 , 5.79 \ 5.93 , 5.86 , 5.79 \ 5.92 , 5.86 , 5.78 \ 5.91 , 5.85 , 5.78 \ 5.91 , 5.84 , 5.77 \ 5.90 , 5.83 , 5.77 \ 5.89 , 5.83 , 5.76 \ 5.88 , 5.82 , 5.76 \ 5.87 , 5.82 , 5.75 \ 5.87 , 5.81 , 5.75 \ 5.86 , 5.80 , 5.74 \ 5.85 , 5.80 , 5.74 \ 5.85 , 5.79 , 5.73 \ 5.84 , 5.79 , 5.73 \ 5.83 , 5.78 , 5.72 \ 5.83 , 5.78 , 5.72 \ 5.82 , 
5.77 , 5.72 \ 5.81 , 5.77 , 5.71 \ 5.81 , 5.76 , 5.71 \ 5.80 , 5.76 , 5.70 \ 5.80 , 5.75 , 5.70 \ 5.79 , 5.75 , 5.70 \ 5.78 , 5.74 , 5.69 \ 5.78 , 5.74 , 5.69 \ 5.77 , 5.73 , 5.68 \ 5.77 , 5.73 , 5.68 \ 5.76 , 5.72 , 5.68 \ 5.76 , 5.72 , 5.67 \ 5.75 , 5.72 , 5.67 \ 5.75 , 5.71 , 5.67 \ 5.75 , 5.71 , 5.66 \ 5.74 , 5.70 , 5.66 \ 5.74 , 5.70 , 5.66 \ 5.73 , 5.70 , 5.65 \ 5.73 , 5.69 , 5.65 \ 5.72 , 5.69 , 5.65 \ 5.72 , 5.68 , 5.65 \ 5.71 , 5.68 , 5.64 \ 5.71 , 5.68 , 5.64 \ 5.71 , 5.67 , 5.64 \ 5.70 , 5.67 , 5.63 \ 5.70 , 5.67 , 5.63 \ 5.70 , 5.66 , 5.63 \ 5.69 , 5.66 , 5.62 \ 5.69 , 5.66 , 5.62 \ 5.68 , 5.65 , 5.62 \ 5.68 , 5.65 , 5.62 \ 5.68 , 5.65 , 5.61 \ 5.67 , 5.65 , 5.61 \ 5.67 , 5.64 , 5.61 \ 5.67 , 5.64 , 5.61 \ 5.66 , 5.64 , 5.60 \ 5.66 , 5.63 , 5.60 \ 5.66 , 5.63 , 5.60 \ 5.65 , 5.63 , 5.60 \ 5.65 , 5.63 , 5.59 \ 5.65 , 5.62 , 5.59 \ 5.65 , 5.62 , 5.59 \" +ivbias20 = strtoreal(colshape(colshape(tokens(s_ivbias20), 2)[.,1], 3)) + +s_ivbias30 = +" . , . , . \ . , . , . \ 5.39 , . , . \ 5.34 , 4.73 , . 
\ 5.25 , 4.79 , 4.30 \ 5.15 , 4.78 , 4.40 \ 5.07 , 4.76 , 4.44 \ 4.99 , 4.73 , 4.46 \ 4.92 , 4.69 , 4.46 \ 4.86 , 4.66 , 4.45 \ 4.80 , 4.62 , 4.44 \ 4.75 , 4.59 , 4.42 \ 4.71 , 4.56 , 4.41 \ 4.67 , 4.53 , 4.39 \ 4.63 , 4.50 , 4.37 \ 4.59 , 4.48 , 4.36 \ 4.56 , 4.45 , 4.34 \ 4.53 , 4.43 , 4.32 \ 4.51 , 4.41 , 4.31 \ 4.48 , 4.39 , 4.29 \ 4.46 , 4.37 , 4.28 \ 4.43 , 4.35 , 4.27 \ 4.41 , 4.33 , 4.25 \ 4.39 , 4.32 , 4.24 \ 4.37 , 4.30 , 4.23 \ 4.35 , 4.29 , 4.21 \ 4.34 , 4.27 , 4.20 \ 4.32 , 4.26 , 4.19 \ 4.31 , 4.24 , 4.18 \ 4.29 , 4.23 , 4.17 \ 4.28 , 4.22 , 4.16 \ 4.26 , 4.21 , 4.15 \ 4.25 , 4.20 , 4.14 \ 4.24 , 4.19 , 4.13 \ 4.23 , 4.18 , 4.13 \ 4.22 , 4.17 , 4.12 \ 4.20 , 4.16 , 4.11 \ 4.19 , 4.15 , 4.10 \ 4.18 , 4.14 , 4.09 \ 4.17 , 4.13 , 4.09 \ 4.16 , 4.12 , 4.08 \ 4.15 , 4.11 , 4.07 \ 4.15 , 4.11 , 4.07 \ 4.14 , 4.10 , 4.06 \ 4.13 , 4.09 , 4.05 \ 4.12 , 4.08 , 4.05 \ 4.11 , 4.08 , 4.04 \ 4.11 , 4.07 , 4.03 \ 4.10 , 4.06 , 4.03 \ 4.09 , 4.06 , 4.02 \ 4.08 , 4.05 , 4.02 \ 4.08 , 4.05 , 4.01 \ 4.07 , 4.04 , 4.01 \ 4.06 , 4.03 , 4.00 \ 4.06 , 4.03 , 4.00 \ 4.05 , 4.02 , 3.99 \ 4.05 , 4.02 , 3.99 \ 4.04 , 4.01 , 3.98 \ 4.04 , 4.01 , 3.98 \ 4.03 , 4.00 , 3.97 \ 4.02 , 4.00 , 3.97 \ 4.02 , 3.99 , 3.96 \ 4.01 , 3.99 , 3.96 \ 4.01 , 3.98 , 3.96 \ 4.00 , 3.98 , 3.95 \ 4.00 , 3.97 , 3.95 \ 3.99 , 3.97 , 3.94 \ 3.99 , 3.97 , 3.94 \ 3.99 , 3.96 , 3.94 \ 3.98 , 3.96 , 3.93 \ 3.98 , 3.95 , 3.93 \ 3.97 , 3.95 , 3.93 \ 3.97 , 3.95 , 3.92 \ 3.96 , 3.94 , 3.92 \ 3.96 , 3.94 , 3.92 \ 3.96 , 3.93 , 3.91 \ 3.95 , 3.93 , 3.91 \ 3.95 , 3.93 , 3.91 \ 3.95 , 3.92 , 3.90 \ 3.94 , 3.92 , 3.90 \ 3.94 , 3.92 , 3.90 \ 3.93 , 3.91 , 3.89 \ 3.93 , 3.91 , 3.89 \ 3.93 , 3.91 , 3.89 \ 3.92 , 3.91 , 3.89 \ 3.92 , 3.90 , 3.88 \ 3.92 , 3.90 , 3.88 \ 3.91 , 3.90 , 3.88 \ 3.91 , 3.89 , 3.87 \ 3.91 , 3.89 , 3.87 \ 3.91 , 3.89 , 3.87 \ 3.90 , 3.89 , 3.87 \ 3.90 , 3.88 , 3.86 \ 3.90 , 3.88 , 3.86 \ 3.89 , 3.88 , 3.86 \ 3.89 , 3.87 , 3.86 \ 3.89 , 3.87 , 3.85 \ 3.89 , 3.87 , 3.85 \ 3.88 , 3.87 , 3.85 \ 
3.88 , 3.86 , 3.85 \" +ivbias30 = strtoreal(colshape(colshape(tokens(s_ivbias30), 2)[.,1], 3)) + + +s_ivsize10 = +"16.38 , . \ 19.93 , 7.03 \ 22.3 , 13.43 \ 24.58 , 16.87 \ 26.87 , 19.45 \ 29.18 , 21.68 \ 31.5 , 23.72 \ 33.84 , 25.64 \ 36.19 , 27.51 \ 38.54 , 29.32 \ 40.9 , 31.11 \ 43.27 , 32.88 \ 45.64 , 34.62 \ 48.01 , 36.36 \ 50.39 , 38.08 \ 52.77 , 39.8 \ 55.15 , 41.51 \ 57.53 , 43.22 \ 59.92 , 44.92 \ 62.3 , 46.62 \ 64.69 , 48.31 \ 67.07 , 50.01 \ 69.46 , 51.7 \ 71.85 , 53.39 \ 74.24 , 55.07 \ 76.62 , 56.76 \ 79.01 , 58.45 \ 81.4 , 60.13 \ 83.79 , 61.82 \ 86.17 , 63.51 \ 88.56 , 65.19 \ 90.95 , 66.88 \ 93.33 , 68.56 \ 95.72 , 70.25 \ 98.11 , 71.94 \ 100.5 , 73.62 \ 102.88 , 75.31 \ 105.27 , 76.99 \ 107.66 , 78.68 \ 110.04 , 80.37 \ 112.43 , 82.05 \ 114.82 , 83.74 \ 117.21 , 85.42 \ 119.59 , 87.11 \ 121.98 , 88.8 \ 124.37 , 90.48 \ 126.75 , 92.17 \ 129.14 , 93.85 \ 131.53 , 95.54 \ 133.92 , 97.23 \ 136.3 , 98.91 \ 138.69 , 100.6 \ 141.08 , 102.29 \ 143.47 , 103.97 \ 145.85 , 105.66 \ 148.24 , 107.34 \ 150.63 , 109.03 \ 153.01 , 110.72 \ 155.4 , 112.4 \ 157.79 , 114.09 \ 160.18 , 115.77 \ 162.56 , 117.46 \ 164.95 , 119.15 \ 167.34 , 120.83 \ 169.72 , 122.52 \ 172.11 , 124.2 \ 174.5 , 125.89 \ 176.89 , 127.58 \ 179.27 , 129.26 \ 181.66 , 130.95 \ 184.05 , 132.63 \ 186.44 , 134.32 \ 188.82 , 136.01 \ 191.21 , 137.69 \ 193.6 , 139.38 \ 195.98 , 141.07 \ 198.37 , 142.75 \ 200.76 , 144.44 \ 203.15 , 146.12 \ 205.53 , 147.81 \ 207.92 , 149.5 \ 210.31 , 151.18 \ 212.69 , 152.87 \ 215.08 , 154.55 \ 217.47 , 156.24 \ 219.86 , 157.93 \ 222.24 , 159.61 \ 224.63 , 161.3 \ 227.02 , 162.98 \ 229.41 , 164.67 \ 231.79 , 166.36 \ 234.18 , 168.04 \ 236.57 , 169.73 \ 238.95 , 171.41 \ 241.34 , 173.1 \ 243.73 , 174.79 \ 246.12 , 176.47 \ 248.5 , 178.16 \ 250.89 , 179.84 \ 253.28 , 181.53 \" +ivsize10 = strtoreal(colshape(colshape(tokens(s_ivsize10), 2)[.,1], 2)) + +s_ivsize15 = + "8.96 , . 
\ 11.59 , 4.58 \ 12.83 , 8.18 \ 13.96 , 9.93 \ 15.09 , 11.22 \ 16.23 , 12.33 \ 17.38 , 13.34 \ 18.54 , 14.31 \ 19.71 , 15.24 \ 20.88 , 16.16 \ 22.06 , 17.06 \ 23.24 , 17.95 \ 24.42 , 18.84 \ 25.61 , 19.72 \ 26.80 , 20.60 \ 27.99 , 21.48 \ 29.19 , 22.35 \ 30.38 , 23.22 \ 31.58 , 24.09 \ 32.77 , 24.96 \ 33.97 , 25.82 \ 35.17 , 26.69 \ 36.37 , 27.56 \ 37.57 , 28.42 \ 38.77 , 29.29 \ 39.97 , 30.15 \ 41.17 , 31.02 \ 42.37 , 31.88 \ 43.57 , 32.74 \ 44.78 , 33.61 \ 45.98 , 34.47 \ 47.18 , 35.33 \ 48.38 , 36.19 \ 49.59 , 37.06 \ 50.79 , 37.92 \ 51.99 , 38.78 \ 53.19 , 39.64 \ 54.40 , 40.50 \ 55.60 , 41.37 \ 56.80 , 42.23 \ 58.01 , 43.09 \ 59.21 , 43.95 \ 60.41 , 44.81 \ 61.61 , 45.68 \ 62.82 , 46.54 \ 64.02 , 47.40 \ 65.22 , 48.26 \ 66.42 , 49.12 \ 67.63 , 49.99 \ 68.83 , 50.85 \ 70.03 , 51.71 \ 71.24 , 52.57 \ 72.44 , 53.43 \ 73.64 , 54.30 \ 74.84 , 55.16 \ 76.05 , 56.02 \ 77.25 , 56.88 \ 78.45 , 57.74 \ 79.66 , 58.61 \ 80.86 , 59.47 \ 82.06 , 60.33 \ 83.26 , 61.19 \ 84.47 , 62.05 \ 85.67 , 62.92 \ 86.87 , 63.78 \ 88.07 , 64.64 \ 89.28 , 65.50 \ 90.48 , 66.36 \ 91.68 , 67.22 \ 92.89 , 68.09 \ 94.09 , 68.95 \ 95.29 , 69.81 \ 96.49 , 70.67 \ 97.70 , 71.53 \ 98.90 , 72.40 \ 100.10 , 73.26 \ 101.30 , 74.12 \ 102.51 , 74.98 \ 103.71 , 75.84 \ 104.91 , 76.71 \ 106.12 , 77.57 \ 107.32 , 78.43 \ 108.52 , 79.29 \ 109.72 , 80.15 \ 110.93 , 81.02 \ 112.13 , 81.88 \ 113.33 , 82.74 \ 114.53 , 83.60 \ 115.74 , 84.46 \ 116.94 , 85.33 \ 118.14 , 86.19 \ 119.35 , 87.05 \ 120.55 , 87.91 \ 121.75 , 88.77 \ 122.95 , 89.64 \ 124.16 , 90.50 \ 125.36 , 91.36 \ 126.56 , 92.22 \ 127.76 , 93.08 \ 128.97 , 93.95 \" +ivsize15 = strtoreal(colshape(colshape(tokens(s_ivsize15), 2)[.,1], 2)) + +s_ivsize20 = + " 6.66 , . 
\ 8.75 , 3.95 \ 9.54 , 6.40 \ 10.26 , 7.54 \ 10.98 , 8.38 \ 11.72 , 9.10 \ 12.48 , 9.77 \ 13.24 , 10.41 \ 14.01 , 11.03 \ 14.78 , 11.65 \ 15.56 , 12.25 \ 16.35 , 12.86 \ 17.14 , 13.45 \ 17.93 , 14.05 \ 18.72 , 14.65 \ 19.51 , 15.24 \ 20.31 , 15.83 \ 21.10 , 16.42 \ 21.90 , 17.02 \ 22.70 , 17.61 \ 23.50 , 18.20 \ 24.30 , 18.79 \ 25.10 , 19.38 \ 25.90 , 19.97 \ 26.71 , 20.56 \ 27.51 , 21.15 \ 28.31 , 21.74 \ 29.12 , 22.33 \ 29.92 , 22.92 \ 30.72 , 23.51 \ 31.53 , 24.10 \ 32.33 , 24.69 \ 33.14 , 25.28 \ 33.94 , 25.87 \ 34.75 , 26.46 \ 35.55 , 27.05 \ 36.36 , 27.64 \ 37.17 , 28.23 \ 37.97 , 28.82 \ 38.78 , 29.41 \ 39.58 , 30.00 \ 40.39 , 30.59 \ 41.20 , 31.18 \ 42.00 , 31.77 \ 42.81 , 32.36 \ 43.62 , 32.95 \ 44.42 , 33.54 \ 45.23 , 34.13 \ 46.03 , 34.72 \ 46.84 , 35.31 \ 47.65 , 35.90 \ 48.45 , 36.49 \ 49.26 , 37.08 \ 50.06 , 37.67 \ 50.87 , 38.26 \ 51.68 , 38.85 \ 52.48 , 39.44 \ 53.29 , 40.02 \ 54.09 , 40.61 \ 54.90 , 41.20 \ 55.71 , 41.79 \ 56.51 , 42.38 \ 57.32 , 42.97 \ 58.13 , 43.56 \ 58.93 , 44.15 \ 59.74 , 44.74 \ 60.54 , 45.33 \ 61.35 , 45.92 \ 62.16 , 46.51 \ 62.96 , 47.10 \ 63.77 , 47.69 \ 64.57 , 48.28 \ 65.38 , 48.87 \ 66.19 , 49.46 \ 66.99 , 50.05 \ 67.80 , 50.64 \ 68.60 , 51.23 \ 69.41 , 51.82 \ 70.22 , 52.41 \ 71.02 , 53.00 \ 71.83 , 53.59 \ 72.64 , 54.18 \ 73.44 , 54.77 \ 74.25 , 55.36 \ 75.05 , 55.95 \ 75.86 , 56.54 \ 76.67 , 57.13 \ 77.47 , 57.72 \ 78.28 , 58.31 \ 79.08 , 58.90 \ 79.89 , 59.49 \ 80.70 , 60.08 \ 81.50 , 60.67 \ 82.31 , 61.26 \ 83.12 , 61.85 \ 83.92 , 62.44 \ 84.73 , 63.03 \ 85.53 , 63.62 \ 86.34 , 64.21 \ 87.15 , 64.80 \" +ivsize20 = strtoreal(colshape(colshape(tokens(s_ivsize20), 2)[.,1], 2)) + +s_ivsize25 = + " 5.53 , . 
\ 7.25 , 3.63 \ 7.80 , 5.45 \ 8.31 , 6.28 \ 8.84 , 6.89 \ 9.38 , 7.42 \ 9.93 , 7.91 \ 10.50 , 8.39 \ 11.07 , 8.85 \ 11.65 , 9.31 \ 12.23 , 9.77 \ 12.82 , 10.22 \ 13.41 , 10.68 \ 14.00 , 11.13 \ 14.60 , 11.58 \ 15.19 , 12.03 \ 15.79 , 12.49 \ 16.39 , 12.94 \ 16.99 , 13.39 \ 17.60 , 13.84 \ 18.20 , 14.29 \ 18.80 , 14.74 \ 19.41 , 15.19 \ 20.01 , 15.64 \ 20.61 , 16.10 \ 21.22 , 16.55 \ 21.83 , 17.00 \ 22.43 , 17.45 \ 23.04 , 17.90 \ 23.65 , 18.35 \ 24.25 , 18.81 \ 24.86 , 19.26 \ 25.47 , 19.71 \ 26.08 , 20.16 \ 26.68 , 20.61 \ 27.29 , 21.06 \ 27.90 , 21.52 \ 28.51 , 21.97 \ 29.12 , 22.42 \ 29.73 , 22.87 \ 30.33 , 23.32 \ 30.94 , 23.78 \ 31.55 , 24.23 \ 32.16 , 24.68 \ 32.77 , 25.13 \ 33.38 , 25.58 \ 33.99 , 26.04 \ 34.60 , 26.49 \ 35.21 , 26.94 \ 35.82 , 27.39 \ 36.43 , 27.85 \ 37.04 , 28.30 \ 37.65 , 28.75 \ 38.25 , 29.20 \ 38.86 , 29.66 \ 39.47 , 30.11 \ 40.08 , 30.56 \ 40.69 , 31.01 \ 41.30 , 31.47 \ 41.91 , 31.92 \ 42.52 , 32.37 \ 43.13 , 32.82 \ 43.74 , 33.27 \ 44.35 , 33.73 \ 44.96 , 34.18 \ 45.57 , 34.63 \ 46.18 , 35.08 \ 46.78 , 35.54 \ 47.39 , 35.99 \ 48.00 , 36.44 \ 48.61 , 36.89 \ 49.22 , 37.35 \ 49.83 , 37.80 \ 50.44 , 38.25 \ 51.05 , 38.70 \ 51.66 , 39.16 \ 52.27 , 39.61 \ 52.88 , 40.06 \ 53.49 , 40.51 \ 54.10 , 40.96 \ 54.71 , 41.42 \ 55.32 , 41.87 \ 55.92 , 42.32 \ 56.53 , 42.77 \ 57.14 , 43.23 \ 57.75 , 43.68 \ 58.36 , 44.13 \ 58.97 , 44.58 \ 59.58 , 45.04 \ 60.19 , 45.49 \ 60.80 , 45.94 \ 61.41 , 46.39 \ 62.02 , 46.85 \ 62.63 , 47.30 \ 63.24 , 47.75 \ 63.85 , 48.20 \ 64.45 , 48.65 \ 65.06 , 49.11 \ 65.67 , 49.56 \ 66.28 , 50.01 \" +ivsize25 = strtoreal(colshape(colshape(tokens(s_ivsize25), 2)[.,1], 2)) + + +s_fullrel5 = +" 24.09 , . 
\ 13.46 , 15.50 \ 9.61 , 10.83 \ 7.63 , 8.53 \ 6.42 , 7.16 \ 5.61 , 6.24 \ 5.02 , 5.59 \ 4.58 , 5.10 \ 4.23 , 4.71 \ 3.96 , 4.41 \ 3.73 , 4.15 \ 3.54 , 3.94 \ 3.38 , 3.76 \ 3.24 , 3.60 \ 3.12 , 3.47 \ 3.01 , 3.35 \ 2.92 , 3.24 \ 2.84 , 3.15 \ 2.76 , 3.06 \ 2.69 , 2.98 \ 2.63 , 2.91 \ 2.58 , 2.85 \ 2.52 , 2.79 \ 2.48 , 2.73 \ 2.43 , 2.68 \ 2.39 , 2.63 \ 2.36 , 2.59 \ 2.32 , 2.55 \ 2.29 , 2.51 \ 2.26 , 2.47 \ 2.23 , 2.44 \ 2.20 , 2.41 \ 2.18 , 2.37 \ 2.16 , 2.35 \ 2.13 , 2.32 \ 2.11 , 2.29 \ 2.09 , 2.27 \ 2.07 , 2.24 \ 2.05 , 2.22 \ 2.04 , 2.20 \ 2.02 , 2.18 \ 2.00 , 2.16 \ 1.99 , 2.14 \ 1.97 , 2.12 \ 1.96 , 2.10 \ 1.94 , 2.09 \ 1.93 , 2.07 \ 1.92 , 2.05 \ 1.91 , 2.04 \ 1.89 , 2.02 \ 1.88 , 2.01 \ 1.87 , 2.00 \ 1.86 , 1.98 \ 1.85 , 1.97 \ 1.84 , 1.96 \ 1.83 , 1.95 \ 1.82 , 1.94 \ 1.81 , 1.92 \ 1.80 , 1.91 \ 1.79 , 1.90 \ 1.79 , 1.89 \ 1.78 , 1.88 \ 1.77 , 1.87 \ 1.76 , 1.87 \ 1.75 , 1.86 \ 1.75 , 1.85 \ 1.74 , 1.84 \ 1.73 , 1.83 \ 1.72 , 1.83 \ 1.72 , 1.82 \ 1.71 , 1.81 \ 1.70 , 1.80 \ 1.70 , 1.80 \ 1.69 , 1.79 \ 1.68 , 1.79 \ 1.68 , 1.78 \ 1.67 , 1.77 \ 1.67 , 1.77 \ 1.66 , 1.76 \ 1.65 , 1.76 \ 1.65 , 1.75 \ 1.64 , 1.75 \ 1.64 , 1.74 \ 1.63 , 1.74 \ 1.63 , 1.73 \ 1.62 , 1.73 \ 1.61 , 1.73 \ 1.61 , 1.72 \ 1.60 , 1.72 \ 1.60 , 1.71 \ 1.59 , 1.71 \ 1.59 , 1.71 \ 1.58 , 1.71 \ 1.58 , 1.70 \ 1.57 , 1.70 \ 1.57 , 1.70 \ 1.56 , 1.69 \ 1.56 , 1.69 \ 1.55 , 1.69 \ 1.55 , 1.69 )" +fullrel5 = strtoreal(colshape(colshape(tokens(s_fullrel5), 2)[.,1], 2)) + +s_fullrel10 = + " 19.36 , . 
\ 10.89 , 12.55 \ 7.90 , 8.96 \ 6.37 , 7.15 \ 5.44 , 6.07 \ 4.81 , 5.34 \ 4.35 , 4.82 \ 4.01 , 4.43 \ 3.74 , 4.12 \ 3.52 , 3.87 \ 3.34 , 3.67 \ 3.19 , 3.49 \ 3.06 , 3.35 \ 2.95 , 3.22 \ 2.85 , 3.11 \ 2.76 , 3.01 \ 2.69 , 2.92 \ 2.62 , 2.84 \ 2.56 , 2.77 \ 2.50 , 2.71 \ 2.45 , 2.65 \ 2.40 , 2.60 \ 2.36 , 2.55 \ 2.32 , 2.50 \ 2.28 , 2.46 \ 2.24 , 2.42 \ 2.21 , 2.38 \ 2.18 , 2.35 \ 2.15 , 2.31 \ 2.12 , 2.28 \ 2.10 , 2.25 \ 2.07 , 2.23 \ 2.05 , 2.20 \ 2.03 , 2.17 \ 2.01 , 2.15 \ 1.99 , 2.13 \ 1.97 , 2.11 \ 1.95 , 2.09 \ 1.93 , 2.07 \ 1.92 , 2.05 \ 1.90 , 2.03 \ 1.88 , 2.01 \ 1.87 , 2.00 \ 1.86 , 1.98 \ 1.84 , 1.96 \ 1.83 , 1.95 \ 1.82 , 1.93 \ 1.81 , 1.92 \ 1.79 , 1.91 \ 1.78 , 1.89 \ 1.77 , 1.88 \ 1.76 , 1.87 \ 1.75 , 1.86 \ 1.74 , 1.85 \ 1.73 , 1.84 \ 1.72 , 1.83 \ 1.71 , 1.82 \ 1.70 , 1.81 \ 1.70 , 1.80 \ 1.69 , 1.79 \ 1.68 , 1.78 \ 1.67 , 1.77 \ 1.67 , 1.76 \ 1.66 , 1.75 \ 1.65 , 1.75 \ 1.64 , 1.74 \ 1.64 , 1.73 \ 1.63 , 1.72 \ 1.63 , 1.72 \ 1.62 , 1.71 \ 1.61 , 1.70 \ 1.61 , 1.70 \ 1.60 , 1.69 \ 1.60 , 1.68 \ 1.59 , 1.68 \ 1.59 , 1.67 \ 1.58 , 1.67 \ 1.58 , 1.66 \ 1.57 , 1.66 \ 1.57 , 1.65 \ 1.56 , 1.65 \ 1.56 , 1.64 \ 1.56 , 1.64 \ 1.55 , 1.63 \ 1.55 , 1.63 \ 1.54 , 1.62 \ 1.54 , 1.62 \ 1.54 , 1.62 \ 1.53 , 1.61 \ 1.53 , 1.61 \ 1.53 , 1.61 \ 1.52 , 1.60 \ 1.52 , 1.60 \ 1.52 , 1.60 \ 1.52 , 1.59 \ 1.51 , 1.59 \ 1.51 , 1.59 \ 1.51 , 1.59 \ 1.51 , 1.58 \ 1.50 , 1.58 )" +fullrel10 = strtoreal(colshape(colshape(tokens(s_fullrel10), 2)[.,1], 2)) + +s_fullrel20 = +" 15.64 , . 
\ 9.00 , 9.72 \ 6.61 , 7.18 \ 5.38 , 5.85 \ 4.62 , 5.04 \ 4.11 , 4.48 \ 3.75 , 4.08 \ 3.47 , 3.77 \ 3.25 , 3.53 \ 3.07 , 3.33 \ 2.92 , 3.17 \ 2.80 , 3.04 \ 2.70 , 2.92 \ 2.61 , 2.82 \ 2.53 , 2.73 \ 2.46 , 2.65 \ 2.39 , 2.58 \ 2.34 , 2.52 \ 2.29 , 2.46 \ 2.24 , 2.41 \ 2.20 , 2.36 \ 2.16 , 2.32 \ 2.13 , 2.28 \ 2.10 , 2.24 \ 2.06 , 2.21 \ 2.04 , 2.18 \ 2.01 , 2.15 \ 1.99 , 2.12 \ 1.96 , 2.09 \ 1.94 , 2.07 \ 1.92 , 2.04 \ 1.90 , 2.02 \ 1.88 , 2.00 \ 1.87 , 1.98 \ 1.85 , 1.96 \ 1.83 , 1.94 \ 1.82 , 1.93 \ 1.80 , 1.91 \ 1.79 , 1.89 \ 1.78 , 1.88 \ 1.76 , 1.86 \ 1.75 , 1.85 \ 1.74 , 1.84 \ 1.73 , 1.82 \ 1.72 , 1.81 \ 1.71 , 1.80 \ 1.70 , 1.79 \ 1.69 , 1.78 \ 1.68 , 1.77 \ 1.67 , 1.76 \ 1.66 , 1.75 \ 1.65 , 1.74 \ 1.65 , 1.73 \ 1.64 , 1.72 \ 1.63 , 1.71 \ 1.62 , 1.70 \ 1.62 , 1.69 \ 1.61 , 1.68 \ 1.60 , 1.68 \ 1.60 , 1.67 \ 1.59 , 1.66 \ 1.58 , 1.65 \ 1.58 , 1.65 \ 1.57 , 1.64 \ 1.57 , 1.63 \ 1.56 , 1.63 \ 1.56 , 1.62 \ 1.55 , 1.62 \ 1.55 , 1.61 \ 1.54 , 1.60 \ 1.54 , 1.60 \ 1.53 , 1.59 \ 1.53 , 1.59 \ 1.52 , 1.58 \ 1.52 , 1.58 \ 1.51 , 1.57 \ 1.51 , 1.57 \ 1.51 , 1.56 \ 1.50 , 1.56 \ 1.50 , 1.56 \ 1.49 , 1.55 \ 1.49 , 1.55 \ 1.49 , 1.54 \ 1.48 , 1.54 \ 1.48 , 1.54 \ 1.48 , 1.53 \ 1.47 , 1.53 \ 1.47 , 1.53 \ 1.47 , 1.52 \ 1.46 , 1.52 \ 1.46 , 1.52 \ 1.46 , 1.51 \ 1.46 , 1.51 \ 1.45 , 1.51 \ 1.45 , 1.50 \ 1.45 , 1.50 \ 1.45 , 1.50 \ 1.44 , 1.50 \ 1.44 , 1.49 \ 1.44 , 1.49 )" +fullrel20 = strtoreal(colshape(colshape(tokens(s_fullrel20), 2)[.,1], 2)) + +s_fullrel30 = + " 12.71 , . 
\ 7.49 , 8.03 \ 5.60 , 6.15 \ 4.63 , 5.10 \ 4.03 , 4.44 \ 3.63 , 3.98 \ 3.33 , 3.65 \ 3.11 , 3.39 \ 2.93 , 3.19 \ 2.79 , 3.02 \ 2.67 , 2.88 \ 2.57 , 2.77 \ 2.48 , 2.67 \ 2.41 , 2.58 \ 2.34 , 2.51 \ 2.28 , 2.44 \ 2.23 , 2.38 \ 2.18 , 2.33 \ 2.14 , 2.28 \ 2.10 , 2.23 \ 2.07 , 2.19 \ 2.04 , 2.16 \ 2.01 , 2.12 \ 1.98 , 2.09 \ 1.95 , 2.06 \ 1.93 , 2.03 \ 1.90 , 2.01 \ 1.88 , 1.98 \ 1.86 , 1.96 \ 1.84 , 1.94 \ 1.83 , 1.92 \ 1.81 , 1.90 \ 1.79 , 1.88 \ 1.78 , 1.87 \ 1.76 , 1.85 \ 1.75 , 1.83 \ 1.74 , 1.82 \ 1.72 , 1.80 \ 1.71 , 1.79 \ 1.70 , 1.78 \ 1.69 , 1.77 \ 1.68 , 1.75 \ 1.67 , 1.74 \ 1.66 , 1.73 \ 1.65 , 1.72 \ 1.64 , 1.71 \ 1.63 , 1.70 \ 1.62 , 1.69 \ 1.61 , 1.68 \ 1.60 , 1.67 \ 1.60 , 1.66 \ 1.59 , 1.66 \ 1.58 , 1.65 \ 1.57 , 1.64 \ 1.57 , 1.63 \ 1.56 , 1.63 \ 1.55 , 1.62 \ 1.55 , 1.61 \ 1.54 , 1.61 \ 1.54 , 1.60 \ 1.53 , 1.59 \ 1.53 , 1.59 \ 1.52 , 1.58 \ 1.51 , 1.57 \ 1.51 , 1.57 \ 1.50 , 1.56 \ 1.50 , 1.56 \ 1.50 , 1.55 \ 1.49 , 1.55 \ 1.49 , 1.54 \ 1.48 , 1.54 \ 1.48 , 1.53 \ 1.47 , 1.53 \ 1.47 , 1.52 \ 1.47 , 1.52 \ 1.46 , 1.52 \ 1.46 , 1.51 \ 1.46 , 1.51 \ 1.45 , 1.50 \ 1.45 , 1.50 \ 1.45 , 1.50 \ 1.44 , 1.49 \ 1.44 , 1.49 \ 1.44 , 1.48 \ 1.43 , 1.48 \ 1.43 , 1.48 \ 1.43 , 1.47 \ 1.43 , 1.47 \ 1.42 , 1.47 \ 1.42 , 1.47 \ 1.42 , 1.46 \ 1.42 , 1.46 \ 1.41 , 1.46 \ 1.41 , 1.45 \ 1.41 , 1.45 \ 1.41 , 1.45 \ 1.41 , 1.45 \ 1.40 , 1.44 \ 1.40 , 1.44 \ 1.40 , 1.44 \" +fullrel30 = strtoreal(colshape(colshape(tokens(s_fullrel30), 2)[.,1], 2)) + + +s_fullmax5 = + " 23.81 , . 
\ 12.38 , 14.19 \ 8.66 , 10.00 \ 6.81 , 7.88 \ 5.71 , 6.60 \ 4.98 , 5.74 \ 4.45 , 5.13 \ 4.06 , 4.66 \ 3.76 , 4.30 \ 3.51 , 4.01 \ 3.31 , 3.77 \ 3.15 , 3.57 \ 3.00 , 3.41 \ 2.88 , 3.26 \ 2.78 , 3.13 \ 2.69 , 3.02 \ 2.61 , 2.92 \ 2.53 , 2.84 \ 2.47 , 2.76 \ 2.41 , 2.69 \ 2.36 , 2.62 \ 2.31 , 2.56 \ 2.27 , 2.51 \ 2.23 , 2.46 \ 2.19 , 2.42 \ 2.15 , 2.37 \ 2.12 , 2.33 \ 2.09 , 2.30 \ 2.07 , 2.26 \ 2.04 , 2.23 \ 2.02 , 2.20 \ 1.99 , 2.17 \ 1.97 , 2.14 \ 1.95 , 2.12 \ 1.93 , 2.10 \ 1.91 , 2.07 \ 1.90 , 2.05 \ 1.88 , 2.03 \ 1.87 , 2.01 \ 1.85 , 1.99 \ 1.84 , 1.98 \ 1.82 , 1.96 \ 1.81 , 1.94 \ 1.80 , 1.93 \ 1.79 , 1.91 \ 1.78 , 1.90 \ 1.76 , 1.88 \ 1.75 , 1.87 \ 1.74 , 1.86 \ 1.73 , 1.85 \ 1.73 , 1.83 \ 1.72 , 1.82 \ 1.71 , 1.81 \ 1.70 , 1.80 \ 1.69 , 1.79 \ 1.68 , 1.78 \ 1.68 , 1.77 \ 1.67 , 1.76 \ 1.66 , 1.75 \ 1.65 , 1.74 \ 1.65 , 1.74 \ 1.64 , 1.73 \ 1.63 , 1.72 \ 1.63 , 1.71 \ 1.62 , 1.70 \ 1.62 , 1.70 \ 1.61 , 1.69 \ 1.60 , 1.68 \ 1.60 , 1.68 \ 1.59 , 1.67 \ 1.59 , 1.66 \ 1.58 , 1.66 \ 1.58 , 1.65 \ 1.57 , 1.64 \ 1.57 , 1.64 \ 1.56 , 1.63 \ 1.56 , 1.63 \ 1.55 , 1.62 \ 1.55 , 1.62 \ 1.54 , 1.61 \ 1.54 , 1.61 \ 1.53 , 1.60 \ 1.53 , 1.60 \ 1.53 , 1.59 \ 1.52 , 1.59 \ 1.52 , 1.58 \ 1.51 , 1.58 \ 1.51 , 1.57 \ 1.50 , 1.57 \ 1.50 , 1.57 \ 1.50 , 1.56 \ 1.49 , 1.56 \ 1.49 , 1.55 \ 1.49 , 1.55 \ 1.48 , 1.55 \ 1.48 , 1.54 \ 1.47 , 1.54 \ 1.47 , 1.54 \ 1.47 , 1.53 \ 1.46 , 1.53 )" +fullmax5 = strtoreal(colshape(colshape(tokens(s_fullmax5), 2)[.,1], 2)) + +s_fullmax10 = +" 19.40 , . 
\ 10.14 , 11.92 \ 7.18 , 8.39 \ 5.72 , 6.64 \ 4.85 , 5.60 \ 4.27 , 4.90 \ 3.86 , 4.40 \ 3.55 , 4.03 \ 3.31 , 3.73 \ 3.12 , 3.50 \ 2.96 , 3.31 \ 2.83 , 3.15 \ 2.71 , 3.01 \ 2.62 , 2.89 \ 2.53 , 2.79 \ 2.46 , 2.70 \ 2.39 , 2.62 \ 2.33 , 2.55 \ 2.28 , 2.49 \ 2.23 , 2.43 \ 2.19 , 2.38 \ 2.15 , 2.33 \ 2.11 , 2.29 \ 2.08 , 2.25 \ 2.05 , 2.21 \ 2.02 , 2.18 \ 1.99 , 2.14 \ 1.97 , 2.11 \ 1.94 , 2.08 \ 1.92 , 2.06 \ 1.90 , 2.03 \ 1.88 , 2.01 \ 1.86 , 1.99 \ 1.85 , 1.97 \ 1.83 , 1.95 \ 1.81 , 1.93 \ 1.80 , 1.91 \ 1.79 , 1.89 \ 1.77 , 1.88 \ 1.76 , 1.86 \ 1.75 , 1.85 \ 1.74 , 1.83 \ 1.72 , 1.82 \ 1.71 , 1.81 \ 1.70 , 1.80 \ 1.69 , 1.78 \ 1.68 , 1.77 \ 1.67 , 1.76 \ 1.66 , 1.75 \ 1.66 , 1.74 \ 1.65 , 1.73 \ 1.64 , 1.72 \ 1.63 , 1.71 \ 1.62 , 1.70 \ 1.62 , 1.69 \ 1.61 , 1.69 \ 1.60 , 1.68 \ 1.60 , 1.67 \ 1.59 , 1.66 \ 1.58 , 1.65 \ 1.58 , 1.65 \ 1.57 , 1.64 \ 1.57 , 1.63 \ 1.56 , 1.63 \ 1.55 , 1.62 \ 1.55 , 1.61 \ 1.54 , 1.61 \ 1.54 , 1.60 \ 1.53 , 1.60 \ 1.53 , 1.59 \ 1.52 , 1.59 \ 1.52 , 1.58 \ 1.52 , 1.58 \ 1.51 , 1.57 \ 1.51 , 1.57 \ 1.50 , 1.56 \ 1.50 , 1.56 \ 1.49 , 1.55 \ 1.49 , 1.55 \ 1.49 , 1.54 \ 1.48 , 1.54 \ 1.48 , 1.53 \ 1.48 , 1.53 \ 1.47 , 1.53 \ 1.47 , 1.52 \ 1.46 , 1.52 \ 1.46 , 1.51 \ 1.46 , 1.51 \ 1.45 , 1.51 \ 1.45 , 1.50 \ 1.45 , 1.50 \ 1.44 , 1.50 \ 1.44 , 1.49 \ 1.44 , 1.49 \ 1.44 , 1.49 \ 1.43 , 1.48 \ 1.43 , 1.48 \ 1.43 , 1.48 \ 1.42 , 1.48 \ 1.42 , 1.47 )" +fullmax10 = strtoreal(colshape(colshape(tokens(s_fullmax10), 2)[.,1], 2)) + +s_fullmax20 = +" 15.39 , . 
\ 8.16 , 9.41 \ 5.87 , 6.79 \ 4.75 , 5.47 \ 4.08 , 4.66 \ 3.64 , 4.13 \ 3.32 , 3.74 \ 3.08 , 3.45 \ 2.89 , 3.22 \ 2.74 , 3.03 \ 2.62 , 2.88 \ 2.51 , 2.76 \ 2.42 , 2.65 \ 2.35 , 2.56 \ 2.28 , 2.48 \ 2.22 , 2.40 \ 2.17 , 2.34 \ 2.12 , 2.28 \ 2.08 , 2.23 \ 2.04 , 2.19 \ 2.01 , 2.15 \ 1.98 , 2.11 \ 1.95 , 2.07 \ 1.92 , 2.04 \ 1.89 , 2.01 \ 1.87 , 1.98 \ 1.85 , 1.96 \ 1.83 , 1.93 \ 1.81 , 1.91 \ 1.79 , 1.89 \ 1.77 , 1.87 \ 1.76 , 1.85 \ 1.74 , 1.83 \ 1.73 , 1.82 \ 1.72 , 1.80 \ 1.70 , 1.79 \ 1.69 , 1.77 \ 1.68 , 1.76 \ 1.67 , 1.74 \ 1.66 , 1.73 \ 1.65 , 1.72 \ 1.64 , 1.71 \ 1.63 , 1.70 \ 1.62 , 1.69 \ 1.61 , 1.68 \ 1.60 , 1.67 \ 1.59 , 1.66 \ 1.58 , 1.65 \ 1.58 , 1.64 \ 1.57 , 1.63 \ 1.56 , 1.62 \ 1.56 , 1.62 \ 1.55 , 1.61 \ 1.54 , 1.60 \ 1.54 , 1.59 \ 1.53 , 1.59 \ 1.52 , 1.58 \ 1.52 , 1.57 \ 1.51 , 1.57 \ 1.51 , 1.56 \ 1.50 , 1.56 \ 1.50 , 1.55 \ 1.49 , 1.54 \ 1.49 , 1.54 \ 1.48 , 1.53 \ 1.48 , 1.53 \ 1.47 , 1.52 \ 1.47 , 1.52 \ 1.47 , 1.51 \ 1.46 , 1.51 \ 1.46 , 1.51 \ 1.45 , 1.50 \ 1.45 , 1.50 \ 1.45 , 1.49 \ 1.44 , 1.49 \ 1.44 , 1.48 \ 1.44 , 1.48 \ 1.43 , 1.48 \ 1.43 , 1.47 \ 1.43 , 1.47 \ 1.42 , 1.46 \ 1.42 , 1.46 \ 1.42 , 1.46 \ 1.41 , 1.45 \ 1.41 , 1.45 \ 1.41 , 1.45 \ 1.40 , 1.44 \ 1.40 , 1.44 \ 1.40 , 1.44 \ 1.40 , 1.44 \ 1.39 , 1.43 \ 1.39 , 1.43 \ 1.39 , 1.43 \ 1.39 , 1.42 \ 1.38 , 1.42 \ 1.38 , 1.42 \ 1.38 , 1.42 \ 1.38 , 1.41 \ 1.37 , 1.41 \ 1.37 , 1.41 )" +fullmax20 = strtoreal(colshape(colshape(tokens(s_fullmax20), 2)[.,1], 2)) + +s_fullmax30 = + " 12.76 , . 
\ 6.97 , 8.01 \ 5.11 , 5.88 \ 4.19 , 4.78 \ 3.64 , 4.12 \ 3.27 , 3.67 \ 3.00 , 3.35 \ 2.80 , 3.10 \ 2.64 , 2.91 \ 2.52 , 2.76 \ 2.41 , 2.63 \ 2.33 , 2.52 \ 2.25 , 2.43 \ 2.19 , 2.35 \ 2.13 , 2.29 \ 2.08 , 2.22 \ 2.04 , 2.17 \ 2.00 , 2.12 \ 1.96 , 2.08 \ 1.93 , 2.04 \ 1.90 , 2.01 \ 1.87 , 1.97 \ 1.84 , 1.94 \ 1.82 , 1.92 \ 1.80 , 1.89 \ 1.78 , 1.87 \ 1.76 , 1.84 \ 1.74 , 1.82 \ 1.73 , 1.80 \ 1.71 , 1.79 \ 1.70 , 1.77 \ 1.68 , 1.75 \ 1.67 , 1.74 \ 1.66 , 1.72 \ 1.64 , 1.71 \ 1.63 , 1.70 \ 1.62 , 1.68 \ 1.61 , 1.67 \ 1.60 , 1.66 \ 1.59 , 1.65 \ 1.58 , 1.64 \ 1.57 , 1.63 \ 1.57 , 1.62 \ 1.56 , 1.61 \ 1.55 , 1.60 \ 1.54 , 1.59 \ 1.54 , 1.59 \ 1.53 , 1.58 \ 1.52 , 1.57 \ 1.52 , 1.56 \ 1.51 , 1.56 \ 1.50 , 1.55 \ 1.50 , 1.54 \ 1.49 , 1.54 \ 1.49 , 1.53 \ 1.48 , 1.53 \ 1.48 , 1.52 \ 1.47 , 1.51 \ 1.47 , 1.51 \ 1.46 , 1.50 \ 1.46 , 1.50 \ 1.45 , 1.49 \ 1.45 , 1.49 \ 1.44 , 1.48 \ 1.44 , 1.48 \ 1.44 , 1.47 \ 1.43 , 1.47 \ 1.43 , 1.47 \ 1.42 , 1.46 \ 1.42 , 1.46 \ 1.42 , 1.45 \ 1.41 , 1.45 \ 1.41 , 1.45 \ 1.41 , 1.44 \ 1.40 , 1.44 \ 1.40 , 1.44 \ 1.40 , 1.43 \ 1.39 , 1.43 \ 1.39 , 1.43 \ 1.39 , 1.42 \ 1.39 , 1.42 \ 1.38 , 1.42 \ 1.38 , 1.41 \ 1.38 , 1.41 \ 1.37 , 1.41 \ 1.37 , 1.40 \ 1.37 , 1.40 \ 1.37 , 1.40 \ 1.36 , 1.40 \ 1.36 , 1.39 \ 1.36 , 1.39 \ 1.36 , 1.39 \ 1.36 , 1.38 \ 1.35 , 1.38 \ 1.35 , 1.38 \ 1.35 , 1.38 \ 1.35 , 1.37 \ 1.34 , 1.37 \ 1.34 , 1.37 \ 1.34 , 1.37 )" +fullmax30 = strtoreal(colshape(colshape(tokens(s_fullmax30), 2)[.,1], 2)) + + +s_limlsize10 = + " 16.38 , . 
\ 8.68 , 7.03 \ 6.46 , 5.44 \ 5.44 , 4.72 \ 4.84 , 4.32 \ 4.45 , 4.06 \ 4.18 , 3.90 \ 3.97 , 3.78 \ 3.81 , 3.70 \ 3.68 , 3.64 \ 3.58 , 3.60 \ 3.50 , 3.58 \ 3.42 , 3.56 \ 3.36 , 3.55 \ 3.31 , 3.54 \ 3.27 , 3.55 \ 3.24 , 3.55 \ 3.20 , 3.56 \ 3.18 , 3.57 \ 3.21 , 3.58 \ 3.39 , 3.59 \ 3.57 , 3.60 \ 3.68 , 3.62 \ 3.75 , 3.64 \ 3.79 , 3.65 \ 3.82 , 3.67 \ 3.85 , 3.74 \ 3.86 , 3.87 \ 3.87 , 4.02 \ 3.88 , 4.12 \ 3.89 , 4.19 \ 3.89 , 4.24 \ 3.90 , 4.27 \ 3.90 , 4.31 \ 3.90 , 4.33 \ 3.90 , 4.36 \ 3.90 , 4.38 \ 3.90 , 4.39 \ 3.90 , 4.41 \ 3.90 , 4.43 \ 3.90 , 4.44 \ 3.90 , 4.45 \ 3.90 , 4.47 \ 3.90 , 4.48 \ 3.90 , 4.49 \ 3.90 , 4.50 \ 3.90 , 4.51 \ 3.90 , 4.52 \ 3.90 , 4.53 \ 3.90 , 4.54 \ 3.90 , 4.55 \ 3.90 , 4.56 \ 3.90 , 4.56 \ 3.90 , 4.57 \ 3.90 , 4.58 \ 3.90 , 4.59 \ 3.90 , 4.59 \ 3.90 , 4.60 \ 3.90 , 4.61 \ 3.90 , 4.61 \ 3.90 , 4.62 \ 3.90 , 4.62 \ 3.90 , 4.63 \ 3.90 , 4.63 \ 3.89 , 4.64 \ 3.89 , 4.64 \ 3.89 , 4.64 \ 3.89 , 4.65 \ 3.89 , 4.65 \ 3.89 , 4.65 \ 3.89 , 4.66 \ 3.89 , 4.66 \ 3.89 , 4.66 \ 3.89 , 4.66 \ 3.88 , 4.66 \ 3.88 , 4.66 \ 3.88 , 4.66 \ 3.88 , 4.66 \ 3.88 , 4.66 \ 3.88 , 4.66 \ 3.88 , 4.66 \ 3.87 , 4.66 \ 3.87 , 4.66 \ 3.87 , 4.66 \ 3.87 , 4.66 \ 3.87 , 4.66 \ 3.86 , 4.65 \ 3.86 , 4.65 \ 3.86 , 4.65 \ 3.86 , 4.64 \ 3.85 , 4.64 \ 3.85 , 4.64 \ 3.85 , 4.63 \ 3.85 , 4.63 \ 3.84 , 4.62 \ 3.84 , 4.62 \ 3.84 , 4.61 \ 3.84 , 4.60 \ 3.83 , 4.60 \ 3.83 , 4.59 )" +limlsize10 = strtoreal(colshape(colshape(tokens(s_limlsize10), 2)[.,1], 2)) + +s_limlsize15 = + " 8.96 , . 
\ 5.33 , 4.58 \ 4.36 , 3.81 \ 3.87 , 3.39 \ 3.56 , 3.13 \ 3.34 , 2.95 \ 3.18 , 2.83 \ 3.04 , 2.73 \ 2.93 , 2.66 \ 2.84 , 2.60 \ 2.76 , 2.55 \ 2.69 , 2.52 \ 2.63 , 2.48 \ 2.57 , 2.46 \ 2.52 , 2.44 \ 2.48 , 2.42 \ 2.44 , 2.41 \ 2.41 , 2.40 \ 2.37 , 2.39 \ 2.34 , 2.38 \ 2.32 , 2.38 \ 2.29 , 2.37 \ 2.27 , 2.37 \ 2.25 , 2.37 \ 2.24 , 2.37 \ 2.22 , 2.38 \ 2.21 , 2.38 \ 2.20 , 2.38 \ 2.19 , 2.39 \ 2.18 , 2.39 \ 2.19 , 2.40 \ 2.22 , 2.41 \ 2.33 , 2.42 \ 2.40 , 2.42 \ 2.45 , 2.43 \ 2.48 , 2.44 \ 2.50 , 2.45 \ 2.52 , 2.54 \ 2.53 , 2.55 \ 2.54 , 2.66 \ 2.55 , 2.73 \ 2.56 , 2.78 \ 2.57 , 2.82 \ 2.57 , 2.85 \ 2.58 , 2.87 \ 2.58 , 2.89 \ 2.58 , 2.91 \ 2.59 , 2.92 \ 2.59 , 2.93 \ 2.59 , 2.94 \ 2.59 , 2.95 \ 2.59 , 2.96 \ 2.60 , 2.97 \ 2.60 , 2.98 \ 2.60 , 2.98 \ 2.60 , 2.99 \ 2.60 , 2.99 \ 2.60 , 3.00 \ 2.60 , 3.00 \ 2.60 , 3.01 \ 2.60 , 3.01 \ 2.60 , 3.02 \ 2.61 , 3.02 \ 2.61 , 3.02 \ 2.61 , 3.03 \ 2.61 , 3.03 \ 2.61 , 3.03 \ 2.61 , 3.03 \ 2.61 , 3.04 \ 2.61 , 3.04 \ 2.61 , 3.04 \ 2.60 , 3.04 \ 2.60 , 3.04 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.60 , 3.05 \ 2.59 , 3.05 \ 2.59 , 3.05 \ 2.59 , 3.05 \ 2.59 , 3.05 \ 2.59 , 3.05 \ 2.59 , 3.04 \ 2.58 , 3.04 \ 2.58 , 3.04 \ 2.58 , 3.04 \ 2.58 , 3.04 \ 2.58 , 3.03 \ 2.57 , 3.03 \ 2.57 , 3.03 \ 2.57 , 3.03 \ 2.57 , 3.02 \ 2.56 , 3.02 \ 2.56 , 3.02 )" +limlsize15 = strtoreal(colshape(colshape(tokens(s_limlsize15), 2)[.,1], 2)) + +s_limlsize20 = + " 6.66 , . 
\ 4.42 , 3.95 \ 3.69 , 3.32 \ 3.30 , 2.99 \ 3.05 , 2.78 \ 2.87 , 2.63 \ 2.73 , 2.52 \ 2.63 , 2.43 \ 2.54 , 2.36 \ 2.46 , 2.30 \ 2.40 , 2.25 \ 2.34 , 2.21 \ 2.29 , 2.17 \ 2.25 , 2.14 \ 2.21 , 2.11 \ 2.18 , 2.09 \ 2.14 , 2.07 \ 2.11 , 2.05 \ 2.09 , 2.03 \ 2.06 , 2.02 \ 2.04 , 2.01 \ 2.02 , 1.99 \ 2.00 , 1.98 \ 1.98 , 1.98 \ 1.96 , 1.97 \ 1.95 , 1.96 \ 1.93 , 1.96 \ 1.92 , 1.95 \ 1.90 , 1.95 \ 1.89 , 1.95 \ 1.88 , 1.94 \ 1.87 , 1.94 \ 1.86 , 1.94 \ 1.85 , 1.94 \ 1.84 , 1.94 \ 1.83 , 1.94 \ 1.82 , 1.94 \ 1.81 , 1.95 \ 1.81 , 1.95 \ 1.80 , 1.95 \ 1.79 , 1.95 \ 1.79 , 1.96 \ 1.78 , 1.96 \ 1.78 , 1.97 \ 1.80 , 1.97 \ 1.87 , 1.98 \ 1.92 , 1.98 \ 1.95 , 1.99 \ 1.97 , 2.00 \ 1.99 , 2.00 \ 2.00 , 2.01 \ 2.01 , 2.09 \ 2.02 , 2.11 \ 2.03 , 2.18 \ 2.04 , 2.23 \ 2.04 , 2.27 \ 2.05 , 2.29 \ 2.05 , 2.31 \ 2.06 , 2.33 \ 2.06 , 2.34 \ 2.07 , 2.35 \ 2.07 , 2.36 \ 2.07 , 2.37 \ 2.08 , 2.38 \ 2.08 , 2.39 \ 2.08 , 2.39 \ 2.08 , 2.40 \ 2.09 , 2.40 \ 2.09 , 2.41 \ 2.09 , 2.41 \ 2.09 , 2.41 \ 2.09 , 2.42 \ 2.09 , 2.42 \ 2.09 , 2.42 \ 2.09 , 2.43 \ 2.10 , 2.43 \ 2.10 , 2.43 \ 2.10 , 2.43 \ 2.10 , 2.44 \ 2.10 , 2.44 \ 2.10 , 2.44 \ 2.10 , 2.44 \ 2.10 , 2.44 \ 2.09 , 2.44 \ 2.09 , 2.44 \ 2.09 , 2.45 \ 2.09 , 2.45 \ 2.09 , 2.45 \ 2.09 , 2.45 \ 2.09 , 2.45 \ 2.09 , 2.45 \ 2.09 , 2.45 \ 2.08 , 2.45 \ 2.08 , 2.45 \ 2.08 , 2.45 \ 2.08 , 2.45 \ 2.08 , 2.45 \ 2.07 , 2.44 \ 2.07 , 2.44 \ 2.07 , 2.44 )" +limlsize20 = strtoreal(colshape(colshape(tokens(s_limlsize20), 2)[.,1], 2)) + +s_limlsize25 = + " 5.53 , . 
\ 3.92 , 3.63 \ 3.32 , 3.09 \ 2.98 , 2.79 \ 2.77 , 2.60 \ 2.61 , 2.46 \ 2.49 , 2.35 \ 2.39 , 2.27 \ 2.32 , 2.20 \ 2.25 , 2.14 \ 2.19 , 2.09 \ 2.14 , 2.05 \ 2.10 , 2.02 \ 2.06 , 1.99 \ 2.03 , 1.96 \ 2.00 , 1.93 \ 1.97 , 1.91 \ 1.94 , 1.89 \ 1.92 , 1.87 \ 1.90 , 1.86 \ 1.88 , 1.84 \ 1.86 , 1.83 \ 1.84 , 1.81 \ 1.83 , 1.80 \ 1.81 , 1.79 \ 1.80 , 1.78 \ 1.78 , 1.77 \ 1.77 , 1.77 \ 1.76 , 1.76 \ 1.75 , 1.75 \ 1.74 , 1.75 \ 1.73 , 1.74 \ 1.72 , 1.73 \ 1.71 , 1.73 \ 1.70 , 1.73 \ 1.69 , 1.72 \ 1.68 , 1.72 \ 1.67 , 1.71 \ 1.67 , 1.71 \ 1.66 , 1.71 \ 1.65 , 1.71 \ 1.65 , 1.71 \ 1.64 , 1.70 \ 1.63 , 1.70 \ 1.63 , 1.70 \ 1.62 , 1.70 \ 1.62 , 1.70 \ 1.61 , 1.70 \ 1.61 , 1.70 \ 1.61 , 1.70 \ 1.60 , 1.70 \ 1.60 , 1.70 \ 1.59 , 1.70 \ 1.59 , 1.70 \ 1.59 , 1.70 \ 1.58 , 1.70 \ 1.58 , 1.71 \ 1.58 , 1.71 \ 1.57 , 1.71 \ 1.59 , 1.71 \ 1.60 , 1.71 \ 1.63 , 1.72 \ 1.65 , 1.72 \ 1.67 , 1.72 \ 1.69 , 1.72 \ 1.70 , 1.76 \ 1.71 , 1.81 \ 1.72 , 1.87 \ 1.73 , 1.91 \ 1.74 , 1.94 \ 1.74 , 1.96 \ 1.75 , 1.98 \ 1.75 , 1.99 \ 1.76 , 2.01 \ 1.76 , 2.02 \ 1.77 , 2.03 \ 1.77 , 2.04 \ 1.78 , 2.04 \ 1.78 , 2.05 \ 1.78 , 2.06 \ 1.79 , 2.06 \ 1.79 , 2.07 \ 1.79 , 2.07 \ 1.79 , 2.08 \ 1.80 , 2.08 \ 1.80 , 2.09 \ 1.80 , 2.09 \ 1.80 , 2.09 \ 1.80 , 2.09 \ 1.80 , 2.10 \ 1.80 , 2.10 \ 1.80 , 2.10 \ 1.80 , 2.10 \ 1.80 , 2.10 \ 1.80 , 2.11 \ 1.80 , 2.11 \ 1.80 , 2.11 \ 1.80 , 2.11 \ 1.80 , 2.11 \ 1.80 , 2.11 )" +limlsize25 = strtoreal(colshape(colshape(tokens(s_limlsize25), 2)[.,1], 2)) + +if (choice == 1) { + st_matrix(temp, ivbias5) +} else if (choice == 2) { + st_matrix(temp, ivbias10) +} else if (choice == 3) { + st_matrix(temp, ivbias20) +} else if (choice == 4) { + st_matrix(temp, ivbias30) +} else if (choice == 5) { + st_matrix(temp, ivsize10) +} else if (choice == 6) { + st_matrix(temp, ivsize15) +} else if (choice == 7) { + st_matrix(temp, ivsize20) +} else if (choice == 8) { + st_matrix(temp, ivsize25) +} else if (choice == 9) { + st_matrix(temp, fullrel5) +} else if (choice == 10) { + 
st_matrix(temp, fullrel10) +} else if (choice == 11) { + st_matrix(temp, fullrel20) +} else if (choice == 12) { + st_matrix(temp, fullrel30) +} else if (choice == 13) { + st_matrix(temp, fullmax5) +} else if (choice == 14) { + st_matrix(temp, fullmax10) +} else if (choice == 15) { + st_matrix(temp, fullmax20) +} else if (choice == 16) { + st_matrix(temp, fullmax30) +} else if (choice == 17) { + st_matrix(temp, limlsize10) +} else if (choice == 18) { + st_matrix(temp, limlsize15) +} else if (choice == 19) { + st_matrix(temp, limlsize20) +} else if (choice == 20) { + st_matrix(temp, limlsize25) +} +} // end of program cdsy + +end + + +****************************************** END *************************************** +*********************************** livreg2.mlib CODE ******************************** + +***************************************** START ************************************** +*********************************** ranktest.ado CODE ******************************** +* Code from: +* ranktest 1.3.04 24aug2014 +* author mes, based on code by fk +* Imported into ivreg210 so that ivreg210 is free-standing. +* See end of file for version notes. 
+ +program define ivreg210_ranktest, rclass sortpreserve + version 9.2 + local lversion 01.3.04 + + if substr("`1'",1,1)== "," { + if "`2'"=="version" { + di in ye "`lversion'" + return local version `lversion' + exit + } + else { +di as err "invalid syntax" + exit 198 + } + } + +* If varlist 1 or varlist 2 have a single element, parentheses optional + + if substr("`1'",1,1)=="(" { + GetVarlist `0' + local y `s(varlist)' + local K : word count `y' + local 0 `"`s(rest)'"' + sret clear + } + else { + local y `1' + local K 1 + mac shift 1 + local 0 `"`*'"' + } + + if substr("`1'",1,1)=="(" { + GetVarlist `0' + local z `s(varlist)' + local L : word count `z' + local 0 `"`s(rest)'"' + sret clear + } + else { + local z `1' + local K 1 + mac shift 1 +* Need to reinsert comma before options (if any) for -syntax- command to work + local 0 `", `*'"' + } + +* Option version ignored here if varlists were provided + syntax [if] [in] [aw fw pw iw/] [, partial(varlist ts) fwl(varlist ts) /* + */ NOConstant wald ALLrank NULLrank FULLrank ROBust cluster(varlist) /* + */ BW(string) kernel(string) Tvar(varname) Ivar(varname) sw psd version /* + */ dofminus(integer 0) ] + + local partial "`partial' `fwl'" + + if "`noconstant'"=="" { + tempvar one + gen byte `one' = 1 + local partial "`partial' `one'" + } + + if "`wald'"~="" { + local LMWald "Wald" + } + else { + local LMWald "LM" + } + + local optct : word count `allrank' `nullrank' `fullrank' + if `optct' > 1 { +di as err "Incompatible options: `allrank' `nullrank' `fullrank'" + error 198 + } + else if `optct' == 0 { +* Default + local allrank "allrank" + } + +* Note that by tsrevar-ing here, subsequent disruption to the sort doesn't matter +* for TS operators. + tsrevar `y' + local vl1 `r(varlist)' + tsrevar `z' + local vl2 `r(varlist)' + tsrevar `partial' + local partial `r(varlist)' + + foreach vn of varlist `vl1' { + tempvar tv + qui gen double `tv' = . 
+ local tempvl1 "`tempvl1' `tv'" + } + foreach vn of varlist `vl2' { + tempvar tv + qui gen double `tv' = . + local tempvl2 "`tempvl2' `tv'" + } + + marksample touse + markout `touse' `vl1' `vl2' `partial' `cluster', strok + +* Stock-Watson and cluster imply robust. + if "`sw'`cluster'" ~= "" { + local robust "robust" + } + + tempvar wvar + if "`weight'" == "fweight" | "`weight'"=="aweight" { + local wtexp `"[`weight'=`exp']"' + gen double `wvar'=`exp' + } + if "`fsqrt(wf)*(wvar^0.5):*'" == "fweight" & "`kernel'" !="" { + di in red "fweights not allowed (data are -tsset-)" + exit 101 + } + if "`weight'" == "fweight" & "`sw'" != "" { + di in red "fweights currently not supported with -sw- option" + exit 101 + } + if "`weight'" == "iweight" { + if "`robust'`cluster'`bw'" !="" { + di in red "iweights not allowed with robust, cluster, AC or HAC" + exit 101 + } + else { + local wtexp `"[`weight'=`exp']"' + gen double `wvar'=`exp' + } + } + if "`weight'" == "pweight" { + local wtexp `"[aweight=`exp']"' + gen double `wvar'=`exp' + local robust "robust" + } + if "`weight'" == "" { +* If no weights, define neutral weight variable + qui gen byte `wvar'=1 + } + + +* Every time a weight is used, must multiply by scalar wf ("weight factor") +* wf=1 for no weights, fw and iw, wf = scalar that normalizes sum to be N if aw or pw + sum `wvar' if `touse' `wtexp', meanonly +* Weight statement + if "`weight'" ~= "" { +di in gr "(sum of wgt is " %14.4e `r(sum_w)' ")" + } + if "`weight'"=="" | "`weight'"=="fweight" | "`weight'"=="iweight" { +* If weight is "", weight var must be column of ones and N is number of rows. +* With fw and iw, effective number of observations is sum of weight variable. + local wf=1 + local N=r(sum_w) + } + else if "`weight'"=="aweight" | "`weight'"=="pweight" { +* With aw and pw, N is number of obs, unadjusted. 
+ local wf=r(N)/r(sum_w) + local N=r(N) + } + else { +* Should never reach here +di as err "ivreg210_ranktest error - misspecified weights" + exit 198 + } + +* HAC estimation. +* If bw is omitted, default `bw' is empty string. +* If bw or kernel supplied, check/set `kernel'. +* Macro `kernel' is also used for indicating HAC in use. + if "`bw'" == "" & "`kernel'" == "" { + local bw=0 + } + else { +* Need tvar for markout with time-series stuff +* Data must be tsset for time-series operators in code to work +* User-supplied tvar checked if consistent with tsset + capture tsset + if "`r(timevar)'" == "" { +di as err "must tsset data and specify timevar" + exit 5 + } + if "`tvar'" == "" { + local tvar "`r(timevar)'" + } + else if "`tvar'"!="`r(timevar)'" { +di as err "invalid tvar() option - data already -tsset-" + exit 5 + } +* If no panel data, ivar will still be empty + if "`ivar'" == "" { + local ivar "`r(panelvar)'" + } + else if "`ivar'"!="`r(panelvar)'" { +di as err "invalid ivar() option - data already -tsset-" + exit 5 + } + local tdelta `r(tdelta)' + tsreport if `touse', panel + if `r(N_gaps)' != 0 { +di in gr "Warning: time variable " in ye "`tvar'" in gr " has " /* + */ in ye "`r(N_gaps)'" in gr " gap(s) in relevant range" + } + +* Check it's a valid kernel and replace with unabbreviated kernel name; check bw. +* Automatic kernel selection allowed by ivreg2 but not ranktest so must trap. +* s_ivreg210_vkernel is in livreg2 mlib. 
+ if "`bw'"=="auto" { +di as err "invalid bandwidth in option bw() - must be real > 0" + exit 198 + } + mata: s_ivreg210_vkernel("`kernel'", "`bw'", "`ivar'") + local kernel `r(kernel)' + local bw = `r(bw)' + } + +* tdelta missing if version 9 or if not tsset + if "`tdelta'"=="" { + local tdelta=1 + } + + if "`sw'"~="" { + capture xtset + if "`ivar'" == "" { + local ivar "`r(panelvar)'" + } + else if "`ivar'"!="`r(panelvar)'" { +di as err "invalid ivar() option - data already tsset or xtset" + exit 5 + } +* Exit with error if ivar is neither supplied nor tsset nor xtset + if "`ivar'"=="" { +di as err "Must -xtset- or -tsset- data or specify -ivar- with -sw- option" + exit 198 + } + qui describe, short varlist + local sortlist "`r(sortlist)'" + tokenize `sortlist' + if "`ivar'"~="`1'" { +di as err "Error - dataset must be sorted on panel var with -sw- option" + exit 198 + } + } + +* Create variable used for getting lags etc. in Mata + tempvar tindex + qui gen `tindex'=1 if `touse' + qui replace `tindex'=sum(`tindex') if `touse' + +********** CLUSTER SETUP ********************************************** + +* Mata code requires data are sorted on (1) the first var cluster if there +* is only one cluster var; (2) on the 3rd and then 1st if two-way clustering, +* unless (3) two-way clustering is combined with kernel option, in which case +* the data are tsset and sorted on panel id (first cluster variable) and time +* id (second cluster variable). +* Second cluster var is optional and requires an identifier numbered 1..N_clust2, +* unless combined with kernel option, in which case it's the time variable. +* Third cluster var is the intersection of 1 and 2, unless combined with kernel +* opt, in which case it's unnecessary. +* Sorting on "cluster3 cluster1" means that in Mata, panelsetup works for +* both, since cluster1 nests cluster3. 
+* Note that it is possible to cluster on time but not panel, in which case +* cluster1 is time, cluster2 is empty and data are sorted on panel-time. +* Note also that if no kernel-robust, sorting will disrupt any tsset-ing, +* but data are tsrevar-ed earlier to avoid any problems. + if "`cluster'"!="" { + local clopt "cluster(`cluster')" + tokenize `cluster' + local cluster1 "`1'" + local cluster2 "`2'" + if "`kernel'"~="" { +* kernel requires either that cluster1 is time var and cluster2 is empty +* or that cluster1 is panel var and cluster2 is time var. +* Either way, data must be tsset and sorted for panel data. + if "`cluster2'"~="" { +* Allow backwards order + if "`cluster1'"=="`tvar'" & "`cluster2'"=="`ivar'" { + local cluster1 "`2'" + local cluster2 "`1'" + } + if "`cluster1'"~="`ivar'" | "`cluster2'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset panel & time vars." +di as err " tsset panel var=`ivar'; tsset time var=`tvar'; cluster vars=`cluster1',`cluster2'" + exit 198 + } + } + else { + if "`cluster1'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset time variable." +di as err " tsset time var=`tvar'; cluster var=`cluster1'" + exit 198 + } + } + } +* Simple way to get quick count of 1st cluster variable without disrupting sort +* clusterid1 is numbered 1.._Nclust1. + tempvar clusterid1 + qui egen `clusterid1'=group(`cluster1') if `touse' + sum `clusterid1' if `touse', meanonly + if "`cluster2'"=="" { + local N_clust=r(max) + local N_clust1=. + local N_clust2=. + if "`kernel'"=="" { +* Single level of clustering and no kernel-robust, so sort on single cluster var. +* kernel-robust already sorted via tsset. 
+ sort `cluster1' + } + } + else { + local N_clust1=r(max) + if "`kernel'"=="" { + tempvar clusterid2 clusterid3 +* New cluster id vars are numbered 1..N_clust2 and 1..N_clust3 + qui egen `clusterid2'=group(`cluster2') if `touse' + qui egen `clusterid3'=group(`cluster1' `cluster2') if `touse' +* Two levels of clustering and no kernel-robust, so sort on cluster3/nested in/cluster1 +* kernel-robust already sorted via tsset. + sort `clusterid3' `cluster1' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) + } + else { +* Need to create this only to count the number of clusters + tempvar clusterid2 + qui egen `clusterid2'=group(`cluster2') if `touse' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) +* Now replace with original variable + local clusterid2 `cluster2' + } + local N_clust=min(`N_clust1',`N_clust2') + } + } + +************************************************************************************************ + +* Note that bw is passed as a value, not as a string + mata: ivreg210_rkstat( "`vl1'", /* + */ "`vl2'", /* + */ "`partial'", /* + */ "`wvar'", /* + */ "`weight'", /* + */ `wf', /* + */ `N', /* + */ "`touse'", /* + */ "`LMWald'", /* + */ "`allrank'", /* + */ "`nullrank'", /* + */ "`fullrank'", /* + */ "`robust'", /* + */ "`clusterid1'", /* + */ "`clusterid2'", /* + */ "`clusterid3'", /* + */ `bw', /* + */ "`tvar'", /* + */ "`ivar'", /* + */ "`tindex'", /* + */ `tdelta', /* + */ `dofminus', /* + */ "`kernel'", /* + */ "`sw'", /* + */ "`psd'", /* + */ "`tempvl1'", /* + */ "`tempvl2'") + + tempname rkmatrix chi2 df df_r p rank ccorr eval + mat `rkmatrix'=r(rkmatrix) + mat `ccorr'=r(ccorr) + mat `eval'=r(eval) + mat colnames `rkmatrix' = "rk" "df" "p" "rank" "eval" "ccorr" + +di +di "Kleibergen-Paap rk `LMWald' test of rank of matrix" + if "`robust'"~="" & "`kernel'"~= "" & "`cluster'"=="" { +di " Test statistic robust to heteroskedasticity and autocorrelation" +di " Kernel: `kernel' Bandwidth: `bw'" + } + else if "`kernel'"~="" & 
"`cluster'"=="" { +di " Test statistic robust to autocorrelation" +di " Kernel: `kernel' Bandwidth: `bw'" + } + else if "`cluster'"~="" { +di " Test statistic robust to heteroskedasticity and clustering on `cluster'" + if "`kernel'"~="" { +di " and kernel-robust to common correlated disturbances" +di " Kernel: `kernel' Bandwidth: `bw'" + } + } + else if "`robust'"~="" { +di " Test statistic robust to heteroskedasticity" + } + else if "`LMWald'"=="LM" { +di " Test assumes homoskedasticity (Anderson canonical correlations test)" + } + else { +di " Test assumes homoskedasticity (Cragg-Donald test)" + } + + local numtests = rowsof(`rkmatrix') + forvalues i=1(1)`numtests' { +di "Test of rank=" %3.0f `rkmatrix'[`i',4] " rk=" %8.2f `rkmatrix'[`i',1] /* + */ " Chi-sq(" %3.0f `rkmatrix'[`i',2] ") pvalue=" %8.6f `rkmatrix'[`i',3] + } + scalar `chi2' = `rkmatrix'[`numtests',1] + scalar `p' = `rkmatrix'[`numtests',3] + scalar `df' = `rkmatrix'[`numtests',2] + scalar `rank' = `rkmatrix'[`numtests',4] + local N `r(N)' + return scalar df = `df' + return scalar chi2 = `chi2' + return scalar p = `p' + return scalar rank = `rank' + if "`cluster'"~="" { + return scalar N_clust = `N_clust' + } + if "`cluster2'"~="" { + return scalar N_clust1 = `N_clust1' + return scalar N_clust2 = `N_clust2' + } + return scalar N = `N' + return matrix rkmatrix `rkmatrix' + return matrix ccorr `ccorr' + return matrix eval `eval' + + tempname S V Omega + if `K' > 1 { + foreach en of local y { +* Remove "." from equation name + local en1 : subinstr local en "." 
"_", all
+			foreach vn of local z {
+				local cn "`cn' `en1':`vn'"
+			}
+		}
+	}
+	else {
+		foreach vn of local z {
+			local cn "`cn' `vn'"
+		}
+	}
+* Attach equation:variable (or plain variable) names to V and S before returning
+	mat `V'=r(V)
+	matrix colnames `V' = `cn'
+	matrix rownames `V' = `cn'
+	return matrix V `V'
+	mat `S'=r(S)
+	matrix colnames `S' = `cn'
+	matrix rownames `S' = `cn'
+	return matrix S `S'
+end
+
+* Adopted from -canon-
+* GetVarlist: strips a parenthesized varlist "(v1 v2 ...)" off the front of
+* the command line held in `0'.  Returns s(varlist) = the list found between
+* the parentheses and s(rest) = the remainder of the command line after the
+* closing ")".  Exits with error 198 if the leading "(" is missing or the
+* closing ")" is never found.
+program define GetVarlist, sclass
+	sret clear
+* First token must be the opening parenthesis
+	gettoken open 0 : 0, parse("(")
+	if `"`open'"' != "(" {
+		error 198
+	}
+* Accumulate everything up to (but excluding) the closing parenthesis.
+* gettoken with parse(")") returns whole chunks (including embedded spaces),
+* so `list' is built by direct concatenation.
+	gettoken next 0 : 0, parse(")")
+	while `"`next'"' != ")" {
+* Empty token means we ran off the end without finding ")"
+		if `"`next'"'=="" {
+			error 198
+		}
+		local list `list'`next'
+		gettoken next 0 : 0, parse(")")
+	}
+* Whatever follows ")" is passed back untouched for further parsing by caller
+	sret local rest `"`0'"'
+* Retokenize to normalize spacing before returning the varlist
+	tokenize `list'
+	local 0 `*'
+	sret local varlist "`0'"
+end
+
+
+*******************************************************************************
+*************************** BEGIN MATA CODE ***********************************
+*******************************************************************************
+
+
+
+version 9.2
+mata:
+
+// ********* MATA CODE SHARED BY ivreg2 AND ranktest *************** //
+// ********* 1. struct ms_ivreg210_vcvorthog *************** //
+// ********* 2. m_ivreg210_omega *************** //
+// ********* 3. m_ivreg210_calckw *************** //
+// ********* 4. 
s_ivreg210_vkernel *************** //
+// *********************************************************************** //
+
+// For reference:
+// struct ms_ivreg210_vcvorthog {
+// string scalar ename, Znames, touse, weight, wvarname
+// string scalar robust, clustvarname, clustvarname2, clustvarname3, kernel
+// string scalar sw, psd, ivarname, tvarname, tindexname
+// real scalar wf, N, bw, tdelta, dofminus
+// real matrix ZZ
+// pointer matrix e
+// pointer matrix Z
+// pointer matrix wvar
+// }
+
+// ivreg210_rkstat: Mata workhorse for the rank test.
+// Computes Kleibergen-Paap rk statistics for the rank of the matrix of
+// correlations between the variables in vl1 (y) and vl2 (z), after
+// partialling out the variables in `partial' (if any).  LMWald selects the
+// "LM" or "Wald" form; allrank/nullrank/fullrank select which rank
+// hypotheses are tested.  The covariance estimator (robust / cluster /
+// kernel / sw / psd) is delegated to m_ivreg210_omega via the shared
+// ms_ivreg210_vcvorthog struct.  Results are posted to Stata in r():
+// rkmatrix (one row per rank tested: rk, df, p, rank, eval, ccorr),
+// ccorr, eval, V, S, N and - with clustering - N_clust / N_clust2.
+// tempvl1/tempvl2 name Stata temp variables whose contents are
+// overwritten (views yhat/zhat below).
+void ivreg210_rkstat( string scalar vl1,
+	string scalar vl2,
+	string scalar partial,
+	string scalar wvarname,
+	string scalar weight,
+	scalar wf,
+	scalar N,
+	string scalar touse,
+	string scalar LMWald,
+	string scalar allrank,
+	string scalar nullrank,
+	string scalar fullrank,
+	string scalar robust,
+	string scalar clustvarname,
+	string scalar clustvarname2,
+	string scalar clustvarname3,
+	bw,
+	string scalar tvarname,
+	string scalar ivarname,
+	string scalar tindexname,
+	tdelta,
+	dofminus,
+	string scalar kernel,
+	string scalar sw,
+	string scalar psd,
+	string scalar tempvl1,
+	string scalar tempvl2)
+{
+
+// tempx, tempy and tempz are the Stata names of temporary variables that will be changed by rkstat
+	if (partial~="") {
+		tempx=tokens(partial)
+	}
+	tempy=tokens(tempvl1)
+	tempz=tokens(tempvl2)
+
+// Views onto the estimation sample; writing to yhat/zhat writes through to
+// the Stata temp variables.
+	st_view(y=.,.,tokens(vl1),touse)
+	st_view(z=.,.,tokens(vl2),touse)
+	st_view(yhat=.,.,tempy,touse)
+	st_view(zhat=.,.,tempz,touse)
+	st_view(mtouse=.,.,tokens(touse),touse)
+	st_view(wvar=.,.,tokens(wvarname),touse)
+	noweight=(st_vartype(wvarname)=="byte")
+
+// Note that we now use wf*wvar instead of wvar
+// because wvar is raw weighting variable and
+// wf*wvar normalizes so that sum(wf*wvar)=N.
+
+// Partial out the X variables
+// Note that this is entered if there is a constant,
+// i.e., variables are centered
+	if (partial~="") {
+		st_view(x=.,.,tempx,touse)
+		xx = quadcross(x, wf*wvar, x)
+		xy = quadcross(x, wf*wvar, y)
+		xz = quadcross(x, wf*wvar, z)
+
+// OLS coefficients of y and z on x; residuals overwrite the temp vars
+		by = invsym(xx)*xy
+		bz = invsym(xx)*xz
+
+		yhat[.,.] = y-x*by
+		zhat[.,.] = z-x*bz
+	}
+	else {
+		yhat[.,.] = y
+		zhat[.,.] = z
+	}
+	K=cols(y)
+	L=cols(z)
+
+// Weighted cross-products of the (possibly partialled-out) variables
+	zhzh = quadcross(zhat, wf*wvar, zhat)
+	zhyh = quadcross(zhat, wf*wvar, yhat)
+	yhyh = quadcross(yhat, wf*wvar, yhat)
+
+	pihat = invsym(zhzh)*zhyh
+// rzhat is F in paper (p. 103)
+// iryhat is G in paper (p. 103)
+	ryhat=cholesky(yhyh)
+	rzhat=cholesky(zhzh)
+	iryhat=luinv(ryhat')
+	irzhat=luinv(rzhat')
+	that=rzhat'*pihat*iryhat
+
+// cc is canonical correlations. Squared cc is eigenvalues.
+	fullsvd(that, ut, cc, vt)
+	vt=vt'
+	vecth=vec(that)
+	ev = cc:^2
+// S matrix in paper (p. 100). Not used in code below.
+// smat=fullsdiag(cc, rows(that)-cols(that))
+
+// A canonical correlation of (numerically) 1 signals collinearity across lists
+	if (abs(1-cc[1,1])<1e-10) {
+printf("\n{text:Warning: collinearities detected between (varlist1) and (varlist2)}\n")
+	}
+	if ((missing(ryhat)>0) | (missing(iryhat)>0) | (missing(rzhat)>0) | (missing(irzhat)>0)) {
+printf("\n{error:Error: non-positive-definite matrix. May be caused by collinearities.}\n")
+		exit(error(3351))
+	}
+
+// If Wald, yhat is residuals
+	if (LMWald=="Wald") {
+		yhat[.,.]=yhat-zhat*pihat
+// NOTE(review): this quadcross uses wvar rather than wf*wvar, unlike the
+// weighted cross-products above -- confirm this asymmetry is intentional.
+		yhyh = quadcross(yhat, wvar, yhat)
+	}
+
+// Covariance matrices
+// vhat is W in paper (eqn below equation 17, p. 103)
+// shat is V in paper (eqn below eqn 15, p. 103)
+
+// ************************************************************************************* //
+// shat calculated using struct and programs m_ivreg210_omega, m_ivreg210_calckw shared with ivreg2 //
+
+	struct ms_ivreg210_vcvorthog scalar vcvo
+
+// Populate the shared struct; comments note the corresponding ivreg2 fields
+	vcvo.ename = tempy // ivreg2 has = ename //
+	vcvo.Znames = tempz // ivreg2 has = Znames //
+	vcvo.touse = touse
+	vcvo.weight = weight
+	vcvo.wvarname = wvarname
+	vcvo.robust = robust
+	vcvo.clustvarname = clustvarname
+	vcvo.clustvarname2 = clustvarname2
+	vcvo.clustvarname3 = clustvarname3
+	vcvo.kernel = kernel
+	vcvo.sw = sw
+	vcvo.psd = psd
+	vcvo.ivarname = ivarname
+	vcvo.tvarname = tvarname
+	vcvo.tindexname = tindexname
+	vcvo.wf = wf
+	vcvo.N = N
+	vcvo.bw = bw
+	vcvo.tdelta = tdelta
+	vcvo.dofminus = dofminus
+	vcvo.ZZ = zhzh // ivreg2 has = st_matrix(ZZmatrix) //
+
+	vcvo.e = &yhat // ivreg2 has = &e //
+	vcvo.Z = &zhat // ivreg2 has = &Z //
+	vcvo.wvar = &wvar
+
+	shat=m_ivreg210_omega(vcvo)
+
+// ***************************************************************************************
+
+// Finally, calculate vhat
+	if ((LMWald=="LM") & (kernel=="") & (robust=="") & (clustvarname=="")) {
+// Homoskedastic, iid LM case means vcv is identity matrix
+// Generates canonical correlation stats. Default.
+		vhat=I(L*K,L*K)/N
+	}
+	else {
+		vhat=(iryhat'#irzhat')*shat*(iryhat'#irzhat')' * N
+		_makesymmetric(vhat)
+	}
+
+// ready to start collecting test stats
+// Rank hypotheses to test: all ranks, rank 0 only, or full rank only
+	if (allrank~="") {
+		firstrank=1
+		lastrank=min((K,L))
+	}
+	else if (nullrank~="") {
+		firstrank=1
+		lastrank=1
+	}
+	else if (fullrank~="") {
+		firstrank=min((K,L))
+		lastrank=min((K,L))
+	}
+	else {
+// should never reach this point
+printf("ivreg210_ranktest error\n")
+		exit
+	}
+
+// One row per tested rank; columns: rk, df, p, rank, eval, ccorr
+	rkmatrix=J(lastrank-firstrank+1,6,.)
+
+	for (i=firstrank; i<=lastrank; i++) {
+
+// Partition the SVD rotation matrices around the hypothesized rank i-1
+		if (i>1) {
+			u12=ut[(1::i-1),(i..L)]
+			v12=vt[(1::i-1),(i..K)]
+		}
+		u22=ut[(i::L),(i..L)]
+		v22=vt[(i::K),(i..K)]
+
+// Symmetric square roots of u22*u22' and v22*v22' via eigendecomposition
+		symeigensystem(u22*u22', evec, eval)
+		u22v=evec
+		u22d=diag(eval)
+		u22h=u22v*(u22d:^0.5)*u22v'
+
+		symeigensystem(v22*v22', evec, eval)
+		v22v=evec
+		v22d=diag(eval)
+		v22h=v22v*(v22d:^0.5)*v22v'
+
+		if (i>1) {
+			aq=(u12 \ u22)*luinv(u22)*u22h
+			bq=v22h*luinv(v22')*(v12 \ v22)'
+		}
+		else {
+			aq=u22*luinv(u22)*u22h
+			bq=v22h*luinv(v22')*v22'
+		}
+
+// lab is lambda_q in paper (eqn below equation 21, p. 104)
+// vlab is omega_q in paper (eqn 19 in paper, p. 104)
+		lab=(bq#aq')*vecth
+		vlab=(bq#aq')*vhat*(bq#aq')'
+
+// Symmetrize if numerical inaccuracy means it isn't
+		_makesymmetric(vlab)
+		vlabinv=invsym(vlab)
+// rk stat Assumption 2: vlab (omega_q in paper) is nonsingular. Detected by a zero on the diagonal,
+// since when returning a generalized inverse, Stata/Mata choose the generalized inverse that
+// sets entire column(s)/row(s) to zeros.
+// Save df and rank even if test stat not available.
+		df=(L-i+1)*(K-i+1)
+		rkmatrix[i-firstrank+1,2]=df
+		rkmatrix[i-firstrank+1,4]=i-1
+		if (diag0cnt(vlabinv)>0) {
+printf("\n{text:Warning: covariance matrix omega_%f}", i-1)
+printf("{text: not full rank; test of rank %f}", i-1)
+printf("{text: unavailable}\n")
+		}
+// Note not multiplying by N - already incorporated in vhat.
+		else {
+// Quadratic form lambda_q' * omega_q^-1 * lambda_q ~ chi2(df) under H0
+			rk=lab'*vlabinv*lab
+			pvalue=chi2tail(df, rk)
+			rkmatrix[i-firstrank+1,1]=rk
+			rkmatrix[i-firstrank+1,3]=pvalue
+		}
+// end of test loop
+	}
+
+// insert squared (=eigenvalues if canon corr) and unsquared canon correlations
+	for (i=firstrank; i<=lastrank; i++) {
+		rkmatrix[i-firstrank+1,6]=cc[i-firstrank+1,1]
+		rkmatrix[i-firstrank+1,5]=ev[i-firstrank+1,1]
+	}
+	st_matrix("r(rkmatrix)", rkmatrix)
+	st_matrix("r(ccorr)", cc')
+	st_matrix("r(eval)",ev')
+// Save V matrix as in paper, without factor of 1/N
+	vhat=N*vhat*wf
+	st_matrix("r(V)", vhat)
+// Save S matrix as in ivreg2, with factor of 1/N
+	st_matrix("r(S)", shat)
+	st_numscalar("r(N)", N)
+// NOTE(review): N_clust and N_clust2 are not defined in this function;
+// presumably set as Mata externals by m_ivreg210_omega -- confirm.
+	if (clustvarname~="") {
+		st_numscalar("r(N_clust)", N_clust)
+	}
+	if (clustvarname2~="") {
+		st_numscalar("r(N_clust2)", N_clust2)
+	}
+// end of program
+}
+
+
+end
+
+****************************************** END ***************************************
+*********************************** ranktest.ado CODE ********************************
+
+
+program define ivreg210, eclass byable(recall) /* properties(svyj) */ sortpreserve
+	version 10.1
+	local lversion 03.1.10
+
+	local ivreg2_cmd "ivreg210"
+	local ranktest_cmd "ivreg210_ranktest"
+
+	if replay() {
+		syntax [, FIRST FFIRST RF Level(integer $S_level) NOHEader NOFOoter dropfirst droprf /*
+			*/ EForm(string) PLUS VERsion]
+		if "`version'" != "" & "`first'`ffirst'`rf'`noheader'`nofooter'`dropfirst'`droprf'`eform'`plus'" != "" {
+			di as err "option version not allowed"
+			error 198
+		}
+		if "`version'" != "" {
+			di in gr "`lversion'"
+			ereturn clear
+			ereturn local version `lversion'
+			exit
+		}
+		if `"`e(cmd)'"' != "`ivreg2_cmd'" {
+			error 301
+		}
+		if "`e(firsteqs)'" != "" & "`dropfirst'" == "" {
+* On replay, set flag so saved eqns aren't dropped
+			local savefirst "savefirst"
+		}
+		if "`e(rfeq)'" != "" & "`droprf'" == "" {
+* On replay, set flag so saved eqns aren't dropped
+			local saverf "saverf"
+		}
+	}
+	else {
+		local cmdline "`ivreg2_cmd' `*'"
+
+ 
syntax [anything(name=0)] [if] [in] [aw fw pw iw/] [, /* + */ FIRST FFIRST NOID NOCOLLIN SAVEFIRST SAVEFPrefix(name) /* + */ SMall Robust CLuster(varlist) kiefer dkraay(integer 0) /* + */ GMM GMM2s CUE ORTHOG(string) ENDOGtest(string) /* + */ PARTIAL(string) FWL(string) NOConstant Level(integer $S_level) /* + */ NOHEader NOFOoter NOOUTput title(string) subtitle(string) /* + */ DEPname(string) EForm(string) PLUS /* + */ BW(string) kernel(string) Tvar(varname) Ivar(varname)/* + */ LIML COVIV FULLER(real 0) Kclass(real 0) /* + */ REDundant(string) RF SAVERF SAVERFPrefix(name) /* + */ B0(string) SMATRIX(string) WMATRIX(string) sw psd0 psda /* + */ dofminus(integer 0) sdofminus(integer 0) NOPARTIALSMALL ] + + local n 0 + + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + if `s(stop)' { + error 198 + } + while `s(stop)'==0 { + if "`paren'"=="(" { + local n = `n' + 1 + if `n'>1 { +capture noi error 198 +di in red `"syntax is "(all instrumented variables = instrument variables)""' +exit 198 + } + gettoken p lhs : lhs, parse(" =") + while "`p'"!="=" { + if "`p'"=="" { +capture noi error 198 +di in red `"syntax is "(all instrumented variables = instrument variables)""' +di in red `"the equal sign "=" is required"' +exit 198 + } + local endo `endo' `p' + gettoken p lhs : lhs, parse(" =") + } +* To enable Cragg HOLS estimator, allow for empty endo list + local temp_ct : word count `endo' + if `temp_ct' > 0 { + tsunab endo : `endo' + } +* To enable OLS estimator with (=) syntax, allow for empty exexog list + local temp_ct : word count `lhs' + if `temp_ct' > 0 { + tsunab exexog : `lhs' + } + } + else { + local inexog `inexog' `lhs' + } + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + } + local 0 `"`lhs' `0'"' + + tsunab inexog : `inexog' + tokenize `inexog' + local lhs "`1'" + local 1 " " + local inexog `*' + + if "`gmm2s'`cue'" != "" & "`exexog'" == "" { + di in red "option `gmm2s'`cue' invalid: no excluded instruments specified" + exit 102 + } + 
+/* DISABLED IN IVREG210 - RANKTEST IS INTERNAL +* Check that -ranktest- is installed + capture `ranktest_cmd', version + if _rc != 0 { +di as err "Error: must have ranktest version `ranktestversion' or greater installed" +di as err "To install, from within Stata type " _c +di in smcl "{stata ssc install ranktest :ssc install ranktest}" + exit 601 + } + local vernum "`r(version)'" + if ("`vernum'" < "`ranktestversion'") | ("`vernum'" > "09.9.99") { +di as err "Error: must have ranktest version `ranktestversion' or greater installed" +di as err "Currently installed version is `vernum'" +di as err "To update, from within Stata type " _c +di in smcl "{stata ssc install ranktest, replace :ssc install ranktest, replace}" + exit 601 + } +*/ + +* Process options + +* Legacy gmm option + if "`gmm'" ~= "" { +di in ye "-gmm- is no longer a supported option; use -gmm2s- with the appropriate option" +di in ye " gmm = gmm2s robust" +di in ye " gmm robust = gmm2s robust" +di in ye " gmm bw() = gmm2s bw()" +di in ye " gmm robust bw() = gmm2s robust bw()" +di in ye " gmm cluster() = gmm2s cluster()" + local gmm2s "gmm2s" + if "`robust'`cluster'`bw'"=="" { +* 2-step efficient gmm with arbitrary heteroskedasticity + local robust "robust" + } + } + +* partial, including legacy FWL option + local partial "`partial' `fwl'" + local partial : list retokenize partial +* Need word option so that varnames with cons in them aren't zapped + local partial : subinstr local partial "_cons" "", all count(local partialcons) word + if `partialcons' > 0 & "`noconstant'"~="" { +di in r "Error: _cons listed in partial() but equation specifies -noconstant-." + error 198 + } + else if "`noconstant'"~="" { + local partialcons 0 + } + else if `partialcons' > 1 { +* Just in case of multiple _cons +di in r "Error: _cons listed more than once in partial()." 
+ error 198 + } + else if "`partial'" ~= "" { + local partialcons 1 + } + + if `fuller' != 0 { + local fulleropt "fuller(`fuller')" + } + if `kclass' != 0 { + local kclassopt "kclass(`kclass')" + } + +* Fuller implies LIML + if "`liml'" == "" & `fuller' != 0 { + local liml "liml" + } + +* b0 implies noid. Also check for incompatible options. + if "`b0'" ~= "" { + local noid "noid" + local b0opts "`gmm2s'`cue'`liml'`wmatrix'`kclassopt'" + if "`b0opts'" != "" { +* ...with spaces + local b0opts "`gmm2s' `cue' `liml' `wmatrix' `kclassopt'" + local b0opts : list retokenize b0opts +di as err "incompatible options: -b0- and `b0opts'" + exit 198 + } + } + + if "`gmm2s'" != "" & "`cue'" != "" { +di as err "incompatible options: 2-step efficient gmm and cue gmm" + exit 198 + } + +* savefprefix implies savefirst + if "`savefprefix'" != "" & "`savefirst'" == "" { + local savefirst "savefirst" + } + +* default savefprefix is _ivreg2_ + if "`savefprefix'" == "" { + local savefprefix "_`ivreg2_cmd'_" + } + +* saverfprefix implies saverf + if "`saverfprefix'" != "" & "`saverf'" == "" { + local saverf "saverf" + } + +* default saverfprefix is _ivreg2_ + if "`saverfprefix'" == "" { + local saverfprefix "_`ivreg2_cmd'_" + } + +* LIML/kclass incompatibilities + if "`liml'`kclassopt'" != "" { + if "`gmm2s'`cue'" != "" { +di as err "GMM estimation not available with LIML or k-class estimators" + exit 198 + } + if `fuller' < 0 { +di as err "invalid Fuller option" + exit 198 + } + if "`liml'" != "" & "`kclassopt'" != "" { +di as err "cannot use liml and kclass options together" + exit 198 + } +* Process kclass string + if `kclass' < 0 { +di as err "invalid k-class option" + exit 198 + } + } + + if "`cluster'`sw'"~="" { +* Cluster and SW imply robust + local robust "robust" + } + + if "`psd0'"~="" & "`psda'"~="" { +di as err "cannot use psd0 and psda options together" + exit 198 + } +* Macro psd has either psd0, psda or is empty + local psd "`psd0'`psda'" + +******************* Prepare for 
TS data ******************* + + if "`orthog'`endogtest'`redundant'`partial'" != "" { + capture tsunab orthog : `orthog' + capture tsunab endogtest : `endogtest' + capture tsunab redundant : `redundant' + capture tsunab partial : `partial' + } + +* TS operators not allowed with cluster, ivar or tvar. Captured in -syntax-. + +* Set flag for use of time-series variables. Will be =1 if a TS operator is used, =0 otherwise. + tsunab vnames : `lhs' `inexog' `exexog' `endo' + local vnames : subinstr local vnames "." ".", count(local tsused) + +* Routines below will call tsrevar or, from within Mata, st_tsrevar. +* This will create temporary variables according to how the data are tsset now. +* tsrevar remembers the temp vars created between calls, so we create +* them all now. `exp' is weight variable. + + tsrevar `lhs' `inexog' `exexog' `endo' + +* If kernel-robust, data must be tsset. +* Later code maintains tsset-ing for kernel-robust, but can change sort +* order for cluster if not kernel-robust, which would make ts operators +* fail subsequently, so a later call to -tsset- is needed to restore -tsset-ing. +* -sortpreserve- should take care of the rest following exit of ivreg2. +* User-supplied tvar and ivar checked if consistent with tsset. 
+ + capture tsset + if "`tvar'" == "" { + local tvar "`r(timevar)'" + } + else if "`tvar'"!="`r(timevar)'" { +di as err "invalid tvar() option - data already -tsset-" + exit 5 + } +* If no panel data, ivar will still be empty + if "`ivar'" == "" { + local ivar "`r(panelvar)'" + } + else if "`ivar'"!="`r(panelvar)'" { +di as err "invalid ivar() option - data already -tsset-" + exit 5 + } + if "`r(tdelta)'" != "" { + local tdelta = `r(tdelta)' + } + else { + local tdelta=1 + } + +*********************************************************** + +* dkraay(bw) = clustering on time-series var in a panel + kernel-robust +* Default is zero + if `dkraay' ~= 0 { + if "`ivar'" == "" | "`tvar'" == "" { +di as err "invalid use of dkraay option - must use tsset panel data" + exit 198 + } + local bw "`dkraay'" + if "`cluster'" == "" { + local cluster "`tvar'" + } + else if "`cluster'" ~= "`tvar'" { +di as err "invalid use of dkraay option - must cluster on `tvar' (or omit cluster option)" + exit 198 + } + } + +*********************************************************** + +* HAC estimation. +* If bw is omitted, default `bw' is 0. +* If bw or kernel supplied, check/set `kernel'. +* Macro `kernel' is also used for indicating HAC in use. +* If bw or kernel not supplied, set bw=0 so from here on, bw is real. + if "`bw'" == "" & "`kernel'" == "" { + local bw=0 + } + else { +* Check it's a valid kernel and replace with unabbreviated kernel name; check bw. +* s_ivreg210_vkernel is in livreg2 mlib. 
+ mata: s_ivreg210_vkernel("`kernel'", "`bw'", "`ivar'") + local kernel `r(kernel)' + local bw = `r(bw)' +* And force tsused flag to 1 + local tsused = 1 + } + +*********************************************************** + +* Weights +* fweight and aweight accepted as is +* iweight not allowed with robust or gmm and requires a trap below when used with summarize +* pweight is equivalent to aweight + robust +* but in HAC case, robust implied by `kernel' rather than `robust' +* Since we subsequently work with wvar, tsrevar of weight vars in weight `exp' not needed. + + tempvar wvar + if "`weight'" == "fweight" | "`weight'"=="aweight" { + local wtexp `"[`weight'=`exp']"' + qui gen double `wvar'=`exp' + } + if "`weight'" == "fweight" & "`kernel'" !="" { + di in red "fweights not allowed (data are -tsset-)" + exit 101 + } + if "`weight'" == "fweight" & "`sw'" != "" { + di in red "fweights currently not supported with -sw- option" + exit 101 + } + if "`weight'" == "iweight" { + if "`robust'`cluster'`gmm2s'`kernel'" !="" { + di in red "iweights not allowed with robust or gmm" + exit 101 + } + else { + local wtexp `"[`weight'=`exp']"' + qui gen double `wvar'=`exp' + } + } + if "`weight'" == "pweight" { + local wtexp `"[aweight=`exp']"' + qui gen double `wvar'=`exp' + local robust "robust" + } + if "`weight'" == "" { +* If no weights, define neutral weight variable + qui gen byte `wvar'=1 + } + + if `dofminus' > 0 { + local dofmopt "dofminus(`dofminus')" + } +* Stock-Watson robust SEs. 
+ if "`sw'" ~= "" { + if "`cluster'" ~= "" { +di as err "Stock-Watson robust SEs not supported with -cluster- option" + exit 198 + } + if "`kernel'" ~= "" { +di as err "Stock-Watson robust SEs not supported with -kernel- option" + exit 198 + } + capture xtset + if "`ivar'" == "" { + local ivar "`r(panelvar)'" + } + else if "`ivar'"!="`r(panelvar)'" { +di as err "invalid ivar() option - data already tsset or xtset" + exit 5 + } +* Exit with error if ivar is neither supplied nor tsset nor xtset + if "`ivar'"=="" { +di as err "Must -xtset- or -tsset- data or specify -ivar- with -sw- option" + exit 198 + } + qui describe, short varlist + local sortlist "`r(sortlist)'" + tokenize `sortlist' + if "`ivar'"~="`1'" { +di as err "Error - dataset must be sorted on panel var with -sw- option" + exit 198 + } + } + +******************************************************************************** + + marksample touse + markout `touse' `lhs' `inexog' `exexog' `endo' `cluster', strok +* Limit sample to where tvar is available, but only if TS operators used + if `tsused' { + markout `touse' `tvar' + } + +******************************************************************************** + +* Every time a weight is used, must multiply by scalar wf ("weight factor") +* wf=1 for no weights, fw and iw, wf = scalar that normalizes sum to be N if aw or pw + sum `wvar' if `touse' `wtexp', meanonly +* Weight statement + if "`weight'" ~= "" { +di in gr "(sum of wgt is " %14.4e `r(sum_w)' ")" + } + if "`weight'"=="" | "`weight'"=="fweight" | "`weight'"=="iweight" { +* Effective number of observations is sum of weight variable. 
+* If weight is "", weight var must be column of ones and N is number of rows + local wf=1 + local N=r(sum_w) + } + else if "`weight'"=="aweight" | "`weight'"=="pweight" { + local wf=r(N)/r(sum_w) + local N=r(N) + } + else { +* Should never reach here +di as err "ivreg2 error - misspecified weights" + exit 198 + } + + if `N'==0 { +di as err "no observations" + exit 2000 + } + +******************************************************************************** +* If kernel-robust, confirm tsset and check for gaps + if `bw' != 0 { +* Data must be tsset for time-series operators in code to work + capture tsset + if "`r(timevar)'" == "" { +di as err "must tsset data and specify timevar" + exit 5 + } + tsreport if `touse', panel + if `r(N_gaps)' != 0 { +di in gr "Warning: time variable " in ye "`tvar'" in gr " has " /* + */ in ye "`r(N_gaps)'" in gr " gap(s) in relevant range" + } + } + +******************************************************************************** + + tempvar tindex + qui gen `tindex'=1 if `touse' + qui replace `tindex'=sum(`tindex') if `touse' + +* Set local macro T and check that bw < (T-1) +* Also make sure only used sample is checked + if "`tvar'" ~= "" { + sum `tvar' if `touse', meanonly + local T = r(max)-r(min) + 1 + local T1 = `T' - 1 + if (`bw' > (`T1'/`tdelta')) & (`bw' ~= -1) { +di as err "invalid bandwidth in option bw() - cannot exceed timespan of data" + exit 198 + } + } + +* kiefer VCV = kernel(tru) bw(T) and no robust with tsset data + if "`kiefer'" ~= "" { + if "`ivar'" == "" | "`tvar'" == "" { +di as err "invalid use of kiefer option - must use tsset panel data" + exit 198 + } + if "`robust'" ~= "" { +di as err "incompatible options: kiefer and robust" + exit 198 + } + if "`kernel'" ~= "" & "`kernel'" ~= "Truncated" { +di as err "incompatible options: kiefer and bw/kernel" + exit 198 + } + if (`bw'~=0) & (`bw' ~= `T'/`tdelta') { +di as err "incompatible options: kiefer and bw" + exit 198 + } + local kernel "Truncated" + local bw=`T' + 
} + +*********** Column of ones for constant set up here ************** + + if "`noconstant'"=="" { +* If macro not created, automatically omitted. + tempvar ones + qui gen byte `ones' = 1 if `touse' + } + +************* Duplicates ***************** + + if "`noconstant'" != "" { + local rmcnocons "nocons" + } + +* Check for duplicates of variables +* To mimic official ivreg, in the case of duplicates, +* (1) inexog > endo +* (2) inexog > exexog +* (3) endo + exexog = inexog, as if it were "perfectly predicted" + local dupsen1 : list dups endo + local endo1 : list uniq endo + local dupsex1 : list dups exexog + local exexog1 : list uniq exexog + local dupsin1 : list dups inexog + local inexog1 : list uniq inexog +* Remove inexog from endo + local dupsen2 : list endo1 & inexog1 + local endo1 : list endo1 - inexog1 +* Remove inexog from exexog + local dupsex2 : list exexog1 & inexog1 + local exexog1 : list exexog1 - inexog1 +* Remove endo from exexog + local dupsex3 : list exexog1 & endo1 + local exexog1 : list exexog1 - endo1 + local dups "`dupsen1' `dupsex1' `dupsin1' `dupsen2' `dupsex2' `dupsex3'" + local dups : list uniq dups + + +*********** Collinearities ************************* + +* Also define full set of tempnames for matrices; some used now, some used later + tempname YY yy yyc + tempname XX X1X1 X2X2 X1Z X1Z1 XZ Xy + tempname ZZ Z1Z1 Z2Z2 Z1Z2 Z1X2 Zy ZY Z2y Z2Y + tempname XXinv X2X2inv ZZinv XPZXinv + + if "`nocollin'" == "" { + tempname ccmat + +* Collinearities check using canonical correlations approach +* First, check endo and drop or reclassify as exog regressor + local endo1_ct : word count `endo1' + if `endo1_ct' > 0 { + local Alist "`endo1'" + local Blist "`inexog1' `ones' `exexog1'" + + mata: s_cc_crossprods ("`Alist'", /* + */ "`Blist'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf') + + mat `X1X1'=r(AA) + mat `ZZ'=r(BB) + mat `ZZinv'=r(BBinv) + mat `X1Z'=r(AB) + +* Eigenvalue=1 => included endog is really included 
exogenous +* Eigenvalue=0 => included endog collinear with other included endog +* Corresponding column names give name of variable + mata: s_cc_collin ( "`ZZ'", /* + */ "`X1X1'", /* + */ "`X1Z'", /* + */ "`ZZinv'") + mat `ccmat'=r(ccmat) + +* Loop through endo1 to find eigenvalues=0 or 1 + local i=1 + foreach vn of varlist `endo1' { + if round(`ccmat'[`i',`i'],10e-7)==0 { +* Collinear with another endog, so remove from endog list + local endo1 : list endo1-vn + local ncollin "`ncollin' `vn'" + } + if round(`ccmat'[`i',`i'],10e-7)==1 { +* Collinear with exogenous, so remove from endog and add to inexog + local endo1 : list endo1-vn + local inexog1 "`inexog1' `vn'" + local ecollin "`ecollin' `vn'" + } + local i=`i'+1 + } + } + +* Check inexog and exexog separately + local inexog1_ct : word count `inexog1' `ones' + if `inexog1_ct' > 1 { + qui _rmcoll `inexog1' if `touse' `wtexp', `noconstant' + local todrop "`r(varlist)'" + local todrop : list inexog1-todrop + local inexog1 "`r(varlist)'" + local collin "`collin' `todrop'" + } + local exexog1_ct : word count `exexog1' + if `exexog1_ct' > 1 { + qui _rmcoll `exexog1' if `touse' `wtexp', nocons + local todrop "`r(varlist)'" + local todrop : list exexog1-todrop + local exexog1 "`r(varlist)'" + local collin "`collin' `todrop'" + } + +* Check exexog vs. 
inexeg and drop from former if collinear +* Eigenvalue=1 => exexog is collinear with included exogenous + local exexog1_ct : word count `exexog1' + local inexog1_ct : word count `inexog1' `ones' + if `exexog1_ct' > 0 & `inexog1_ct' > 0 { + local Alist "`exexog1'" + local Blist "`inexog1' `ones'" + + mata: s_cc_crossprods ("`Alist'", /* + */ "`Blist'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf') + + mat `Z1Z1'=r(AA) + mat `X2X2'=r(BB) + mat `X2X2inv'=r(BBinv) + mat `Z1X2'=r(AB) + + mata: s_cc_collin ( "`X2X2'", /* + */ "`Z1Z1'", /* + */ "`Z1X2'", /* + */ "`X2X2inv'") + mat `ccmat'=r(ccmat) + +* Loop through exexog1 to find eigenvalues=1 + local i=1 + foreach vn of varlist `exexog1' { + if round(`ccmat'[`i',`i'],10e-7)==1 { +* Excluded exog collinear with included exog, so remove from exexog list + local exexog1 : list exexog1-vn + local collin "`collin' `vn'" + } + local i=`i'+1 + } + } + +* Some collinearities involving inexog/exog wont' be caught by method above, +* so call _rmcoll to catch any remaining ones. 
+ capture _rmcoll `inexog1' `exexog1' if `touse' `wtexp', `rmcnocons' + if _rc == 908 { +di as err "matsize too small" + exit 908 + } + if r(k_omitted) > 0 { + local allinex "`inexog1' `exexog1'" + local noncollin "`r(varlist)'" + local inexcollin : list allinex - noncollin + local inexog1 : list inexog1 - inexcollin + local exexog1 : list exexog1 - inexcollin + local collin "`collin' `inexcollin'" + } + +* Finally, add dropped endogenous to collinear list, trimming down to "" if empty + local collin "`collin' `ncollin'" + local collin : list clean collin + +* Collinearity and duplicates warning messages, if necessary + if "`dups'" != "" { +di in gr "Warning - duplicate variables detected" +di in gr "Duplicates:" _c + Disp `dups', _col(21) + } + if "`ecollin'" != "" { +di in gr "Warning - endogenous variable(s) collinear with instruments" +di in gr "Vars now exogenous:" _c + Disp `ecollin', _col(21) + } + if "`collin'" != "" { +di in gr "Warning - collinearities detected" +di in gr "Vars dropped:" _c + Disp `collin', _col(21) + } + } + + +**** End of collinearities block ************ + +**** Partial-out block ****************** + +* `partial' has all to be partialled out except for constant + if "`partial'" != "" | `partialcons'==1 { + preserve + local partialdrop : list inexog - inexog1 + local partial1 : list partial - partialdrop + local partialcheck : list partial1 - inexog1 + if ("`partialcheck'"~="") { +di in r "Error: `partialcheck' listed in partial() but not in list of regressors." + error 198 + } + local inexog1 : list inexog1 - partial1 +* Check that cluster, weight, tvar or ivar variables won't be transformed + local allvars "`lhs' `inexog' `endo' `exexog'" + if "`cluster'"~="" { + local pvarcheck : list cluster in allvars + if `pvarcheck' { +di in r "Error: cannot use cluster variable `cluster' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." 
+ error 198 + } + } + if "`tvar'"~="" { + local pvarcheck : list tvar in allvars + if `pvarcheck' { +di in r "Error: cannot use time variable `tvar' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`ivar'"~="" { + local pvarcheck : list ivar in allvars + if `pvarcheck' { +di in r "Error: cannot use panel variable `ivar' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`wtexp'"~="" { + tokenize `exp', parse("*/()+-^&|~") + local wvartokens `*' + local nwvarnames : list allvars - wvartokens + local wvarnames : list allvars - nwvarnames + if "`wvarnames'"~="" { +di in r "Error: cannot use weight variables as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } +* Constant is partialled out, unless nocons already specified in the first place + capture drop `ones' + local ones "" + tempname partial_resid + foreach var of varlist `lhs' `inexog1' `endo1' `exexog1' { + qui regress `var' `partial1' if `touse' `wtexp', `noconstant' + qui predict double `partial_resid' if `touse', resid + qui replace `var' = `partial_resid' + drop `partial_resid' + } + local partial_ct : word count `partial1' + if "`noconstant'" == "" { +* partial_ct used for small-sample adjustment to regression F-stat + local partial_ct = `partial_ct' + 1 + local noconstant "noconstant" + } + } + else { +* Set count of partial vars to zero if option not used + local partial_ct 0 + local partialcons 0 + } +* Add partial_ct to small dof adjustment sdofminus + if "`nopartialsmall'"=="" { + local sdofminus = `sdofminus'+`partial_ct' + } + +********************************************* + + local insts1 `inexog1' `exexog1' + local rhs1 `endo1' `inexog1' + local iv1_ct : word count `insts1' + local rhs1_ct : word count `rhs1' + local endo1_ct : word count `endo1' + local exex1_ct : word count `exexog1' + local endoexex1_ct : word 
count `endo1' `exexog1' + local inexog1_ct : word count `inexog1' + + if "`noconstant'" == "" { + local cons 1 + } + else { + local cons 0 + } + +* Counts modified to include constant if appropriate + local iv1_ct = `iv1_ct' + `cons' + local rhs1_ct = `rhs1_ct' + `cons' + + if `rhs1_ct' == 0 { + di in red "error: no regressors specified" + exit 102 + } + + if "`nocollin'" == "" { +* If collinearity check has been done, iv_ct=iv1_ct and rhs_ct=rhs1_ct + local iv_ct = `iv1_ct' + local rhs_ct = `rhs1_ct' + } + else { +* If no full collinearity check, still need to do careful count of Xs and Zs. + qui _rmcoll `endo1' `inexog1', `noconstant' + local rhs_ct : word count `r(varlist)' + qui _rmcoll `exexog1' `inexog1', `noconstant' + local iv_ct : word count `r(varlist)' + local iv_ct = `iv_ct' + `cons' + local rhs_ct = `rhs_ct' + `cons' + } + + if `rhs1_ct' > `iv1_ct' { + di in red "equation not identified; must have at " /* + */ "least as many instruments not in" + di in red "the regression as there are " /* + */ "instrumented variables" + exit 481 + } + + if `bw' != 0 { + local bwopt "bw(`bw')" + } + if "`kernel'"!="" { + local kernopt "kernel(`kernel')" + } +* If depname not provided (default) name is lhs variable + if "`depname'"=="" { + local depname `lhs' + } + +*************** Commonly used matrices (reprise) ************ + + mata: s_crossprods ("`lhs'", /* + */ "`endo1'", /* + */ "`inexog1' `ones'", /* + */ "`exexog1'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N') + mat `XX'=r(XX) + mat `X1X1'=r(X1X1) + mat `X1Z'=r(X1Z) + mat `ZZ'=r(ZZ) + mat `Z2Z2'=r(Z2Z2) + mat `Z1Z2'=r(Z1Z2) + mat `XZ'=r(XZ) + mat `Xy'=r(Xy) + mat `Zy'=r(Zy) + mat `YY'=r(YY) + scalar `yy'=r(yy) + scalar `yyc'=r(yyc) + mat `ZY'=r(ZY) + mat `Z2y'=r(Z2y) + mat `Z2Y'=r(Z2Y) + mat `XXinv'=r(XXinv) + mat `ZZinv'=r(ZZinv) + mat `XPZXinv'=r(XPZXinv) + + +********** CLUSTER SETUP ********************************************** + +* Mata code requires data are sorted 
on (1) the first var cluster if there +* is only one cluster var; (2) on the 3rd and then 1st if two-way clustering, +* unless (3) two-way clustering is combined with kernel option, in which case +* the data are tsset and sorted on panel id (first cluster variable) and time +* id (second cluster variable). +* Second cluster var is optional and requires an identifier numbered 1..N_clust2, +* unless combined with kernel option, in which case it's the time variable. +* Third cluster var is the intersection of 1 and 2, unless combined with kernel +* opt, in which case it's unnecessary. +* Sorting on "cluster3 cluster1" means that in Mata, panelsetup works for +* both, since cluster1 nests cluster3. +* Note that it is possible to cluster on time but not panel, in which case +* cluster1 is time, cluster2 is empty and data are sorted on panel-time. +* Note also that if data are sorted here but happen to be tsset, will need +* to be re-tsset after estimation code concludes. + + if "`cluster'"!="" { + local clopt "cluster(`cluster')" + tokenize `cluster' + local cluster1 "`1'" + local cluster2 "`2'" + if "`kernel'"~="" { +* kernel requires either that cluster1 is time var and cluster2 is empty +* or that cluster1 is panel var and cluster2 is time var. +* Either way, data must be tsset and sorted for panel data. + if "`cluster2'"~="" { +* Allow backwards order + if "`cluster1'"=="`tvar'" & "`cluster2'"=="`ivar'" { + local cluster1 "`2'" + local cluster2 "`1'" + } + if "`cluster1'"~="`ivar'" | "`cluster2'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset panel & time vars." +di as err " tsset panel var=`ivar'; tsset time var=`tvar'; cluster vars=`cluster1',`cluster2'" + exit 198 + } + } + else { + if "`cluster1'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset time variable." 
+di as err " tsset time var=`tvar'; cluster var=`cluster1'" + exit 198 + } + } + } +* Simple way to get quick count of 1st cluster variable without disrupting sort +* clusterid1 is numbered 1.._Nclust1. + tempvar clusterid1 + qui egen `clusterid1'=group(`cluster1') if `touse' + sum `clusterid1' if `touse', meanonly + if "`cluster2'"=="" { + local N_clust=r(max) + local N_clust1=. + local N_clust2=. + if "`kernel'"=="" { +* Single level of clustering and no kernel-robust, so sort on single cluster var. +* kernel-robust already sorted via tsset. + sort `cluster1' + } + } + else { + local N_clust1=r(max) + if "`kernel'"=="" { + tempvar clusterid2 clusterid3 +* New cluster id vars are numbered 1..N_clust2 and 1..N_clust3 + qui egen `clusterid2'=group(`cluster2') if `touse' + qui egen `clusterid3'=group(`cluster1' `cluster2') if `touse' +* Two levels of clustering and no kernel-robust, so sort on cluster3/nested in/cluster1 +* kernel-robust already sorted via tsset. + sort `clusterid3' `cluster1' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) + } + else { +* Need to create this only to count the number of clusters + tempvar clusterid2 + qui egen `clusterid2'=group(`cluster2') if `touse' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) +* Now replace with original variable + local clusterid2 `cluster2' + } + local N_clust=min(`N_clust1',`N_clust2') + } + } + else { +* No cluster options but for Mata purposes, set N_clust=0 + local N_clust=0 + } + +************************************************************************************************ + + tempname b W S V beta lambda j jp rss mss rmse sigmasq rankV rankS + tempname arubin arubinp arubin_lin arubin_linp + tempname r2 r2_a r2u r2c F Fp Fdf2 ivest + + local cnb "`endo1' `inexog1'" + local cnZ "`exexog1' `inexog1'" + if "`noconstant'"=="" { + local cnb "`cnb' _cons" + local cnZ "`cnZ' _cons" + } + + tempvar resid + qui gen double `resid'=. 
+ +******************************************************************************************* +* LIML +******************************************************************************************* + + if "`liml'`kclassopt'"~="" { + + mata: s_liml( "`ZZ'", /* + */ "`XX'", /* + */ "`XZ'", /* + */ "`Zy'", /* + */ "`Z2Z2'", /* + */ "`YY'", /* + */ "`ZY'", /* + */ "`Z2Y'", /* + */ "`Xy'", /* + */ "`ZZinv'", /* + */ "`lhs'", /* + */ "`lhs' `endo1'", /* + */ "`resid'", /* + */ "`endo1' `inexog1' `ones'", /* + */ "`endo1'", /* + */ "`exexog1' `inexog1' `ones'", /* + */ "`exexog1'", /* + */ "`inexog1' `ones'", /* + */ `fuller', /* + */ `kclass', /* + */ "`coviv'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ "`robust'", /* + */ "`clusterid1'", /* + */ "`clusterid2'", /* + */ "`clusterid3'", /* + */ `bw', /* + */ "`kernel'", /* + */ "`sw'", /* + */ "`psd'", /* + */ "`ivar'", /* + */ "`tvar'", /* + */ "`tindex'", /* + */ `tdelta', /* + */ `dofminus') + + mat `b'=r(beta) + mat `S'=r(S) + mat `V'=r(V) + scalar `lambda'=r(lambda) + local kclass=r(kclass) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + + scalar `arubin'=(`N'-`dofminus')*ln(`lambda') + scalar `arubin_lin'=(`N'-`dofminus')*(`lambda'-1) + + } + +******************************************************************************************* +* OLS, IV and 2SGMM. Also enter to get CUE starting values. 
+************************************************************************************************ + if "`liml'`kclassopt'`b0'"=="" { +* Check user-supplied S matrix + if "`smatrix'" != "" { + tempname S0 + matrix `S0'=`smatrix' +* Rearrange/select columns to mat IV matrix + capture matsort `S0' "`cnZ'" + local srows = rowsof(`S0') + local scols = colsof(`S0') + local zcols : word count `cnZ' + if _rc ~= 0 | (`srows'~=`zcols') | (`scols'~=`zcols') { +di as err "-smatrix- option error: supplied matrix columns/rows do not match IV list" +exit 198 + } + if issymmetric(`S0')==0 { +di as err "-smatrix- option error: supplied matrix is not symmetric" +exit 198 + } + } + +* First call to s_gmm. +* If W or S supplied, calculates GMM beta and residuals +* If b0 supplied, calculates residuals +* If none of the above supplied, calculates GMM beta using default IV weighting matrix and residuals + + mata: s_gmm1s( "`ZZ'", /* + */ "`XX'", /* + */ "`XZ'", /* + */ "`Zy'", /* + */ "`ZZinv'", /* + */ "`lhs'", /* + */ "`resid'", /* + */ "`endo1' `inexog1' `ones'", /* + */ "`exexog1' `inexog1' `ones'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ "`wmatrix'", /* + */ "`S0'", /* + */ `dofminus') + + mat `b'=r(beta) + mat `W'=r(W) + +* Block calls s_omega to get cov matrix of orthog conditions, if not supplied + if "`smatrix'"~="" { + mat `S'=`S0' + } + else { + +* NB: xtivreg2 calls ivreg2 with data sorted on ivar and optionally tvar. +* Stock-Watson adjustment -sw- assumes data are sorted on ivar. Checked at start of ivreg2. + +* call abw code if bw() is defined and bw(auto) selected + if `bw' != 0 { + if `bw' == -1 { + tempvar abwtouse + gen byte `abwtouse' = (`resid' < .) 
+ abw `resid' `exexog1' `inexog1' `abwtouse', /* + */ tindex(`tindex') nobs(`N') tobs(`T') noconstant kernel(`kernel') + local bw `r(abw)' + local bwopt "bw(`bw')" + local bwchoice "`r(bwchoice)'" + } + } +* S covariance matrix of orthogonality conditions +// cfb B102 +// loc klc = cond("`kernel'" == "Quadratic-Spectral", "Quadratic spectral", "`kernel'") + mata: s_omega( "`ZZ'", /* + */ "`resid'", /* + */ "`exexog1' `inexog1' `ones'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ "`robust'", /* + */ "`clusterid1'", /* + */ "`clusterid2'", /* + */ "`clusterid3'", /* + */ `bw', /* + */ "`kernel'", /* + */ "`sw'", /* + */ "`psd'", /* + */ "`ivar'", /* + */ "`tvar'", /* + */ "`tindex'", /* + */ `tdelta', /* + */ `dofminus') + mat `S'=r(S) + } + +* By this point: `b' has 1st step beta +* `resid' has resids from the above beta +* `S' has vcv of orthog conditions using either `resid' or user-supplied `S0' + +* Efficient IV. S calculated above. W replaced here. + if "`gmm2s'`robust'`cluster'`kernel'"=="" { + mata: s_egmm( "`ZZ'", /* + */ "`XX'", /* + */ "`XZ'", /* + */ "`Zy'", /* + */ "`ZZinv'", /* + */ "`lhs'", /* + */ "`resid'", /* + */ "`endo1' `inexog1' `ones'", /* + */ "`exexog1' `inexog1' `ones'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ "`S'", /* + */ `dofminus') + mat `W'=r(W) + } + +* Inefficient IV. S, W and b calculated above. + if "`gmm2s'"=="" & "`robust'`cluster'`kernel'"~="" { + mata: s_iegmm( "`ZZ'", /* + */ "`XX'", /* + */ "`XZ'", /* + */ "`Zy'", /* + */ "`lhs'", /* + */ "`resid'", /* + */ "`endo1' `inexog1' `ones'", /* + */ "`exexog1' `inexog1' `ones'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ "`W'", /* + */ "`S'", /* + */ "`b'", /* + */ `dofminus') + } +* 2-step efficient GMM. S calculated above, b and W are empty. 
+ if "`gmm2s'"~="" { + mata: s_egmm( "`ZZ'", /* + */ "`XX'", /* + */ "`XZ'", /* + */ "`Zy'", /* + */ "`ZZinv'", /* + */ "`lhs'", /* + */ "`resid'", /* + */ "`endo1' `inexog1' `ones'", /* + */ "`exexog1' `inexog1' `ones'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ "`S'", /* + */ `dofminus') + mat `b'=r(beta) + mat `W'=r(W) + } + + mat `V'=r(V) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + +* Finished with non-CUE/LIML block + } +*************************************************************************************** +* Block for cue gmm +******************************************************************************************* + if "`cue'`b0'" != "" { + +* s_gmmcue is passed initial b from IV/2-step GMM block above +* OR user-supplied b0 for evaluation of CUE obj function at b0 + mata: s_gmmcue( "`ZZ'", /* + */ "`XZ'", /* + */ "`lhs'", /* + */ "`resid'", /* + */ "`endo1' `inexog1' `ones'", /* + */ "`exexog1' `inexog1' `ones'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ "`robust'", /* + */ "`clusterid1'", /* + */ "`clusterid2'", /* + */ "`clusterid3'", /* + */ `bw', /* + */ "`kernel'", /* + */ "`sw'", /* + */ "`psd'", /* + */ "`ivar'", /* + */ "`tvar'", /* + */ "`tindex'", /* + */ `tdelta', /* + */ "`b'", /* + */ "`b0'", /* + */ `dofminus') + + mat `b'=r(beta) + mat `S'=r(S) + mat `W'=r(W) + mat `V'=r(V) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + + } + +**************************************************************** +* Done with estimation blocks +**************************************************************** + + mat colnames `b' = `cnb' + mat colnames `V' = `cnb' + mat rownames `V' = `cnb' + mat colnames `S' = `cnZ' + mat rownames `S' = `cnZ' +* No W matrix for LIML or kclass + capture mat colnames `W' = `cnZ' + capture mat rownames `W' = `cnZ' + tempname tempmat + mat `tempmat'=syminv(`V') + scalar 
`rankV'=rowsof(`tempmat') - diag0cnt(`tempmat') + mat `tempmat'=syminv(`S') + scalar `rankS'=rowsof(`tempmat') - diag0cnt(`tempmat') + +******************************************************************************************* +* RSS, counts, dofs, F-stat, small-sample corrections +******************************************************************************************* + + scalar `rmse'=sqrt(`sigmasq') + if "`noconstant'"=="" { + scalar `mss'=`yyc' - `rss' + } + else { + scalar `mss'=`yy' - `rss' + } + + local Fdf1 = `rhs_ct' - `cons' + local df_m = `rhs_ct' - `cons' + (`sdofminus'-`partialcons') + +* Residual dof + if "`cluster'"=="" { +* Use int(`N') because of non-integer N with iweights, and also because of +* possible numeric imprecision with N returned by above. + local df_r = int(`N') - `rhs_ct' - `dofminus' - `sdofminus' + } + else { +* To match Stata, subtract 1 + local df_r = `N_clust' - 1 + } + +* Sargan-Hansen J dof and p-value +* df=0 doesn't guarantee j=0 since can be call to get value of CUE obj fn + local jdf = `iv_ct' - `rhs_ct' + if `jdf' == 0 & "`b0'"=="" { + scalar `j' = 0 + } + else { + scalar `jp' = chiprob(`jdf',`j') + } + if "`liml'"~="" { + scalar `arubinp' = chiprob(`jdf',`arubin') + scalar `arubin_linp' = chiprob(`jdf',`arubin_lin') + } + +* Small sample corrections for var-cov matrix. +* If robust, the finite sample correction is N/(N-K), and with no small +* we change this to 1 (a la Davidson & MacKinnon 1993, p. 554, HC0). +* If cluster, the finite sample correction is (N-1)/(N-K)*M/(M-1), and with no small +* we change this to 1 (a la Wooldridge 2002, p. 193), where M=number of clusters. 
+ + if "`small'" != "" { + if "`cluster'"=="" { + matrix `V'=`V'*(`N'-`dofminus')/(`N'-`rhs_ct'-`dofminus'-`sdofminus') + } + else { + matrix `V'=`V'*(`N'-1)/(`N'-`rhs_ct'-`sdofminus') /* + */ * `N_clust'/(`N_clust'-1) + } + scalar `sigmasq'=`rss'/(`N'-`rhs_ct'-`dofminus'-`sdofminus') + scalar `rmse'=sqrt(`sigmasq') + } + + scalar `r2u'=1-`rss'/`yy' + scalar `r2c'=1-`rss'/`yyc' + if "`noconstant'"=="" { + scalar `r2'=`r2c' + scalar `r2_a'=1-(1-`r2')*(`N'-1)/(`N'-`rhs_ct'-`dofminus'-`sdofminus') + } + else { + scalar `r2'=`r2u' + scalar `r2_a'=1-(1-`r2')*`N'/(`N'-`rhs_ct'-`dofminus'-`sdofminus') + } +* `N' is rounded down to nearest integer if iweights are used. +* If aw, pw or fw, should already be integer but use round in case of numerical imprecision. + local N=int(`N') + +* Fstat +* To get it to match Stata's, must post separately with dofs and then do F stat by hand +* in case weights generate non-integer obs and dofs +* Create copies so they can be posted + tempname FB FV + mat `FB'=`b' + mat `FV'=`V' + capture ereturn post `FB' `FV' +* If the cov matrix wasn't positive definite, the post fails with error code 506 + local rc = _rc + if `rc' != 506 { + local Frhs1 `rhs1' + capture test `Frhs1' + if "`small'" == "" { + if "`cluster'"=="" { + capture scalar `F' = r(chi2)/`Fdf1' * `df_r'/(`N'-`dofminus') + } + else { + capture scalar `F' = r(chi2)/`Fdf1' * /* +* sdofminus used here so that F-stat matches test stat from regression with no partial and small + */ (`N_clust'-1)/`N_clust' * (`N'-`rhs_ct'-`sdofminus')/(`N'-1) + } + } + else { + capture scalar `F' = r(chi2)/`Fdf1' + } + capture scalar `Fp'=Ftail(`Fdf1',`df_r',`F') + capture scalar `Fdf2'=`df_r' + } + +* If j==. or vcv wasn't full rank, then vcv problems and F is meaningless + if `j' == . | `rc'==506 { + scalar `F' = . + scalar `Fp' = . 
+ } + +* End of counts, dofs, F-stat, small sample corrections + +******************************************************************************************** +* Reduced form and first stage regression options +******************************************************************************************* +* Relies on proper count of (non-collinear) IVs generated earlier. +* Note that nocons option + constant in instrument list means first-stage +* regressions are reported with nocons option. First-stage F-stat therefore +* correctly includes the constant as an explanatory variable. + + if "`rf'`saverf'`first'`ffirst'`savefirst'" != "" & (`endo1_ct' > 0) & "`noid'"=="" { +* Restore original order if changed for mata code above + capture tsset +* Reduced form needed for AR first-stage test stat. Also estimated if requested. + tempname archi2 archi2p arf arfp ardf ardf_r sstat sstatp sstatdf + doRF "`lhs'" "`inexog1'" "`exexog1'" /* + */ `touse' `"`wtexp'"' `"`noconstant'"' `"`robust'"' /* + */ `"`clopt'"' `"`bwopt'"' `"`kernopt'"' /* + */ `"`saverfprefix'"' /* + */ "`dofminus'" "`sdofminus'" `"`sw'"' `"`psd'"' "`ivreg2_cmd'" + scalar `archi2'=r(archi2) + scalar `archi2p'=r(archi2p) + scalar `arf'=r(arf) + scalar `arfp'=r(arfp) + scalar `ardf'=r(ardf) + scalar `ardf_r'=r(ardf_r) + local rfeq "`r(rfeq)'" +* Drop saved rf results if needed only for first-stage estimations + if "`rf'`saverf'" == "" { + capture estimates drop `rfeq' + } +* Stock-Wright S statistic. Equiv to J LM test of exexog. +* Note that Z2==X2 so Z2Z2 is X2X2 and Z2y is X2y + tempname swresid + qui gen double `swresid'=. 
+ +* mata code requires sorting on cluster 3 / cluster 1 (if 2-way) or cluster 1 (if one-way) + if "`cluster'"!="" { + sort `clusterid3' `cluster1' + } + mata: s_sstat( "`Z2Z2'", /* + */ "`Z1Z2'", /* + */ "`Z2y'", /* + */ "`lhs'", /* + */ "`swresid'", /* + */ "`inexog1' `ones'", /* + */ "`exexog1' `inexog1' `ones'", /* + */ "`exexog1'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ "`robust'", /* + */ "`clusterid1'", /* + */ "`clusterid2'", /* + */ "`clusterid3'", /* + */ `bw', /* + */ "`kernel'", /* + */ "`sw'", /* + */ "`psd'", /* + */ "`ivar'", /* + */ "`tvar'", /* + */ "`tindex'", /* + */ `tdelta', /* + */ `dofminus' /* + */ ) + + scalar `sstat'=r(j) + scalar `sstatdf'=`ardf' + scalar `sstatp'=chiprob(`sstatdf',`sstat') + } + + if "`first'`ffirst'`savefirst'" != "" & (`endo1_ct' > 0) { +* Restore original order if changed for mata code above + capture tsset + + if `iv1_ct' > `iv_ct' { +di +di in gr "Warning: collinearities detected among instruments" +di in gr "1st stage tests of excluded exogenous variables may be incorrect" + } + local sdofmopt = "sdofminus(`sdofminus')" + if "`first'`savefirst'" ~= "" { + doFirst "`endo1'" "`inexog1'" "`exexog1'" /* + */ `touse' `"`wtexp'"' `"`noconstant'"' `"`robust'"' /* + */ `"`clopt'"' `"`bwopt'"' `"`kernopt'"' /* + */ `"`savefprefix'"' `"`dofmopt'"' `"`sdofmopt'"' /* + */ `"`sw'"' `"`psd'"' "`ivreg2_cmd'" + local firsteqs "`r(firsteqs)'" + } + +* Need to create Stata placeholders for Mata code so that Stata time-series operators can work on them + tempname firstmat + tempname fsresid + qui gen double `fsresid'=. + tsrevar `endo1' + local ts_endo1 "`r(varlist)'" + foreach x of local ts_endo1 { + tempname `x'_hat + qui gen double ``x'_hat' = . 
+ local endo1_hat "`endo1_hat' ``x'_hat'" + } + +* mata code requires sorting on cluster 3 / cluster 1 (if 2-way) or cluster 1 (if one-way) + if "`cluster'"!="" { + sort `clusterid3' `cluster1' + } + mata: s_ffirst( "`ZZ'", /* + */ "`XX'", /* + */ "`XZ'", /* + */ "`ZZinv'", /* + */ "`XXinv'", /* + */ "`XPZXinv'", /* + */ "`fsresid'", /* + */ "`endo1'", /* + */ "`endo1_hat'", /* + */ "`inexog1' `ones'", /* + */ "`exexog1'", /* + */ "`touse'", /* + */ "`weight'", /* + */ "`wvar'", /* + */ `wf', /* + */ `N', /* + */ `N_clust', /* + */ "`robust'", /* + */ "`clusterid1'", /* + */ "`clusterid2'", /* + */ "`clusterid3'", /* + */ `bw', /* + */ "`kernel'", /* + */ "`sw'", /* + */ "`psd'", /* + */ "`ivar'", /* + */ "`tvar'", /* + */ "`tindex'", /* + */ `tdelta', /* + */ `dofminus', /* + */ `sdofminus') + + mat `firstmat' = r(firstmat) + mat rowname `firstmat' = sheapr2 pr2 F df df_r pvalue APF APFdf1 APFdf2 APFp APchi2 APchi2p APr2 + mat colname `firstmat' = `endo1' + + } +* End of first-stage regression code + +******************************************************************************************* +* Re-tsset if necessary +************************************************************************************************ + + capture tsset + +******************************************************************************************* +* orthog option: C statistic (difference of Sargan statistics) +******************************************************************************************* +* Requires j dof from above + if "`orthog'"!="" { + tempname cj cstat cstatp +* Initialize cstat + scalar `cstat' = 0 +* Each variable listed must be in instrument list. 
+* To avoid overwriting, use cendo, cinexog1, cexexog, cendo_ct, cex_ct + local cendo1 "`endo1'" + local cinexog1 "`inexog1'" + local cexexog1 "`exexog1'" + local cinsts1 "`insts1'" + local crhs1 "`rhs1'" + local clist1 "`orthog'" + local clist_ct : word count `clist1' + +* Check to see if c-stat vars are in original list of all ivs +* cinexog1 and cexexog1 are after c-stat exog list vars have been removed +* cendo1 is endo1 after included exog being tested has been added + foreach x of local clist1 { + local llex_ct : word count `cexexog1' + local cexexog1 : list cexexog1 - x + local cex1_ct : word count `cexexog1' + local ok = `llex_ct' - `cex1_ct' + if (`ok'==0) { +* Not in excluded, check included and add to endog list if it appears + local llin_ct : word count `cinexog1' + local cinexog1 : list cinexog1 - x + local cin1_ct : word count `cinexog1' + local ok = `llin_ct' - `cin1_ct' + if (`ok'==0) { +* Not in either list +di in r "Error: `x' listed in orthog() but does not appear as exogenous." + error 198 + } + else { + local cendo1 "`cendo1' `x'" + } + } + } + +* If robust, HAC/AC or GMM (but not LIML or IV), create optimal weighting matrix to pass to ivreg2 +* by extracting the submatrix from the full S and then inverting. +* This guarantees the C stat will be non-negative. See Hayashi (2000), p. 220. +* Calculate C statistic with recursive call to ivreg2 +* Collinearities may cause problems, hence -capture-. 
+* smatrix works generally, including homoskedastic case with Sargan stat + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + if "`kernel'" != "" { + local bwopt "bw(`bw')" + local kernopt "kernel(`kernel')" + } +* clopt is omitted because it requires calculation of numbers of clusters, which is done +* only when S matrix is calculated + capture `ivreg2_cmd' `lhs' `cinexog1' /* + */ (`cendo1'=`cexexog1') /* + */ if `touse' `wtexp', `noconstant' /* + */ `options' `small' `robust' /* + */ `gmm2s' `bwopt' `kernopt' `dofmopt' `sw' `psd' /* + */ smatrix("`S'") noid nocollin + local rc = _rc + if `rc' == 481 { + scalar `cstat' = 0 + local cstatdf = 0 + } + else { + scalar `cj'=e(j) + local cjdf=e(jdf) + scalar `cstat' = `j' - `cj' + local cstatdf = `jdf' - `cjdf' + } + _estimates unhold `ivest' + scalar `cstatp'= chiprob(`cstatdf',`cstat') +* Collinearities may cause C-stat dof to differ from the number of variables in orthog() +* If so, set cstat=0 + if `cstatdf' != `clist_ct' { + scalar `cstat' = 0 + } + } +* End of orthog block + +******************************************************************************************* +* Endog option +******************************************************************************************* +* Uses recursive call with orthog + if "`endogtest'"!="" { + tempname estat estatp +* Initialize estat + scalar `estat' = 0 +* Each variable to test must be in endo list. +* To avoid overwriting, use eendo, einexog1, etc. 
+ local eendo1 "`endo1'" + local einexog1 "`inexog1'" + local einsts1 "`insts1'" + local elist1 "`endogtest'" + local elist_ct : word count `elist1' +* Check to see if endog test vars are in original endo1 list of endogeneous variables +* eendo1 and einexog1 are after endog test vars have been removed from endo and added to inexog + foreach x of local elist1 { + local llendo_ct : word count `eendo1' + local eendo1 : list eendo1 - x + local eendo1_ct : word count `eendo1' + local ok = `llendo_ct' - `eendo1_ct' + if (`ok'==0) { +* Not in endogenous list +di in r "Error: `x' listed in endog() but does not appear as endogenous." + error 198 + } + else { + local einexog1 "`einexog1' `x'" + } + } +* Recursive call to ivreg2 using orthog option to obtain endogeneity test statistic +* Collinearities may cause problems, hence -capture-. + capture { + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + capture `ivreg2_cmd' `lhs' `einexog1' /* + */ (`eendo1'=`exexog1') if `touse' /* + */ `wtexp', `noconstant' `robust' `clopt' /* + */ `gmm2s' `liml' `bwopt' `kernopt' /* + */ `small' `dofmopt' `sw' `psd' `options' /* + */ orthog(`elist1') noid nocollin + local rc = _rc + if `rc' == 481 { + scalar `estat' = 0 + local estatdf = 0 + } + else { + scalar `estat'=e(cstat) + local estatdf=e(cstatdf) + scalar `estatp'=e(cstatp) + } + _estimates unhold `ivest' +* Collinearities may cause endog stat dof to differ from the number of variables in endog() +* If so, set estat=0 + if `estatdf' != `elist_ct' { + scalar `estat' = 0 + } + } +* End of endogeneity test block + } + +******************************************************************************************* +* Rank identification and redundancy block +******************************************************************************************* + if 
`endo1_ct' > 0 & "`noid'"=="" { + +* id=underidentification statistic, wid=weak identification statistic + tempname idrkstat widrkstat iddf idp + tempname ccf cdf rkf cceval cdeval cd cc + tempname idstat widstat + +* Anderson canon corr underidentification statistic if homo, rk stat if not +* Need only id stat for testing full rank=(#cols-1) + qui `ranktest_cmd' (`endo1') (`exexog1') `wtexp' if `touse', partial(`inexog1') full /* + */ `noconstant' `robust' `clopt' `bwopt' `kernopt' + if "`cluster'"=="" { + scalar `idstat'=r(chi2)/r(N)*(`N'-`dofminus') + } + else { +* No dofminus adjustment needed for cluster-robust + scalar `idstat'=r(chi2) + } + mat `cceval'=r(ccorr) + mat `cdeval' = J(1,`endo1_ct',.) + forval i=1/`endo1_ct' { + mat `cceval'[1,`i'] = (`cceval'[1,`i'])^2 + mat `cdeval'[1,`i'] = `cceval'[1,`i'] / (1 - `cceval'[1,`i']) + } + local iddf = `iv_ct' - (`rhs_ct'-1) + scalar `idp' = chiprob(`iddf',`idstat') +* Cragg-Donald F statistic. +* Under homoskedasticity, Wald cd eigenvalue = cc/(1-cc) Anderson canon corr eigenvalue. 
+ scalar `cd'=`cdeval'[1,`endo1_ct'] + scalar `cdf'=`cd'*(`N'-`sdofminus'-`iv_ct'-`dofminus')/`exex1_ct' + +* Weak id statistic is Cragg-Donald F stat, rk Wald F stat if not + if "`robust'`cluster'`kernel'"=="" { + scalar `widstat'=`cdf' + } + else { +* Need only test of full rank + qui `ranktest_cmd' (`endo1') (`exexog1') `wtexp' if `touse', partial(`inexog1') full wald /* + */ `noconstant' `robust' `clopt' `bwopt' `kernopt' +* sdofminus used here so that F-stat matches test stat from regression with no partial + if "`cluster'"=="" { + scalar `rkf'=r(chi2)/r(N)*(`N'-`iv_ct'-`sdofminus'-`dofminus')/`exex1_ct' + } + else { + scalar `rkf'=r(chi2)/(`N'-1) /* + */ *(`N'-`iv_ct'-`sdofminus') /* + */ *(`N_clust'-1)/`N_clust' /`exex1_ct' + } + scalar `widstat'=`rkf' + } + } + +* LM redundancy test + if `endo1_ct' > 0 & "`redundant'" ~= "" & "`noid'"=="" { +* Use K-P rk statistics and LM version of test +* Statistic is the rank of the matrix of Z_1B*X_2, where Z_1B are the possibly redundant +* instruments and X_1 are the endogenous regressors; both have X_2 (exogenous regressors) +* and Z_1A (maintained excluded instruments) partialled out. LM test of rank is +* is numerically equivalent to estimation of set of RF regressions and performing +* standard LM test of possibly redundant instruments. + + local redlist1 "`redundant'" + local rexexog1 : list exexog1 - redlist1 + local notlisted : list redlist1 - exexog1 + if "`notlisted'" ~= "" { +di in r "Error: `notlisted' listed in redundant() but does not appear as excluded instrument." 
+ error 198 + } + local rexexog1_ct : word count `rexexog1' + if `rexexog1_ct' < `endo1_ct' { +di in r "Error: specification with redundant() option is unidentified (fails rank condition)" + error 198 + } +* LM version requires only -nullrank- rk statistics so would not need -all- option + tempname rkmatrix + qui `ranktest_cmd' (`endo1') (`redlist1') `wtexp' if `touse', partial(`inexog1' `rexexog1') null /* + */ `noconstant' `robust' `clopt' `bwopt' `kernopt' + mat `rkmatrix'=r(rkmatrix) + tempname redstat redp + local redlist_ct : word count `redlist1' +* dof adjustment needed because it doesn't use the adjusted S + if "`cluster'"=="" { + scalar `redstat' = `rkmatrix'[1,1]/r(N)*(`N'-`dofminus') + } + else { +* No dofminus adjustment needed for cluster-robust + scalar `redstat' = `rkmatrix'[1,1] + } + local reddf = `endo1_ct'*`redlist_ct' + scalar `redp' = chiprob(`reddf',`redstat') + } + +* End of identification stats block + +******************************************************************************************* +* Error-checking block +******************************************************************************************* + +* Check if adequate number of observations + if `N' <= `iv_ct' { +di in r "Error: number of observations must be greater than number of instruments" +di in r " including constant." + error 2001 + } + +* Check if robust VCV matrix is of full rank + if ("`gmm2s'`robust'`cluster'`kernel'" != "") & (`rankS' < `iv_ct') { +* Robust covariance matrix not of full rank means either a singleton dummy or too few +* clusters (in which case the indiv SEs are OK but no F stat or 2-step GMM is possible), +* or there are too many AC/HAC-lags, or the HAC covariance estimator +* isn't positive definite (possible with truncated and Tukey-Hanning kernels) +* Previous versions of ivreg2 exited if 2-step GMM but beta and VCV may be OK. +* Continue but J, F, and C stat (if present) all meaningless. 
+* Must set Sargan-Hansen j = missing so that problem can be reported in output. + scalar `j' = . + if "`orthog'"!="" { + scalar `cstat' = . + } + if "`endogtest'"!="" { + scalar `estat' = . + } + } + +* End of error-checking block + +********************************************************************************************** +* Post and display results. +******************************************************************************************* + +* restore data if preserved for partial option + if `partial_ct' { + restore + } + + if "`small'"!="" { + local NminusK = `N'-`rhs_ct'-`sdofminus' + capture ereturn post `b' `V', dep(`depname') obs(`N') esample(`touse') /* + */ dof(`NminusK') + } + else { + capture ereturn post `b' `V', dep(`depname') obs(`N') esample(`touse') + } + local rc = _rc + if `rc' == 504 { +di in red "Error: estimated variance-covariance matrix has missing values" + exit 504 + } + if `rc' == 506 { +di in red "Error: estimated variance-covariance matrix not positive-definite" + exit 506 + } + if `rc' > 0 { +di in red "Error: estimation failed - could not post estimation results" + exit `rc' + } + + ereturn local instd `endo' + local insts : colnames `S' +* Stata convention is to exclude constant from instrument list +* Need word option so that varnames with "_cons" in them aren't zapped + local insts : subinstr local insts "_cons" "", word + ereturn local insts `insts' + ereturn local inexog `inexog' + ereturn local exexog `exexog' + ereturn local partial `partial' + ereturn scalar inexog_ct=`inexog1_ct' + ereturn scalar exexog_ct=`exex1_ct' + ereturn scalar endog_ct =`endo1_ct' + ereturn scalar partial_ct =`partial_ct' + if "`collin'`ecollin'`dups'" != "" | `partial_ct' > 0 { + ereturn local collin `collin' + ereturn local ecollin `ecollin' + ereturn local dups `dups' + ereturn local instd1 `endo1' + ereturn local inexog1 `inexog1' + ereturn local exexog1 `exexog1' + ereturn local partial1 `partial1' + } + + if "`smatrix'" == "" { + ereturn 
matrix S `S' + } + else { +* Create a copy so posting doesn't zap the original + tempname Scopy + mat `Scopy'=`smatrix' + ereturn matrix S `Scopy' + } + +* No weighting matrix defined for LIML and kclass + if "`wmatrix'"=="" & "`liml'`kclassopt'"=="" { + ereturn matrix W `W' + } + else if "`liml'`kclassopt'"=="" { +* Create a copy so posting doesn't zap the original + tempname Wcopy + mat `Wcopy'=`wmatrix' + ereturn matrix W `Wcopy' + } + + if "`kernel'"!="" { + ereturn local kernel "`kernel'" + ereturn scalar bw=`bw' + ereturn local tvar "`tvar'" + if "`ivar'" ~= "" { + ereturn local ivar "`ivar'" + } + if "`bwchoice'" ~= "" { + ereturn local bwchoice "`bwchoice'" + } + } + + if "`small'"!="" { + ereturn scalar df_r=`df_r' + ereturn local small "small" + } + if "`nopartialsmall'"=="" { + ereturn local partialsmall "small" + } + + + if "`robust'" != "" { + local vce "robust" + } + if "`cluster1'" != "" { + if "`cluster2'"=="" { + local vce "`vce' cluster" + } + else { + local vce "`vce' two-way cluster" + } + } + if "`kernel'" != "" { + if "`robust'" != "" { + local vce "`vce' hac" + } + else { + local vce "`vce' ac" + } + local vce "`vce' `kernel' bw=`bw'" + } + if "`sw'" != "" { + local vce "`vce' sw" + } + if "`psd'" != "" { + local vce "`vce' `psd'" + } + local vce : list clean vce + local vce = lower("`vce'") + ereturn local vce `vce' + + if "`cluster'"!="" { + ereturn scalar N_clust=`N_clust' + ereturn local clustvar `cluster' + } + if "`cluster2'"!="" { + ereturn scalar N_clust1=`N_clust1' + ereturn scalar N_clust2=`N_clust2' + ereturn local clustvar1 `cluster1' + ereturn local clustvar2 `cluster2' + } + + if "`robust'`cluster'" != "" { + ereturn local vcetype "Robust" + } + + ereturn scalar df_m=`df_m' + ereturn scalar sdofminus=`sdofminus' + ereturn scalar dofminus=`dofminus' + ereturn scalar r2=`r2' + ereturn scalar rmse=`rmse' + ereturn scalar rss=`rss' + ereturn scalar mss=`mss' + ereturn scalar r2_a=`r2_a' + ereturn scalar F=`F' + ereturn scalar 
Fp=`Fp' + ereturn scalar Fdf1=`Fdf1' + ereturn scalar Fdf2=`Fdf2' + ereturn scalar yy=`yy' + ereturn scalar yyc=`yyc' + ereturn scalar r2u=`r2u' + ereturn scalar r2c=`r2c' + ereturn scalar rankzz=`iv_ct' + ereturn scalar rankxx=`rhs_ct' + if "`gmm2s'`robust'`cluster'`kernel'" != "" { + ereturn scalar rankS=`rankS' + } + ereturn scalar rankV=`rankV' + ereturn scalar ll = -0.5 * (`N'*ln(2*_pi) + `N'*ln(`rss'/`N') + `N') + +* Always save J. Also save as Sargan if homoskedastic; save A-R if LIML. + ereturn scalar j=`j' + ereturn scalar jdf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar jp=`jp' + } + if ("`robust'`cluster'"=="") { + ereturn scalar sargan=`j' + ereturn scalar sargandf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar sarganp=`jp' + } + } + if "`liml'"!="" { + ereturn scalar arubin=`arubin' + ereturn scalar arubin_lin=`arubin_lin' + if `j' != 0 & `j' != . { + ereturn scalar arubinp=`arubinp' + ereturn scalar arubin_linp=`arubin_linp' + } + ereturn scalar arubindf=`jdf' + } + + if "`orthog'"!="" { + ereturn scalar cstat=`cstat' + if `cstat'!=0 & `cstat' != . { + ereturn scalar cstatp=`cstatp' + ereturn scalar cstatdf=`cstatdf' + ereturn local clist `clist1' + } + } + + if "`endogtest'"!="" { + ereturn scalar estat=`estat' + if `estat'!=0 & `estat' != . 
{ + ereturn scalar estatp=`estatp' + ereturn scalar estatdf=`estatdf' + ereturn local elist `elist1' + } + } + + if `endo1_ct' > 0 & "`noid'"=="" { + ereturn scalar idstat=`idstat' + ereturn scalar iddf=`iddf' + ereturn scalar idp=`idp' + ereturn scalar cd=`cd' + ereturn scalar widstat=`widstat' + ereturn scalar cdf=`cdf' + capture ereturn matrix ccev=`cceval' + capture ereturn matrix cdev `cdeval' + capture ereturn scalar rkf=`rkf' + } + + if "`redundant'"!="" & "`noid'"=="" { + ereturn scalar redstat=`redstat' + ereturn scalar redp=`redp' + ereturn scalar reddf=`reddf' + ereturn local redlist `redlist1' + } + + if "`first'`ffirst'`savefirst'" != "" & `endo1_ct'>0 & "`noid'"=="" { +* Capture here because firstmat empty if mvs encountered in 1st stage regressions + capture ereturn matrix first `firstmat' + ereturn scalar arf=`arf' + ereturn scalar arfp=`arfp' + ereturn scalar archi2=`archi2' + ereturn scalar archi2p=`archi2p' + ereturn scalar ardf=`ardf' + ereturn scalar ardf_r=`ardf_r' + ereturn scalar sstat=`sstat' + ereturn scalar sstatp=`sstatp' + ereturn scalar sstatdf=`sstatdf' + ereturn local firsteqs `firsteqs' + } + if "`rf'`saverf'" != "" & `endo1_ct'>0 { + ereturn local rfeq `rfeq' + } + + ereturn local depvar `lhs' + + if "`liml'"!="" { + ereturn local model "liml" + ereturn scalar kclass=`kclass' + ereturn scalar lambda=`lambda' + if `fuller' > 0 & `fuller' < . 
{ + ereturn scalar fuller=`fuller' + } + } + else if "`kclassopt'" != "" { + ereturn local model "kclass" + ereturn scalar kclass=`kclass' + } + else if "`gmm2s'`cue'`b0'`wmatrix'"=="" { + if "`endo1'" == "" { + ereturn local model "ols" + } + else { + ereturn local model "iv" + } + } + else if "`cue'`b0'"~="" { + ereturn local model "cue" + } + else if "`gmm2s'"~="" { + ereturn local model "gmm2s" + } + else if "`wmatrix'"~="" { + ereturn local model "gmmw" + } + else { +* Should never enter here + ereturn local model "unknown" + } + + if "`weight'" != "" { + ereturn local wexp "=`exp'" + ereturn local wtype `weight' + } + ereturn local cmd `ivreg2_cmd' + ereturn local cmdline `cmdline' + ereturn local version `lversion' + ereturn scalar cons=`cons' + ereturn scalar partialcons=`partialcons' + + ereturn local predict "`ivreg2_cmd'_p" + + if "`e(model)'"=="gmm2s" & "`wmatrix'"=="" { + local title2 "2-Step GMM estimation" + } + else if "`e(model)'"=="gmm2s" & "`wmatrix'"~="" { + local title2 "2-Step GMM estimation with user-supplied first-step weighting matrix" + } + else if "`e(model)'"=="gmmw" { + local title2 "GMM estimation with user-supplied weighting matrix" + } + else if "`e(model)'"=="cue" & "`b0'"=="" { + local title2 "CUE estimation" + } + else if "`e(model)'"=="cue" & "`b0'"~="" { + local title2 "CUE evaluated at user-supplied parameter vector" + } + else if "`e(model)'"=="ols" { + local title2 "OLS estimation" + } + else if "`e(model)'"=="iv" { + local title2 "IV (2SLS) estimation" + } + else if "`e(model)'"=="liml" { + local title2 "LIML estimation" + } + else if "`e(model)'"=="kclass" { + local title2 "k-class estimation" + } + else { +* Should never reach here + local title2 "unknown estimation" + } + if "`e(vcetype)'" == "Robust" { + local hacsubtitle1 "heteroskedasticity" + } + if "`e(kernel)'"!="" & "`e(clustvar)'"=="" { + local hacsubtitle3 "autocorrelation" + } + if "`kiefer'"!="" { + local hacsubtitle3 "within-cluster autocorrelation (Kiefer)" + 
} + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local hacsubtitle3 "clustering on `e(clustvar)'" + } + else { + local hacsubtitle3 "clustering on `e(clustvar1)' and `e(clustvar2)'" + } + if "`e(kernel)'" != "" { + local hacsubtitle4 "and kernel-robust to common correlated disturbances (Driscoll-Kraay)" + } + } + if "`hacsubtitle1'"~="" & "`hacsubtitle3'" ~= "" { + local hacsubtitle2 " and " + } + if "`title'"=="" { + ereturn local title "`title1'`title2'" + } + else { + ereturn local title "`title'" + } + if "`subtitle'"~="" { + ereturn local subtitle "`subtitle'" + } + local hacsubtitle "`hacsubtitle1'`hacsubtitle2'`hacsubtitle3'" + if "`b0'"~="" { + ereturn local hacsubtitleB "Estimates based on supplied parameter vector" + } + else if "`hacsubtitle'"~="" & "`gmm2s'`cue'"~="" { + ereturn local hacsubtitleB "Estimates efficient for arbitrary `hacsubtitle'" + } + else if "`wmatrix'"~="" { + ereturn local hacsubtitleB "Efficiency of estimates dependent on weighting matrix" + } + else { + ereturn local hacsubtitleB "Estimates efficient for homoskedasticity only" + } + if "`hacsubtitle'"~="" { + ereturn local hacsubtitleV "Statistics robust to `hacsubtitle'" + } + else { + ereturn local hacsubtitleV "Statistics consistent for homoskedasticity only" + } + if "`hacsubtitle4'"~="" { + ereturn local hacsubtitleV2 "`hacsubtitle4'" + } + if "`sw'"~="" { + ereturn local hacsubtitleV "Stock-Watson heteroskedastic-robust statistics (BETA VERSION)" + } + } + +******************************************************************************************* +* Display results unless ivreg2 called just to generate stats or nooutput option + + if "`nooutput'" == "" { + if "`savefirst'`saverf'" != "" { + DispStored `"`saverf'"' `"`savefirst'"' `"`ivreg2_cmd'"' + } + if "`rf'" != "" { + DispRF + } + if "`first'" != "" { + DispFirst `"`ivreg2_cmd'"' + } + if "`first'`ffirst'" != "" { + DispFFirst `"`ivreg2_cmd'"' + } + if "`eform'"!="" { + local efopt "eform(`eform')" + } + 
DispMain `"`noheader'"' `"`plus'"' `"`efopt'"' `"`level'"' `"`nofooter'"' `"`ivreg2_cmd'"' + } + +* Drop first stage estimations unless explicitly saved or if replay + if "`savefirst'" == "" { + local firsteqs "`e(firsteqs)'" + foreach eqname of local firsteqs { + capture estimates drop `eqname' + } + ereturn local firsteqs + } + +* Drop reduced form estimation unless explicitly saved or if replay + if "`saverf'" == "" { + local eqname "`e(rfeq)'" + capture estimates drop `eqname' + ereturn local rfeq + } + +end + +******************************************************************************************* +* SUBROUTINES +******************************************************************************************* + +program define DispMain, eclass + args noheader plus efopt level nofooter helpfile + version 8.2 +* Prepare for problem resulting from rank(S) being insufficient +* Results from insuff number of clusters, too many lags in HAC, +* to calculate robust S matrix, HAC matrix not PD, singleton dummy, +* and indicated by missing value for j stat +* Macro `rprob' is either 1 (problem) or 0 (no problem) + capture local rprob ("`e(j)'"==".") + + if "`noheader'"=="" { + if "`e(title)'" ~= "" { +di in gr _n "`e(title)'" + local tlen=length("`e(title)'") +di in gr "{hline `tlen'}" + } + if "`e(subtitle)'" ~= "" { +di in gr "`e(subtitle)'" + } + if "`e(model)'"=="liml" | "`e(model)'"=="kclass" { +di in gr "k =" %7.5f `e(kclass)' + } + if "`e(model)'"=="liml" { +di in gr "lambda =" %7.5f `e(lambda)' + } + if e(fuller) > 0 & e(fuller) < . 
{ +di in gr "Fuller parameter=" %-5.0f `e(fuller)' + } + if "`e(hacsubtitleB)'" ~= "" { +di in gr _n "`e(hacsubtitleB)'" _c + } + if "`e(hacsubtitleV)'" ~= "" { +di in gr _n "`e(hacsubtitleV)'" + } + if "`e(hacsubtitleV2)'" ~= "" { +di in gr "`e(hacsubtitleV2)'" + } + if "`e(kernel)'"!="" { +di in gr " kernel=`e(kernel)'; bandwidth=" `e(bw)' + if "`e(bwchoice)'"!="" { +di in gr " `e(bwchoice)'" + } +di in gr " time variable (t): " in ye e(tvar) + if "`e(ivar)'" != "" { +di in gr " group variable (i): " in ye e(ivar) + } + } + di + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local N_clust `e(N_clust)' + local clustvar `e(clustvar)' + } + else { + local N_clust `e(N_clust1)' + local clustvar `e(clustvar1)' + } +di in gr "Number of clusters (`clustvar') = " _col(33) in ye %6.0f `N_clust' _continue + } +di in gr _col(55) "Number of obs = " in ye %8.0f e(N) + if "`e(clustvar2)'"!="" { +di in gr "Number of clusters (" "`e(clustvar2)'" ") = " _col(33) in ye %6.0f e(N_clust2) _continue + } +di in gr _c _col(55) "F(" %3.0f e(Fdf1) "," %6.0f e(Fdf2) ") = " + if e(F) < 99999 { +di in ye %8.2f e(F) + } + else { +di in ye %8.2e e(F) + } +di in gr _col(55) "Prob > F = " in ye %8.4f e(Fp) + +di in gr "Total (centered) SS = " in ye %12.0g e(yyc) _continue +di in gr _col(55) "Centered R2 = " in ye %8.4f e(r2c) +di in gr "Total (uncentered) SS = " in ye %12.0g e(yy) _continue +di in gr _col(55) "Uncentered R2 = " in ye %8.4f e(r2u) +di in gr "Residual SS = " in ye %12.0g e(rss) _continue +di in gr _col(55) "Root MSE = " in ye %8.4g e(rmse) +di + } + +* Display coefficients etc. 
+* Unfortunate but necessary hack here: to suppress message about cluster adjustment of +* standard error, clear e(clustvar) and then reset it after display + local cluster `e(clustvar)' + ereturn local clustvar + ereturn display, `plus' `efopt' level(`level') + ereturn local clustvar `cluster' + +* Display 1st footer with identification stats +* Footer not displayed if -nofooter- option or if pure OLS, i.e., model="ols" and Sargan-Hansen=0 + if ~("`nofooter'"~="" | (e(model)=="ols" & (e(sargan)==0 | e(j)==0))) { + +* Under ID test + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##idtest:Underidentification test}" + if "`e(vcetype)'`e(kernel)'"=="" { +di in gr _c " (Anderson canon. corr. LM statistic):" + } + else { +di in gr _c " (Kleibergen-Paap rk LM statistic):" + } +di in ye _col(71) %8.3f e(idstat) +di in gr _col(52) "Chi-sq(" in ye e(iddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(idp) +* IV redundancy statistic + if "`e(redlist)'"!="" { +di in gr "-redundant- option:" +di in smcl _c "{help `helpfile'##redtest:IV redundancy test}" +di in gr _c " (LM test of redundancy of specified instruments):" +di in ye _col(71) %8.3f e(redstat) +di in gr _col(52) "Chi-sq(" in ye e(reddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(redp) +di in gr "Instruments tested: " _c + Disp `e(redlist)', _col(23) + } +di in smcl in gr "{hline 78}" + } +* Report Cragg-Donald statistic + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##widtest:Weak identification test}" +di in gr " (Cragg-Donald Wald F statistic):" in ye _col(71) %8.3f e(cdf) + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr " (Kleibergen-Paap rk Wald F statistic):" in ye _col(71) %8.3f e(widstat) + } +di in gr _c "Stock-Yogo weak ID test critical values:" + local cdmissing=1 + if "`e(model)'"=="iv" | "`e(model)'"=="gmm2s" | "`e(model)'"=="gmmw" { + cdsy, type(ivbias5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(43) "5% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "30% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "15% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "25% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)==.) | "`e(model)'"=="cue" { + cdsy, type(limlsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "15% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(42) "20% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "25% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)<.) { + cdsy, type(fullrel5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "30% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "30% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + if `cdmissing' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). 
Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." + } + } + di in smcl in gr "{hline 78}" + } + +* Report either (a) Sargan-Hansen-C stats, or (b) robust covariance matrix problem +* e(model)="gmmw" means user-supplied weighting matrix and Hansen J using 2nd-step resids reported + if `rprob' == 0 { +* Display overid statistic + if "`e(vcetype)'" == "Robust" | "`e(model)'" == "gmmw" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } + else { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } +di in ye _col(71) %8.3f e(j) + if e(rankxx) < e(rankzz) { +di in gr _col(52) "Chi-sq(" in ye e(jdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(jp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + +* Display orthog option: C statistic (difference of Sargan statistics) + if e(cstat) != . { +* If C-stat = 0 then warn, otherwise output + if e(cstat) > 0 { +di in gr "-orthog- option:" + if "`e(vcetype)'" == "Robust" { +di in gr _c "Hansen J statistic (eqn. excluding suspect orthog. conditions): " + } + else { +di in gr _c "Sargan statistic (eqn. 
excluding suspect orthogonality conditions):" + } +di in ye _col(71) %8.3f e(j)-e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(jdf)-e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f chiprob(e(jdf)-e(cstatdf),e(j)-e(cstat)) +di in smcl _c "{help `helpfile'##ctest:C statistic}" +di in gr _c " (exogeneity/orthogonality of suspect instruments): " +di in ye _col(71) %8.3f e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f e(cstatp) +di in gr "Instruments tested: " _c + Disp `e(clist)', _col(23) + } + if e(cstat) == 0 { +di in gr _n "Collinearity/identification problems in eqn. excl. suspect orthog. conditions:" +di in gr " C statistic not calculated for -orthog- option" + } + } + } + else { +* Problem exists with robust VCV - notify and list possible causes +di in r "Warning: estimated covariance matrix of moment conditions not of full rank." + if e(rankxx) < e(rankzz) { +di in r " overidentification statistic not reported, and standard errors and" + } +di in r " model tests should be interpreted with caution." +di in r "Possible causes:" + if "`e(N_clust)'" != "" { +di in r " number of clusters insufficient to calculate robust covariance matrix" + } + if "`e(kernel)'" != "" { +di in r " covariance matrix of moment conditions not positive definite" +di in r " covariance matrix uses too many lags" + } +di in r " singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)" +di in r in smcl _c "{help `helpfile'##partial:partial}" +di in r " option may address problem." + } + +* Display endog option: endogeneity test statistic + if e(estat) != . 
{ +* If stat = 0 then warn, otherwise output + if e(estat) > 0 { +di in gr "-endog- option:" +di in smcl _c "{help `helpfile'##endogtest:Endogeneity test}" +di in gr _c " of endogenous regressors: " +di in ye _col(71) %8.3f e(estat) +di in gr _col(52) "Chi-sq(" in ye e(estatdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(estatp) +di in gr "Regressors tested: " _c + Disp `e(elist)', _col(23) + } + if e(estat) == 0 { +di in gr _n "Collinearity/identification problems in restricted equation:" +di in gr " Endogeneity test statistic not calculated for -endog- option" + } + } + + di in smcl in gr "{hline 78}" +* Display AR overid statistic if LIML and not robust + if "`e(model)'" == "liml" & "`e(vcetype)'" ~= "Robust" & "`e(kernel)'" == "" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (LR test of excluded instruments):" + } +di in ye _col(72) %7.3f e(arubin) + if e(rankxx) < e(rankzz) { +di in gr _col(52) "Chi-sq(" in ye e(arubindf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(arubinp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + di in smcl in gr "{hline 78}" + } + } + +* Display 2nd footer with variable lists + if "`nofooter'"=="" { + +* Warn about dropped instruments if any +* (Re-)calculate number of user-supplied instruments + local iv1_ct : word count `e(insts)' + local iv1_ct = `iv1_ct' + `e(cons)' + + if `iv1_ct' > e(rankzz) { +di in gr "Collinearities detected among instruments: " _c +di in gr `iv1_ct'-e(rankzz) " instrument(s) dropped" + } + + if "`e(collin)'`e(dups)'" != "" | `e(partial_ct)'>0 { +* If collinearities, duplicates or partial, abbreviated varlists saved with a 1 at the end + local one "1" + } + if "`e(instd)'" != "" { + di in gr "Instrumented:" _c + Disp `e(instd`one')', _col(23) + } + if "`e(inexog)'" != "" { + 
di in gr "Included instruments:" _c + Disp `e(inexog`one')', _col(23) + } + if "`e(exexog)'" != "" { + di in gr "Excluded instruments:" _c + Disp `e(exexog`one')', _col(23) + } + if `e(partial_ct)' > 0 { + if e(partialcons) { + local partial "`e(partial`one')' _cons" + } + else { + local partial "`e(partial`one')'" + } +di in smcl _c "{help `helpfile'##partial:Partialled-out}" + di in gr ":" _c + Disp `partial', _col(23) + if "`e(partialsmall)'"=="" { +di in gr _col(23) "nb: small-sample adjustments do not account for" +di in gr _col(23) " partialled-out variables" + } + else { +di in gr _col(23) "nb: small-sample adjustments account for" +di in gr _col(23) " partialled-out variables" + } + } + if "`e(dups)'" != "" { + di in gr "Duplicates:" _c + Disp `e(dups)', _col(23) + } + if "`e(collin)'" != "" { + di in gr "Dropped collinear:" _c + Disp `e(collin)', _col(23) + } + if "`e(ecollin)'" != "" { + di in gr "Reclassified as exog:" _c + Disp `e(ecollin)', _col(23) + } + di in smcl in gr "{hline 78}" + } +end + +************************************************************************************** + +program define DispRF + version 8.2 + local eqname "`e(rfeq)'" + local depvar "`e(depvar)'" + local strlen : length local depvar + local strlen = `strlen'+25 +di +di in gr "Reduced-form regression: `e(depvar)'" +di in smcl in gr "{hline `strlen'}" + capture estimates replay `eqname' + if "`eqname'"=="" | _rc != 0 { +di in ye "Unable to display reduced-form regression of `e(depvar)'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + else { + estimates replay `eqname', noheader +di + } +end + +program define DispFirst + version 8.2 + args helpfile + tempname firstmat ivest sheapr2 pr2 F df df_r pvalue APF APFdf1 APFdf2 APFp APr2 + + mat `firstmat'=e(first) + if `firstmat'[1,1] == . 
{ +di +di in ye "Unable to display first-stage estimates; macro e(first) is missing" + exit + } +di in gr _newline "First-stage regressions" +di in smcl in gr "{hline 23}" +di + local endo1 : colnames(`firstmat') + local nrvars : word count `endo1' + local firsteqs "`e(firsteqs)'" + local nreqs : word count `firsteqs' + if `nreqs' < `nrvars' { +di in ye "Unable to display all first-stage regressions." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + foreach eqname of local firsteqs { + _estimates hold `ivest' + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to list stored estimation `eqname'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." 
+di + } + else { + local vn "`e(depvar)'" +di in gr "First-stage regression of `vn':" + estimates replay `eqname', noheader + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] + mat `APF' =`firstmat'["APF","`vn'"] + mat `APFdf1' =`firstmat'["APFdf1","`vn'"] + mat `APFdf2' =`firstmat'["APFdf2","`vn'"] + mat `APFp' =`firstmat'["APFp","`vn'"] + mat `APr2' =`firstmat'["APr2","`vn'"] + +di in gr "F test of excluded instruments:" +di in gr " F(" %3.0f `df'[1,1] "," %6.0f `df_r'[1,1] ") = " in ye %8.2f `F'[1,1] +di in gr " Prob > F = " in ye %8.4f `pvalue'[1,1] + +di in smcl "{help `helpfile'##apstats:Angrist-Pischke multivariate F test of excluded instruments:}" +di in gr " F(" %3.0f `APFdf1'[1,1] "," %6.0f `APFdf2'[1,1] ") = " in ye %8.2f `APF'[1,1] +di in gr " Prob > F = " in ye %8.4f `APFp'[1,1] + +di + } + _estimates unhold `ivest' + } +end + +program define DispStored + args saverf savefirst helpfile + version 8.2 + if "`saverf'" != "" { + local eqlist "`e(rfeq)'" + } + if "`savefirst'" != "" { + local eqlist "`eqlist' `e(firsteqs)'" + } + local eqlist : list retokenize eqlist +di in gr _newline "Stored estimation results" +di in smcl in gr "{hline 25}" _c + capture estimates dir `eqlist' + if "`eqlist'" != "" & _rc == 0 { +* Estimates exist and can be listed + estimates dir `eqlist' + } + else if "`eqlist'" != "" & _rc != 0 { +di +di in ye "Unable to list stored estimations." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } +end + +program define DispFFirst + version 8.2 + args helpfile + tempname firstmat + tempname sheapr2 pr2 F df df_r pvalue APF APFdf1 APFdf2 APFp APchi2 APchi2p APr2 + mat `firstmat'=e(first) + if `firstmat'[1,1] == . 
{ +di +di in ye "Unable to display summary of first-stage estimates; macro e(first) is missing" + exit + } + local endo : colnames(`firstmat') + local nrvars : word count `endo' + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + local efirsteqs "`e(firsteqs)'" + + mat `df' =`firstmat'["df",1] + mat `df_r' =`firstmat'["df_r",1] + mat `APFdf1' =`firstmat'["APFdf1",1] + mat `APFdf2' =`firstmat'["APFdf2",1] + +di +di in gr _newline "Summary results for first-stage regressions" +di in smcl in gr "{hline 43}" +di + +di _c in smcl _col(44) "{help `helpfile'##apstats:(Underid)}" +di in smcl _col(65) "{help `helpfile'##apstats:(Weak id)}" + +di _c in gr "Variable |" +di _c in smcl _col(16) "{help `helpfile'##apstats:F}" in gr "(" +di _c in ye _col(17) %3.0f `df'[1,1] in gr "," in ye %6.0f `df_r'[1,1] in gr ") P-val" +di _c in gr _col(37) "|" +di _c in smcl _col(39) "{help `helpfile'##apstats:AP Chi-sq}" in gr "(" +di _c in ye %3.0f `APFdf1'[1,1] in gr ") P-val" +di _c in gr _col(60) "|" +di _c in smcl _col(62) "{help `helpfile'##apstats:AP F}" in gr "(" +di in ye _col(67) %3.0f `APFdf1'[1,1] in gr "," in ye %6.0f `APFdf2'[1,1] in gr ")" + + local i = 1 + foreach vn of local endo { + + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] + mat `APF' =`firstmat'["APF","`vn'"] + mat `APFdf1' =`firstmat'["APFdf1","`vn'"] + mat `APFdf2' =`firstmat'["APFdf2","`vn'"] + mat `APFp' =`firstmat'["APFp","`vn'"] + mat `APchi2' =`firstmat'["APchi2","`vn'"] + mat `APchi2p' =`firstmat'["APchi2p","`vn'"] + mat `APr2' =`firstmat'["APr2","`vn'"] + + local vnlen : length local vn + if `vnlen' > 12 { + local vn : piece 1 12 of "`vn'" + } +di _c in y %-12s "`vn'" _col(14) in gr "|" _col(18) in y %8.2f `F'[1,1] +di _c _col(28) in y %8.4f `pvalue'[1,1] +di _c _col(37) in g "|" 
_col(42) in y %8.2f `APchi2'[1,1] _col(51) in y %8.4f `APchi2p'[1,1] +di _col(60) in g "|" _col(65) in y %8.2f `APF'[1,1] + local i = `i' + 1 + } +di + + if "`robust'`cluster'" != "" { + if "`cluster'" != "" { + local rtype "cluster-robust" + } + else if "`kernel'" != "" { + local rtype "heteroskedasticity and autocorrelation-robust" + } + else { + local rtype "heteroskedasticity-robust" + } + } + else if "`kernel'" != "" { + local rtype "autocorrelation-robust" + } + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: first-stage test statistics `rtype'" +di + } + + local k2 = `APFdf1'[1,1] +di in gr "Stock-Yogo weak ID test critical values for single endogenous regressor:" + local cdmissing=1 + if "`e(model)'"=="iv" | "`e(model)'"=="gmm2s" | "`e(model)'"=="gmmw" { + cdsy, type(ivbias5) k2(`e(exexog_ct)') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(37) "5% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "30% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "15% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`k2') nendog(1) + if "`r(cv)'"~="." 
{ + di in gr _col(36) "25% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)==.) | "`e(model)'"=="cue" { + cdsy, type(limlsize10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "15% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "25% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)<.) { + cdsy, type(fullrel5) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(43) "5% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "30% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(43) "5% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`k2') nendog(1) + if "`r(cv)'"~="." 
{ + di in gr _col(36) "20% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "30% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." +di + } + else { +di + } + +* Check that AP chi-sq and F denominator are correct and = underid test dof + if e(iddf)~=`APFdf1'[1,1] { +di in red "Warning: Error in calculating first-stage id statistics above;" +di in red " dof of AP statistics is " `APFdf1'[1,1] ", should be L-(K-1)=`e(iddf)'." + } + + tempname iddf idstat idp widstat cdf rkf + scalar `iddf'=e(iddf) + scalar `idstat'=e(idstat) + scalar `idp'=e(idp) + scalar `widstat'=e(widstat) + scalar `cdf'=e(cdf) + capture scalar `rkf'=e(rkf) +di in smcl "{help `helpfile'##idtest:Underidentification test}" +di in gr "Ho: matrix of reduced form coefficients has rank=K1-1 (underidentified)" +di in gr "Ha: matrix has rank=K1 (identified)" + if "`robust'`kernel'"=="" { +di in ye "Anderson canon. corr. 
LM statistic" _c + } + else { +di in ye "Kleibergen-Paap rk LM statistic" _c + } +di in gr _col(42) "Chi-sq(" in ye `iddf' in gr ")=" %-7.2f in ye `idstat' /* + */ _col(61) in gr "P-val=" %6.4f in ye `idp' + +di +di in smcl "{help `helpfile'##widtest:Weak identification test}" +di in gr "Ho: equation is weakly identified" +di in ye "Cragg-Donald Wald F statistic" _col(65) %8.2f `cdf' + if "`robust'`kernel'"~="" { +di in ye "Kleibergen-Paap Wald rk F statistic" _col(65) %8.2f `rkf' + } +di + +di in gr "Stock-Yogo weak ID test critical values for K1=`e(endog_ct)' and L1=`e(exexog_ct)':" + local cdmissing=1 + if "`e(model)'"=="iv" | "`e(model)'"=="gmm2s" | "`e(model)'"=="gmmw" { + cdsy, type(ivbias5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(37) "5% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "30% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "15% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(36) "20% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "25% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)==.) | "`e(model)'"=="cue" { + cdsy, type(limlsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "15% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "25% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)<.) { + cdsy, type(fullrel5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "30% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(43) "5% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "20% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "30% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + if `cdmissing' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." + } + di + } + + tempname arf arfp archi2 archi2p ardf ardf_r + tempname sstat sstatp sstatdf +di in smcl "{help `helpfile'##wirobust:Weak-instrument-robust inference}" +di in gr "Tests of joint significance of endogenous regressors B1 in main equation" +di in gr "Ho: B1=0 and orthogonality conditions are valid" +* Needs to be small so that adjusted dof is reflected in F stat + scalar `arf'=e(arf) + scalar `arfp'=e(arfp) + scalar `archi2'=e(archi2) + scalar `archi2p'=e(archi2p) + scalar `ardf'=e(ardf) + scalar `ardf_r'=e(ardf_r) + scalar `sstat'=e(sstat) + scalar `sstatp'=e(sstatp) + scalar `sstatdf'=e(sstatdf) +di in ye _c "Anderson-Rubin Wald test" +di in gr _col(36) "F(" in ye `ardf' in gr "," in ye `ardf_r' in gr ")=" /* + */ _col(49) in ye %7.2f `arf' _col(61) in gr "P-val=" in ye %6.4f `arfp' +di in ye _c "Anderson-Rubin Wald test" +di in gr _col(36) "Chi-sq(" in ye `ardf' in gr ")=" /* + */ _col(49) in ye %7.2f `archi2' _col(61) in gr "P-val=" in ye %6.4f `archi2p' +di in ye _c "Stock-Wright LM S statistic" +di in gr 
_col(36) "Chi-sq(" in ye `sstatdf' in gr ")=" /* + */ _col(49) in ye %7.2f `sstat' _col(61) in gr "P-val=" in ye %6.4f `sstatp' +di + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: Underidentification, weak identification and weak-identification-robust" +di in gr " test statistics `rtype'" +di + } + + if "`cluster'" != "" & "`e(clustvar2)'"=="" { +di in gr "Number of clusters N_clust = " in ye %10.0f e(N_clust) + } + else if "`e(clustvar2)'" ~= "" { +di in gr "Number of clusters (1) N_clust1 = " in ye %10.0f e(N_clust1) +di in gr "Number of clusters (2) N_clust2 = " in ye %10.0f e(N_clust2) + } +di in gr "Number of observations N = " in ye %10.0f e(N) +di in gr "Number of regressors K = " in ye %10.0f e(rankxx) +di in gr "Number of endogenous regressors K1 = " in ye %10.0f e(endog_ct) +di in gr "Number of instruments L = " in ye %10.0f e(rankzz) +di in gr "Number of excluded instruments L1 = " in ye %10.0f e(ardf) + if "`e(partial)'" != "" { +di in gr "Number of partialled-out regressors/IVs = " in ye %10.0f e(partial_ct) +di in gr "NB: K & L do not included partialled-out variables" + } + +end + +* Performs first-stage regressions + +program define doFirst, rclass + version 8.2 + args endo /* variable list (including depvar) + */ inexog /* list of included exogenous + */ exexog /* list of excluded exogenous + */ touse /* touse sample + */ wtexp /* full weight expression w/ [] + */ noconstant /* + */ robust /* + */ clopt /* + */ bwopt /* + */ kernopt /* + */ savefprefix /* + */ dofmopt /* + */ sdofmopt /* + */ sw /* + */ psd /* + */ ivreg2_cmd + + + local i 1 + foreach x of local endo { + capture `ivreg2_cmd' `x' `inexog' `exexog' `wtexp' /* + */ if `touse', `noconstant' `robust' `clopt' `bwopt' `kernopt' /* + */ `dofmopt' `sdofmopt' `sw' `psd' small nocollin + if _rc ~= 0 { +* First-stage regression failed +di in ye "Unable to estimate first-stage regression of `x'" + if _rc == 506 { +di in ye " var-cov matrix of first-stage regression of `x' not 
positive-definite" + } + } + else { +* First-stage regression successful +* Check if there is enough room to save results; leave one free. Allow for overwriting. +* Max is 20-1=19 for Stata 9.0 and earlier, 300-1=299 for Stata 9.1+ + local maxest=299 + local vn "`x'" + local plen : length local savefprefix + local vlen : length local vn + if `plen'+`vlen' > 27 { + local vlen=27-`plen' + local vn : permname `vn', length(`vlen') +* Must create a variable so that permname doesn't reuse it + gen `vn'=0 + local dropvn "`dropvn' `vn'" + } + local eqname "`savefprefix'`vn'" + local eqname : subinstr local eqname "." "_" + qui estimates dir + local est_list "`r(names)'" + local est_list : list est_list - eqname + local est_ct : word count `est_list' + if `est_ct' < `maxest' { + capture est store `eqname', title("First-stage regression: `x'") + if _rc == 0 { + local firsteqs "`firsteqs' `eqname'" + } + } + else { +di +di in ye "Unable to store first-stage regression of `x'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." 
+di + } + } + } + return local firsteqs "`firsteqs'" +end + +program define doRF, rclass + version 8.2 + args lhs /* + */ inexog /* list of included exogenous + */ exexog /* list of excluded exogenous + */ touse /* touse sample + */ weight /* full weight expression w/ [] + */ nocons /* + */ robust /* + */ clopt /* + */ bwopt /* + */ kernopt /* + */ saverfprefix /* + */ dofminus /* + */ sdofminus /* + */ sw /* + */ psd /* + */ ivreg2_cmd + +* Anderson-Rubin test of signif of endog regressors (Bo=0) +* In case ivreg2 called with adjusted dof, first stage should adjust dof as well + tempname arf arfp archi2 archi2p ardf ardf_r tempest + capture _estimates hold `tempest' + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } +* Needs to be small so that adjusted dof is reflected in F stat +* capture to prevent not-full-rank error warning + capture `ivreg2_cmd' `lhs' `inexog' `exexog' `weight' if `touse', /* + */ small `nocons' dofminus(`dofminus') sdofminus(`sdofminus') /* + */ `robust' `clopt' `bwopt' `kernopt' `sw' `psd' nocollin + if _rc != 0 { +di as err "Error: reduced form estimation failed" + exit 498 + } + + qui test `exexog' + scalar `arf'=r(F) + scalar `arfp'=r(p) + scalar `ardf'=r(df) + scalar `ardf_r'=r(df_r) + if "`clopt'"=="" { + scalar `archi2'=`arf'*`ardf'*(e(N)-`dofminus')/(e(N)-e(rankxx)-`dofminus'-`sdofminus') + } + else { + scalar `archi2'=`arf'*`ardf'*min(e(N_clust), e(N_clust2))/r(df_r)*(e(N)-1)/(e(N)-e(rankxx)-`sdofminus') + } + scalar `archi2p'=chiprob(`ardf',`archi2') + +* Check if there is enough room to save results; leave one free. Allow for overwriting. 
+* Max is 20-1=19 for Stata 9.0 and earlier, 300-1=299 for Stata 9.1+ + local maxest=299 + local vn "`lhs'" + local plen : length local saverfprefix + local vlen : length local lhs + if `plen'+`vlen' > 27 { + local vlen=27-`plen' + local vn : permname `vn', length(`vlen') + } + local eqname "`saverfprefix'`vn'" + local eqname : subinstr local eqname "." "_" + qui estimates dir + local est_list "`r(names)'" + local est_list : list est_list - eqname + local est_ct : word count `est_list' + if `est_ct' < `maxest' { + capture est store `eqname', title("Reduced-form regression: `lhs'") + return local rfeq "`eqname'" + } + else { +di +di in ye "Unable to store reduced-form regression of `lhs'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + _estimates unhold `tempest' + return scalar arf=`arf' + return scalar arfp=`arfp' + return scalar ardf=`ardf' + return scalar ardf_r=`ardf_r' + return scalar archi2=`archi2' + return scalar archi2p=`archi2p' +end + +************************************************************************************** +program define IsStop, sclass + /* sic, must do tests one-at-a-time, + * 0, may be very large */ + version 8.2 + if `"`0'"' == "[" { + sret local stop 1 + exit + } + if `"`0'"' == "," { + sret local stop 1 + exit + } + if `"`0'"' == "if" { + sret local stop 1 + exit + } +* per official ivreg 5.1.3 + if substr(`"`0'"',1,3) == "if(" { + sret local stop 1 + exit + } + if `"`0'"' == "in" { + sret local stop 1 + exit + } + if `"`0'"' == "" { + sret local stop 1 + exit + } + else sret local stop 0 +end + +program define Disp + version 8.2 + syntax [anything] [, _col(integer 15) ] + local len = 80-`_col'+1 + local piece : piece 1 `len' of `"`anything'"' + local i 1 + while "`piece'" != "" { + di in gr _col(`_col') "`first'`piece'" + local i = `i' + 1 + local piece : piece `i' `len' of `"`anything'"' + } + if `i'==1 { 
+ di + } +end + +program define matsort + version 8.2 + args vmat names + tempname hold + foreach vn in `names' { + mat `hold'=nullmat(`hold'), `vmat'[1...,"`vn'"] + } + mat `vmat'=`hold' + mat drop `hold' + foreach vn in `names' { + mat `hold'=nullmat(`hold') \ `vmat'["`vn'",1...] + } + mat `vmat'=`hold' +end + +program define cdsy, rclass + version 8.2 + syntax , type(string) k2(integer) nendog(integer) + +* type() can be ivbias5 (k2<=100, nendog<=3) +* ivbias10 (ditto) +* ivbias20 (ditto) +* ivbias30 (ditto) +* ivsize10 (k2<=100, nendog<=2) +* ivsize15 (ditto) +* ivsize20 (ditto) +* ivsize25 (ditto) +* fullrel5 (ditto) +* fullrel10 (ditto) +* fullrel20 (ditto) +* fullrel30 (ditto) +* fullmax5 (ditto) +* fullmax10 (ditto) +* fullmax20 (ditto) +* fullmax30 (ditto) +* limlsize10 (ditto) +* limlsize15 (ditto) +* limlsize20 (ditto) +* limlsize25 (ditto) + + tempname temp cv + +* Initialize critical value as MV + scalar `cv'=. + + if "`type'"=="ivbias5" { + mata: s_ivreg210_cdsy("`temp'", 1) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias10" { + mata: s_ivreg210_cdsy("`temp'", 2) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias20" { + mata: s_ivreg210_cdsy("`temp'", 3) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias30" { + mata: s_ivreg210_cdsy("`temp'", 4) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + if "`type'"=="ivsize10" { + mata: s_ivreg210_cdsy("`temp'", 5) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize15" { + mata: s_ivreg210_cdsy("`temp'", 6) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize20" { + mata: s_ivreg210_cdsy("`temp'", 7) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize25" { + mata: s_ivreg210_cdsy("`temp'", 8) 
+ if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel5" { + mata: s_ivreg210_cdsy("`temp'", 9) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel10" { + mata: s_ivreg210_cdsy("`temp'", 10) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel20" { + mata: s_ivreg210_cdsy("`temp'", 11) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel30" { + mata: s_ivreg210_cdsy("`temp'", 12) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax5" { + mata: s_ivreg210_cdsy("`temp'", 13) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax10" { + mata: s_ivreg210_cdsy("`temp'", 14) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax20" { + mata: s_ivreg210_cdsy("`temp'", 15) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax30" { + mata: s_ivreg210_cdsy("`temp'", 16) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize10" { + mata: s_ivreg210_cdsy("`temp'", 17) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize15" { + mata: s_ivreg210_cdsy("`temp'", 18) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize20" { + mata: s_ivreg210_cdsy("`temp'", 19) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize25" { + mata: s_ivreg210_cdsy("`temp'", 20) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + return scalar cv=`cv' +end + +******************************************************************************* +**************** SUBROUTINES FOR KERNEL-ROBUST ******************************** 
+******************************************************************************* + +// capt prog drop abw +// abw wants a varlist of [ eps | Z | touse] +// where Z includes all instruments, included and excluded, with constant if +// present as the last column; eps are a suitable set of residuals; and touse +// marks the observations in the data matrix used to generate the residuals +// (e.g. e(sample) of the appropriate model). +// The Noconstant option indicates that no constant term exists in the Z matrix. +// kern is the name of the HAC kernel. -ivregress- only provides definitions +// for Bartlett (default), Parzen, quadratic spectral. + +// returns the optimal bandwidth as local abw + +// abw 1.0.1 CFB 30jun2007 +// 1.0.1 : redefine kernel names (3 instances) to match ivreg2 +// 1.1.0 : pass nobs and tobs to s_abw; abw bug fix and also handles gaps in data correctly + +prog def abw, rclass + version 9.2 + syntax varlist(ts), [ tindex(varname) nobs(integer 0) tobs(integer 0) NOConstant Kernel(string)] +// validate kernel + if "`kernel'" == "" { + local kernel = "Bartlett" + } +// cfb B102 + if !inlist("`kernel'", "Bartlett", "Parzen", "Quadratic Spectral") { + di as err "Error: kernel `kernel' not compatible with bw(auto)" + return scalar abw = 1 + return local bwchoice "Kernel `kernel' not compatible with bw(auto); bw=1 (default)" + exit + } + else { +// set constant + local cons 1 + if "`noconstant'" != "" { + local cons 0 + } +// deal with ts ops + tsrevar `varlist' + local varlist1 `r(varlist)' + mata: s_abw("`varlist1'", "`tindex'", `nobs', `tobs', `cons', "`kernel'") + return scalar abw = `abw' + return local bwchoice "Automatic bw selection according to Newey-West (1994)" + } +end + + +******************************************************************************* +************** END SUBROUTINES FOR KERNEL-ROBUST ****************************** +******************************************************************************* + + + 
+******************************************************************************* +*************************** BEGIN MATA CODE *********************************** +******************************************************************************* + +version 10.1 +mata: + +// For reference: +// struct ms_ivreg210_vcvorthog { +// string scalar ename, Znames, touse, weight, wvarname +// string scalar robust, clustvarname, clustvarname2, clustvarname3, kernel +// string scalar sw, psd, ivarname, tvarname, tindexname +// real scalar wf, N, bw, tdelta, dofminus +// real matrix ZZ +// pointer matrix e +// pointer matrix Z +// pointer matrix wvar +// } + + +void s_abw (string scalar Zulist, + string scalar tindexname, + real scalar nobs, + real scalar tobs, + real scalar cons, + string scalar kernel + ) +{ + +// nobs = number of observations = number of data points available = rows(uZ) +// tobs = time span of data = t_N - t_1 + 1 +// nobs = tobs if no gaps in data +// nobs < tobs if there are gaps +// nobs used below when calculating means, e.g., covariances in sigmahat. +// tobs used below when time span of data is needed, e.g., mstar. + + string rowvector Zunames, tov + string scalar v, v2 + real matrix uZ + real rowvector h + real scalar lenzu, abw + +// access the Stata variables in Zulist, honoring touse stored as last column + Zunames = tokens(Zulist) + lenzu=cols(Zunames)-1 + v = Zunames[|1\lenzu|] + v2 = Zunames[lenzu+1] + st_view(uZ,.,v,v2) + tnow=st_data(., tindexname) + +// assume constant in last col of uZ if it exists +// account for eps as the first column of uZ + if (cons) { + nrows1=cols(uZ)-2 + nrows2=1 + } + else { + nrows1=cols(uZ)-1 + nrows2=0 + } +// [R] ivregress p.42: referencing Newey-West 1994 REStud 61(4):631-653 +// define h indicator rowvector + h = J(nrows1,1,1) \ J(nrows2,1,0) + +// calc mstar per p.43 +// Hannan (1971, 296) & Priestley (1981, 58) per Newey-West p. 
633 +// corrected per Alistair Hall msg to Brian Poi 17jul2008 +// T = rows(uZ) +// oneT = 1/T + expo = 2/9 + q = 1 +// cgamma = 1.4117 + cgamma = 1.1447 + if(kernel == "Parzen") { + expo = 4/25 + q = 2 + cgamma = 2.6614 + } +// cfb B102 + if(kernel == "Quadratic Spectral") { + expo = 2/25 + q = 2 + cgamma = 1.3221 + } +// per Newey-West p.639, Anderson (1971), Priestley (1981) may provide +// guidance on setting expo for other kernels +// mstar = trunc(20 *(T/100)^expo) +// use time span of data (not number of obs) + mstar = trunc(20 *(tobs/100)^expo) + +// calc uZ matrix + u = uZ[.,1] + Z = uZ[|1,2 \.,.|] + +// calc f vector: (u_i Z_i) * h + f = (u :* Z) * h + +// calc sigmahat vector +// sigmahat = J(mstar+1,1,oneT) +// for(j=0;j<=mstar;j++) { +// for(i=j+1;i<=T;i++) { +// sigmahat[j+1] = sigmahat[j+1] + f[i]*f[i-j] +// } +// } + +// sigmahat vector following _iv_hacbw_select.mata logic +// sigmahat = J(mstar+1,1,0) +// for(j=0;j<=mstar;j++) { +// for(i=j+1;i<=nobs;i++) { // sum through nobs = number of datapoints available +// sigmahat[j+1] = sigmahat[j+1] + f[i]*f[i-j] +// } +// sigmahat[j+1] = sigmahat[j+1] / nobs +// } + +// alt approach that allows for gaps in time series + sigmahat = J(mstar+1,1,0) + for(j=0;j<=mstar;j++) { + lsj = "L"+strofreal(j) + tlag=st_data(., lsj+"."+tindexname) + tmatrix = tnow, tlag + svar=(tnow:<.):*(tlag:<.) 
// multiply column vectors of 1s and 0s + tmatrix=select(tmatrix,svar) // to get intersection, and replace tmatrix + // now calculate autocovariance; divide by nobs + sigmahat[j+1] = quadcross(f[tmatrix[.,1],.], f[tmatrix[.,2],.]) / nobs + } + +// calc shat(q), shat(0) + shatq = 0 + shat0 = sigmahat[1] + for(j=1;j<=mstar;j++) { + shatq = shatq + 2 * sigmahat[j+1] * j^q + shat0 = shat0 + 2 * sigmahat[j+1] + } + +// calc gammahat + expon = 1/(2*q+1) + gammahat = cgamma*( (shatq/shat0)^2 )^expon +// m = gammahat * T^expon +// use time span of data (not number of obs) + m = gammahat * tobs^expon + +// calc opt lag + if(kernel == "Bartlett" | kernel == "Parzen") { + optlag = min((trunc(m),mstar)) + } + else if(kernel == "Quadratic Spectral") { + optlag = min((m,mstar)) + } + +// if optlag is the optimal lag to be used, we need to add one to +// specify bandwidth in ivreg2 terms + abw = optlag + 1 + st_local("abw",strofreal(abw)) +} // end program s_abw + + +// ************** Common cross-products ************************************* + +void s_crossprods( string scalar yname, + string scalar X1names, + string scalar X2names, + string scalar Z1names, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N) + +{ + +// y = dep var +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 + + ytoken=tokens(yname) + X1tokens=tokens(X1names) + X2tokens=tokens(X2names) + Z1tokens=tokens(Z1names) + + Xtokens = (X1tokens, X2tokens) + Ztokens = (Z1tokens, X2tokens) + + K1=cols(X1tokens) + K2=cols(X2tokens) + K=K1+K2 + L1=cols(Z1tokens) + L2=cols(X2tokens) + L=L1+L2 + + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(A, ., st_tsrevar((ytoken, Xtokens, Z1tokens)), touse) + + AA = quadcross(A, wf*wvar, A) + + if (K>0) { + XX = AA[(2::K+1),(2..K+1)] + Xy = AA[(2::K+1),1] + } + if (K1>0) { + X1X1 = AA[(2::K1+1),(2..K1+1)] + } + + if (L1 > 0) { + Z1Z1 = 
AA[(K+2::rows(AA)),(K+2..rows(AA))] + } + + if (L2 > 0) { + Z2Z2 = AA[(K1+2::K+1), (K1+2::K+1)] + Z2y = AA[(K1+2::K+1), 1] + } + + if ((L1>0) & (L2>0)) { + Z2Z1 = AA[(K1+2::K+1), (K+2::rows(AA))] + ZZ2 = Z2Z1, Z2Z2 + ZZ1 = Z1Z1, Z2Z1' + ZZ = ZZ1 \ ZZ2 + } + else if (L1>0) { + ZZ = Z1Z1 + } + else { +// L1=0 + ZZ = Z2Z2 + ZZ2 = Z2Z2 + } + + if ((K1>0) & (L1>0)) { // K1>0, L1>0 + X1Z1 = AA[(2::K1+1), (K+2::rows(AA))] + } + + if ((K1>0) & (L2>0)) { + X1Z2 = AA[(2::K1+1), (K1+2::K+1)] + if (L1>0) { // K1>0, L1>0, L2>0 + X1Z = X1Z1, X1Z2 + XZ = X1Z \ ZZ2 + } + else { // K1>0, L1=0, L2>0 + XZ = X1Z2 \ ZZ2 + X1Z = X1Z2 + } + } + else if (K1>0) { // K1>0, L2=0 + XZ = X1Z1 + X1Z= X1Z1 + } + else if (L1>0) { // K1=0, L2>0 + XZ = AA[(2::K+1),(K+2..rows(AA))], AA[(2::K+1),(2..K+1)] + } + else { // K1=0, L2=0 + XZ = ZZ + } + + if ((L1>0) & (L2>0)) { + Zy = AA[(K+2::rows(AA)), 1] \ AA[(K1+2::K+1), 1] + ZY = AA[(K+2::rows(AA)), (1..K1+1)] \ AA[(K1+2::K+1), (1..K1+1)] + Z2Y = AA[(K1+2::K+1), (1..K1+1)] + } + else if (L1>0) { + Zy = AA[(K+2::rows(AA)), 1] + ZY = AA[(K+2::rows(AA)), (1..K1+1)] + } + else if (L2>0) { + Zy = AA[(K1+2::K+1), 1] + ZY = AA[(K1+2::K+1), (1..K1+1)] + Z2Y = ZY + } +// Zy, ZY, Z2Y not created if L1=L2=0 + + YY = AA[(1::K1+1), (1..K1+1)] + yy = AA[1,1] + st_subview(y, A, ., 1) + ym = sum(wf*wvar:*y)/N + yyc = quadcrossdev(y, ym, wf*wvar, y, ym) + + XXinv = invsym(XX) + if (Xtokens==Ztokens) { + ZZinv = XXinv + XPZXinv = XXinv + } + else { + ZZinv = invsym(ZZ) + XPZX = makesymmetric(XZ*ZZinv*XZ') + XPZXinv=invsym(XPZX) + } + + st_matrix("r(XX)", XX) + st_matrix("r(X1X1)", X1X1) + st_matrix("r(X1Z)", X1Z) + st_matrix("r(ZZ)", ZZ) + st_matrix("r(Z2Z2)", Z2Z2) + st_matrix("r(Z1Z2)", Z2Z1') + st_matrix("r(Z2y)",Z2y) + st_matrix("r(XZ)", XZ) + st_matrix("r(Xy)", Xy) + st_matrix("r(Zy)", Zy) + st_numscalar("r(yy)", yy) + st_numscalar("r(yyc)", yyc) + st_matrix("r(YY)", YY) + st_matrix("r(ZY)", ZY) + st_matrix("r(Z2Y)", Z2Y) + st_matrix("r(XXinv)", XXinv) + 
st_matrix("r(ZZinv)", ZZinv) + st_matrix("r(XPZXinv)", XPZXinv) + +} // end program s_crossprods + +// ************** Cross-products needed for collinearity checks ************************************* + +void s_cc_crossprods( string scalar Anames, + string scalar Bnames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf) + +{ + + Atokens=tokens(Anames) + Btokens=tokens(Bnames) + + a=cols(Atokens) + b=cols(Btokens) + + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(ABvars, ., st_tsrevar((Atokens, Btokens)), touse) + + M = quadcross(ABvars, wf*wvar, ABvars) + + if (a>0) { + AA = M[(1::a),(1::a)] + } + if (b>0) { + BB = M[(a+1::a+b),(a+1::a+b)] + BBinv = invsym(BB) + } + if ((a>0) & (b>0)) { + AB = M[(1::a),(a+1::a+b)] + } + + + st_matrix("r(AA)", AA) + st_matrix("r(BB)", BB) + st_matrix("r(AB)", AB) + st_matrix("r(BBinv)", BBinv) + +} // end program s_cc_crossprods + +// *************** 1st step GMM ******************** // + +void s_gmm1s( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Wmatrix, + string scalar Smatrix, + scalar dofminus) +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + ZZ = st_matrix(ZZmatrix) + XX = st_matrix(XXmatrix) + XZ = st_matrix(XZmatrix) + Zy = st_matrix(Zymatrix) + ZZinv = st_matrix(ZZinvmatrix) + + QZZ = ZZ / N + QXX = XX / N + QXZ = XZ / N + QZy = Zy / N + QZZinv = ZZinv*N + +// Weighting matrix supplied + if (Wmatrix~="") { + W = st_matrix(Wmatrix) + } +// Var-cov matrix of orthog conditions supplied + else if (Smatrix~="") { + omega=st_matrix(Smatrix) + W = invsym(omega) + } +// No weighting matrix supplied, default to IV weighting matrix + else { + W = QZZinv + IVflag=1 + } + + if ((Xtokens==Ztokens) & (IVflag==1)) { + beta = QZZinv*QZy // OLS + } + else { + QXZ_W_QZX = QXZ * W * QXZ' + _makesymmetric(QXZ_W_QZX) + QXZ_W_QZXinv=invsym(QXZ_W_QZX) + beta = (QXZ_W_QZXinv * QXZ * W * QZy) + } + beta = beta' + + e[.,.] = y - X * beta' + +// If default weighting matrix, normalize by sigma^2 for standard IV reporting purposes + if (IVflag==1) { + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + W = W/sigmasq + } + + st_matrix("r(beta)", beta) + st_matrix("r(W)",W) + +} // end program s_gmm1s + + +// *************** efficient GMM ******************** // + +void s_egmm( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Smatrix, + scalar dofminus) +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + ZZ = st_matrix(ZZmatrix) + XX = st_matrix(XXmatrix) + XZ = st_matrix(XZmatrix) + Zy = st_matrix(Zymatrix) + ZZinv = st_matrix(ZZinvmatrix) + + QZZ = ZZ / N + QXX = XX / N + QXZ = XZ / N + QZy = Zy / N + QZZinv = ZZinv*N + +// Var-cov matrix of orthog conditions supplied + if (Smatrix~="") { + omega=st_matrix(Smatrix) + W = invsym(omega) + } +// No weighting matrix supplied, default to IV weighting matrix + else { + W = QZZinv + IVflag=1 + } + + QXZ_W_QZX = QXZ * W * QXZ' + _makesymmetric(QXZ_W_QZX) + QXZ_W_QZXinv=invsym(QXZ_W_QZX) + + beta = (QXZ_W_QZXinv * QXZ * W * QZy) + beta = beta' + + e[.,.] = y - X * beta' + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + +// If default weighting matrix, need to normalize by sigma^2 + if (IVflag==1) { + W = W/sigmasq + } + +// Sandwich var-cov matrix (no finite-sample correction) +// Reduces to classical var-cov matrix if omega is not robust form. +// But the GMM estimator is "root-N consistent", and technically we do +// inference on sqrt(N)*beta. 
By convention we work with beta, so we adjust +// the var-cov matrix instead: + V = 1/N * QXZ_W_QZXinv + +// J if overidentified + if (cols(Z) > cols(X)) { + Ze = quadcross(Z, wf*wvar, e) + gbar = Ze / N + j = N * gbar' * W * gbar + } + else { + j=0 + } + + st_matrix("r(beta)", beta) + st_matrix("r(V)", V) + st_matrix("r(W)", W) + st_numscalar("r(rss)", ee) + st_numscalar("r(j)", j) + st_numscalar("r(sigmasq)", sigmasq) + +} // end program s_egmm + +// *************** inefficient GMM ******************** // + +void s_iegmm( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Wmatrix, + string scalar Smatrix, + string scalar bname, + scalar dofminus) +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. + + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QXZ = st_matrix(XZmatrix) / N + QZy = st_matrix(Zymatrix) / N + +// beta is supplied + beta = st_matrix(bname) + +// Weighting matrix supplied + W = st_matrix(Wmatrix) + +// Var-cov matrix of orthog conditions supplied + omega=st_matrix(Smatrix) + +// Residuals are supplied + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + + QXZ_W_QZX = QXZ * W * QXZ' + _makesymmetric(QXZ_W_QZX) + QXZ_W_QZXinv=invsym(QXZ_W_QZX) + +// Calculate V and J. + +// V +// The GMM estimator is "root-N consistent", and technically we do +// inference on sqrt(N)*beta. 
By convention we work with beta, so we adjust +// the var-cov matrix instead: + V = 1/N * QXZ_W_QZXinv * QXZ * W * omega * W * QXZ' * QXZ_W_QZXinv + _makesymmetric(V) + +// J if overidentified + if (cols(Z) > cols(X)) { +// Note that J requires efficient GMM residuals, which means do 2-step GMM to get them. + W2s = invsym(omega) + QXZ_W2s_QZX = QXZ * W2s * QXZ' + _makesymmetric(QXZ_W2s_QZX) + QXZ_W2s_QZXinv=invsym(QXZ_W2s_QZX) + beta2s = (QXZ_W2s_QZXinv * QXZ * W2s * QZy) + beta2s = beta2s' + e2s = y - X * beta2s' + Ze2s = quadcross(Z, wf*wvar, e2s) + gbar = Ze2s / N + j = N * gbar' * W2s * gbar + } + else { + j=0 + } + + st_matrix("r(V)", V) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", sigmasq) + +} // end program s_iegmm + +// *************** LIML ******************** // + +void s_liml( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar Z2Z2matrix, + string scalar YYmatrix, + string scalar ZYmatrix, + string scalar Z2Ymatrix, + string scalar Xymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar Ynames, + string scalar ename, + string scalar Xnames, + string scalar X1names, + string scalar Znames, + string scalar Z1names, + string scalar Z2names, + scalar fuller, + scalar kclass, + string scalar coviv, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar dofminus) + +{ + struct ms_ivreg210_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + 
vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 + + Ytokens=tokens(Ynames) + Ztokens=tokens(Znames) + Z1tokens=tokens(Z1names) + Z2tokens=tokens(Z2names) + Xtokens=tokens(Xnames) + X1tokens=tokens(X1names) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QXZ = st_matrix(XZmatrix) / N + QZy = st_matrix(Zymatrix) / N + QZ2Z2 = st_matrix(Z2Z2matrix) / N + QYY = st_matrix(YYmatrix) / N + QZY = st_matrix(ZYmatrix) / N + QZ2Y = st_matrix(Z2Ymatrix) / N + QXy = st_matrix(Xymatrix) / N + QZZinv = st_matrix(ZZinvmatrix)*N + +// kclass=0 => LIML or Fuller LIML so calculate lambda + if (kclass == 0) { + QWW = QYY - QZY'*QZZinv*QZY + _makesymmetric(QWW) + if (cols(Z2tokens) > 0) { + QZ2Z2inv = invsym(QZ2Z2) + QWW1 = QYY - QZ2Y'*QZ2Z2inv*QZ2Y + _makesymmetric(QWW1) + } + else { +// Special case of no exogenous regressors + QWW1 = QYY + } + M=matpowersym(QWW, -0.5) + Eval=symeigenvalues(M*QWW1*M) + lambda=rowmin(Eval) + } + +// Exactly identified but might not be exactly 1, so make it so + if (cols(Z)==cols(X)) { + lambda=1 + } + + if (fuller > (N-cols(Z))) { +printf("\n{error:Error: invalid choice of Fuller LIML parameter.}\n") + exit(error(3351)) + } + else if (fuller > 0) { + k = lambda - fuller/(N-cols(Z)) + } + else if (kclass > 0) { + k = kclass + } + else { + k = lambda + } + QXhXh=(1-k)*QXX + k*QXZ*QZZinv*QXZ' + _makesymmetric(QXhXh) + QXhXhinv=invsym(QXhXh) + beta = QXy'*QXhXhinv*(1-k) + k*QZy'*QZZinv*QXZ'*QXhXhinv + + e[.,.] = y - X * beta' + ee = quadcross(e, wf*wvar, e) + sigmasq = ee /(N-dofminus) + + S = m_ivreg210_omega(vcvo) + Sinv = invsym(S) + + if ((robust=="") & (clustvarname=="") & (kernel=="")) { +// Efficient LIML + if (coviv=="") { +// Note dof correction is already in sigmasq, and the N reverses the division by N to get the Q version above. 
+ V=sigmasq*QXhXhinv/N + } + else { + QXPZXinv=invsym(makesymmetric(QXZ*QZZinv*QXZ')) + V=sigmasq*QXPZXinv/N + } + if (cols(Z)>cols(X)) { + Ze = quadcross(Z, wf*wvar, e) + gbar = Ze / N + j = N * gbar' * Sinv * gbar + } + else { + j=0 + } + } + else { + if (coviv=="") { + V=QXhXhinv*QXZ*QZZinv*S*QZZinv*QXZ'*QXhXhinv/N + } + else { + QXPZXinv=invsym(makesymmetric(QXZ*QZZinv*QXZ')) + V=QXPZXinv*QXZ*QZZinv*S*QZZinv*QXZ'*QXPZXinv/N + } + if (cols(Z)>cols(X)) { + QXZ_Sinv_QZX = QXZ * Sinv * QXZ' + _makesymmetric(QXZ_Sinv_QZX) + QXZ_Sinv_QZXinv=invsym(QXZ_Sinv_QZX) + beta2s = (QXZ_Sinv_QZXinv * QXZ * Sinv * QZy) + beta2s = beta2s' + e2s = y - X * beta2s' + Ze2s = quadcross(Z, wf*wvar, e2s) + gbar = Ze2s / N + j = N * gbar' * Sinv * gbar + } + else { + j=0 + } + } + _makesymmetric(V) + + st_matrix("r(beta)", beta) + st_matrix("r(S)",S) + st_matrix("r(V)",V) + st_numscalar("r(lambda)", lambda) + st_numscalar("r(kclass)", k) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", sigmasq) + +} // end program s_liml + + +// *************** CUE ******************** // + +void s_gmmcue( string scalar ZZmatrix, + string scalar XZmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + string scalar bname, + string scalar b0name, + scalar dofminus) + +{ + + struct ms_ivreg210_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + 
vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Pointers to views + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + py = &y + pX = &X + + if (b0name=="") { + +// CUE beta not supplied, so calculate/optimize + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. + +// CUE is preceded by IV or 2-step GMM to get starting values. +// Stata convention is that parameter vectors are row vectors, and optimizers +// require this, so must conform to this in what follows. + + beta_init = st_matrix(bname) + +// What follows is how to set out an optimization in Stata. First, initialize +// the optimization structure in the variable S. Then tell Mata where the +// objective function is, that it's a minimization, that it's a "d0" type of +// objective function (no analytical derivatives or Hessians), and that the +// initial values for the parameter vector are in beta_iv. Finally, optimize. + S = optimize_init() + + optimize_init_evaluator(S, &m_cuecrit()) + optimize_init_which(S, "min") + optimize_init_evaluatortype(S, "d0") + optimize_init_params(S, beta_init) +// CUE objective function takes 3 extra arguments: y, X and the structure with omega details + optimize_init_argument(S, 1, py) + optimize_init_argument(S, 2, pX) + optimize_init_argument(S, 3, vcvo) + + beta = optimize(S) + +// The last evaluation of the GMM objective function is J. 
+ j = optimize_result_value(S) +// Call m_ivreg210_omega one last time to get CUE weighting matrix. + e[.,.] = y - X * beta' + omega = m_ivreg210_omega(vcvo) + W = invsym(omega) + } + else { +// CUE beta supplied, so obtain maximized GMM obj function at b0 + beta = st_matrix(b0name) + e[.,.] = y - X * beta' + omega = m_ivreg210_omega(vcvo) + W = invsym(omega) + gbar = 1/N * quadcross(Z, wf*wvar, e) + j = N * gbar' * W * gbar + } + +// Bits and pieces + QXZ = st_matrix(XZmatrix)/N + + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + + QXZ_W_QZX = QXZ * W * QXZ' + _makesymmetric(QXZ_W_QZX) + QXZ_W_QZXinv=invsym(QXZ_W_QZX) + V = 1/N * QXZ_W_QZXinv + + st_matrix("r(beta)", beta) + st_matrix("r(S)", omega) + st_matrix("r(W)", W) + st_matrix("r(V)", V) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", sigmasq) + +} // end program s_gmmcue + +// CUE evaluator function. +// Handles only d0-type optimization; todo, g and H are just ignored. +// beta is the parameter set over which we optimize, and +// J is the objective function to minimize. + +void m_cuecrit(todo, beta, pointer py, pointer pX, struct ms_ivreg210_vcvorthog scalar vcvo, j, g, H) +{ + *vcvo.e[.,.] 
= *py - *pX * beta' + + omega = m_ivreg210_omega(vcvo) + W = invsym(omega) + +// Calculate gbar=Z'*e/N + gbar = 1/vcvo.N * quadcross(*vcvo.Z, vcvo.wf*(*vcvo.wvar), *vcvo.e) + j = vcvo.N * gbar' * W * gbar + +} // end program CUE criterion function + +// *************** Stock-Wright S statistic ******************** // + +void s_sstat( string scalar X2X2matrix, + string scalar Z1X2matrix, + string scalar X2ymatrix, + string scalar yname, + string scalar ename, + string scalar X2names, + string scalar Znames, + string scalar Z1names, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar dofminus) + +{ + + struct ms_ivreg210_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.dofminus = dofminus + + st_view(X2, ., st_tsrevar(tokens(X2names)), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., vcvo.ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Strategy is to partial out X2 (=Z2) from Z1 and from y +// For omega to work with partialled-out Z1, must set up a copy that can be changed. 
+ Z1=st_data( ., st_tsrevar(tokens(Z1names)), touse) + + + X2X2 = st_matrix(X2X2matrix) + Z1X2 = st_matrix(Z1X2matrix) + X2y = st_matrix(X2ymatrix) + + if (cols(tokens(X2names))>0) { +// The X2s are exogenous so we partial them out of y in one step - simple OLS +// Same with Z1 + QX2X2 = X2X2 / N + QX2y = X2y / N + QX2Z1 = Z1X2' / N + +// y=XB => X'y=X'XB + by = invsym(QX2X2)*QX2y + e[.,.] = y-X2*by + bZ1 = invsym(QX2X2)*QX2Z1 + Z1[.,.] = Z1 - X2*bZ1 + } + else { + e[.,.] = y + } + +// And tell m_ivreg210_omega that ZZ is the cross-product of the partialled-out Zs + Z1Z1matrix = quadcross(Z1, wf*wvar, Z1) + + vcvo.e = &e + vcvo.Z = &Z1 + vcvo.Znames = Z1names + vcvo.wvar = &wvar + vcvo.ZZ = Z1Z1matrix + + omega = m_ivreg210_omega(vcvo) + + W = invsym(omega) + +// If zeros on diag, not full rank and can't calculate the statistic + if (diag0cnt(W)==0) { + Ze = quadcross(Z1, wf*wvar, e) + gbar = Ze / N + j = N * gbar' * W * gbar + st_numscalar("r(j)", j) + } + +} // end program s_sstat + +// ************** Canonical correlations utility for collinearity check ************************************* + +void s_cc_collin( string scalar ZZmatrix, + string scalar X1X1matrix, + string scalar X1Zmatrix, + string scalar ZZinvmatrix) + +{ + + ZZ = st_matrix(ZZmatrix) + X1X1 = st_matrix(X1X1matrix) + X1Z = st_matrix(X1Zmatrix) + X1X1inv = invsym(X1X1) + ZZinv = st_matrix(ZZinvmatrix) + X1PZX1 = makesymmetric(X1Z*ZZinv*X1Z') + X1PZX1inv = invsym(X1PZX1) + + ccmat = X1X1inv*X1PZX1 + st_matrix("r(ccmat)", ccmat) + +} // end program s_cc_collin + +// ************** ffirst-stage stats ************************************* + +void s_ffirst( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar ZZinvmatrix, + string scalar XXinvmatrix, + string scalar XPZXinvmatrix, + string scalar ename, + string scalar X1names, + string scalar X1hatnames, + string scalar X2names, + string scalar Z1names, + string scalar touse, + string scalar weight, + string 
scalar wvarname, + scalar wf, + scalar N, + scalar N_clust, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar dofminus, + scalar sdofminus) + + +{ + + struct ms_ivreg210_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 + + Xnames = invtokens( (X1names, X2names), " ") + Znames = invtokens( (Z1names, X2names), " ") + + st_view(X1, ., st_tsrevar(tokens(X1names)), touse) + st_view(X1hat, ., st_tsrevar(tokens(X1hatnames)), touse) + st_view(Z1, ., st_tsrevar(tokens(Z1names)), touse) + st_view(X, ., st_tsrevar(tokens(Xnames)), touse) + st_view(Z, ., st_tsrevar(tokens(Znames)), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + + if ("X2names"~="") { + st_view(X2, ., st_tsrevar(tokens(X2names)), touse) + } + + K1=cols(X1) + K2=cols(X2) + K=K1+K2 + L1=cols(Z1) + L2=cols(X2) + L=L1+L2 + df = L1 + df_r = N-L + + ZZinv = st_matrix(ZZinvmatrix) + XXinv = st_matrix(XXinvmatrix) + XPZXinv = st_matrix(XPZXinvmatrix) + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QZX = st_matrix(XZmatrix)' / N + QZZinv = ZZinv*N + QXXinv = XXinv*N + 
+ shea = (diagonal(XXinv) :/ diagonal(XPZXinv)) // (X1, X2) in column vector + shea = (shea[(1::K1), 1 ])' // Just X1 in row vector + +// First-stage regressions + bz = invsym(QZZ)*QZX +// Drop 0s/1s = coefficients for "predicting" X2 (=Z2) + bz = bz[., (1..K1)] +// VCV + X1hat[.,.] = Z*bz + eall = X1 - X1hat + eeall = quadcross(eall, wf*wvar, eall) +// sigmas have large-sample dofminus correction incorporated but no small dof corrections + sigmasqall = eeall / (N-dofminus) +// V has all the classical VCVs in block diagonals + V = sigmasqall # ZZinv +// For Wald test of excluded instruments + R = I(L1) , J(L1, L2, 0) +// For AP F stats. Augment exogenous Z with X1hats. +// Create using new view to save memory + st_view(ZX1hat, ., st_tsrevar((tokens(Znames), tokens(X1hatnames))), touse) + QZhZh = quadcross(ZX1hat, wf*wvar, ZX1hat) / N + QZhX1 = quadcross(ZX1hat, wf*wvar, X1 ) / N +// matrix to save first-stage results + firstmat=J(12,0,0) + +// F and AP F stats loop over X1s + for (i=1; i<=K1; i++) { +// first-stage coeffs and residuals for ith X1. + b=bz[., i] + e[.,.] = eall[.,i] + +// Classical Wald F stat; also generates partial R2 +// Since r is an L1 x 1 zero vector, can use Rb instead of (Rb-r) +// Vi=V[| 1+(i-1)*L,1+(i-1)*L \ i*L, i*L |] +// Rb = R*b +// Wald = Rb' * invsym(R*Vi*R') * Rb +// Above is written out properly but amounts to the same thing as: + Rb = b[ (1::L1), . ] + RVR = V[| 1+(i-1)*L,1+(i-1)*L \ (i-1)*L+L1, (i-1)*L+L1 |] + Wald = Rb' * invsym(RVR) * Rb +// Wald stat has dofminus correction in it via sigmasq, +// so remove it to calculate partial R2 + pr2 = (Wald/(N-dofminus)) / (1 + (Wald/(N-dofminus))) + +// Robustify F stat if necessary. + if ((robust~="") | (clustvarname~="") | (kernel~="") | (sw~="")) { +// Z and ZZ changed later in loop, so here reset +// in order that m_ivreg210_omega is passed the right ones. +// vcvo.Z is a pointer so automatically updated. 
+ st_view(Z, ., st_tsrevar(tokens(Znames)), touse) + vcvo.ZZ = st_matrix(ZZmatrix) + omega=m_ivreg210_omega(vcvo) +// omega incorporates large dofminus adjustment + Vi = makesymmetric(QZZinv * omega * QZZinv) / N + RVR = Vi[ (1::L1), (1..L1) ] + Wald = Rb' * invsym(RVR) * Rb + } +// small dof adjustment is effectively additional L2, e.g., partialled-out regressors + df = L1 + if (clustvarname=="") { + df_r = (N-dofminus-L-sdofminus) + F = Wald / (N-dofminus) * df_r / df + } + else { + df_r = N_clust - 1 + F = Wald / (N-1) * (N-L-sdofminus) * (N_clust - 1) / N_clust / df + } + pvalue = Ftail(df, df_r, F) + +// Angrist-Pischke F stat etc. +// selmat is selection matrix for choosing Z1s, X2s and X1hats. +// Entries are col numbers of QZhZh. +// keepmat is for invsym - if linear dependencies, must keep X2 and X1hat. +// Entries are col numbers of selmat = col numbers of SELECTED QZhZh cols. + keepmat=J(1,0,.) + selmat=J(1,0,.) + for (j=1; j<=(L+K1); j++) { + if (j~=(L+i)) { // L+i is position of endog i for which we want AP F + selmat = (selmat, j) + } + if ( (j>L1) & (j~=(L+i)) ) { // Must keep all X2s and the (not-i) X1hats + keepmat = (keepmat, cols(selmat)) + } + } +// QZhZh is crossproduct of all Zs plus X1hats +// QZhZhi has all the Zs plus the right X1hats + QZhZhi = QZhZh[ selmat', selmat ] + QZhX1i = QZhX1 [ selmat', i ] +// Can't use qrsolve as in b = qrsolve(QZhZhi,QZhX1i), since we +// need to be able to control which cols get dropped, including +// cases of perverse collinearities. Must keep X2 and X1hat. + QZhZhinv = invsym(QZhZhi, keepmat) + b = QZhZhinv*QZhX1i + + e[.,.] = X1[.,i] - (Z, X1hat)[., selmat]*b + ee = quadcross(e, wf*wvar, e) + sigmasq = ee / (N-dofminus) + Vi = sigmasq * QZhZhinv / N + Rb=b[ (1::L1), .] 
+ RVR = Vi[ (1::L1), (1..L1) ] + Wald = Rb' * invsym(RVR) * Rb + +// Wald stat has dofminus correction in it via sigmasq, +// so remove it to calculate partial R2 + APr2 = (Wald/(N-dofminus)) / (1 + (Wald/(N-dofminus))) + +// Having calculated AP R-sq based on non-robust AP Wald, now get robust AP Wald if needed. + if ((robust~="") | (clustvarname~="") | (kernel~="") | (sw~="")) { +// Reset Z and ZZ so that m_ivreg210_omega is passed the right variables. +// vcvo.Z is a pointer so automatically updated. + Zhtokens = (tokens(Znames), tokens(X1hatnames)) + st_view(Z, ., st_tsrevar(Zhtokens[1, selmat]), touse) + vcvo.ZZ = quadcross(Z, wf*wvar, Z) + omega=m_ivreg210_omega(vcvo) + Vi = makesymmetric(QZhZhinv * omega * QZhZhinv) / N + } + RVR = Vi[ (1::L1), (1..L1) ] + Wald = Rb' * invsym(RVR) * Rb + +// small dof adjustment is effectively additional L2, e.g., partialled-out regressors + APFdf1 = (L1-K1+1) + if (clustvarname=="") { + APFdf2 = (N-dofminus-L-sdofminus) + APF = Wald / (N-dofminus) * APFdf2 / APFdf1 + } + else { + APFdf2 = N_clust - 1 + APF = Wald / (N-1) * (N-L-sdofminus) * (N_clust - 1) / N_clust / APFdf1 + } + APFp = Ftail(APFdf1, APFdf2, APF) + APchi2 = Wald + APchi2p = chi2tail(APFdf1, APchi2) +// Assemble results + firstmat = firstmat , (pr2 \ F \ df \ df_r \ pvalue \ APF \ APFdf1 \ APFdf2 \ APFp \ APchi2 \ APchi2p \ APr2) + } // end of loop for an X1 variable + + firstmat = shea \ firstmat // append shea partial r2 + st_matrix("r(firstmat)", firstmat) + +} // end program s_ffirst + +// ********************************************************************** + +void s_omega( + string scalar ZZmatrix, + string scalar ename, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar 
ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar dofminus) +{ + + struct ms_ivreg210_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + st_view(Z, ., st_tsrevar(tokens(Znames)), touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(e, ., vcvo.ename, touse) + + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + + ZZ = st_matrix(ZZmatrix) + + S=m_ivreg210_omega(vcvo) + + st_matrix("r(S)", S) +} // end of s_omega program + +end + + +exit + +********************************** VERSION COMMENTS ********************************** +* Initial version cloned from official ivreg version 5.0.9 19Dec2001 +* 1.0.2: add logic for reg3. 
Sargan test +* 1.0.3: add prunelist to ensure that count of excluded exogeneous is correct +* 1.0.4: revise option to exog(), allow included exog to be specified as well +* 1.0.5: switch from reg3 to regress, many options and output changes +* 1.0.6: fixed treatment of nocons in Sargan and C-stat, and corrected problems +* relating to use of nocons combined with a constant as an IV +* 1.0.7: first option reports F-test of excluded exogenous; prunelist bug fix +* 1.0.8: dropped prunelist and switched to housekeeping of variable lists +* 1.0.9: added collinearity checks; C-stat calculated with recursive call; +* added ffirst option to report only F-test of excluded exogenous +* from 1st stage regressions +* 1.0.10: 1st stage regressions also report partial R2 of excluded exogenous +* 1.0.11: complete rewrite of collinearity approach - no longer uses calls to +* _rmcoll, does not track specific variables dropped; prunelist removed +* 1.0.12: reorganised display code and saved results to enable -replay()- +* 1.0.13: -robust- and -cluster- now imply -small- +* 1.0.14: fixed hascons bug; removed ivreg predict fn (it didn't work); allowed +* robust and cluster with z stats and correct dofs +* 1.0.15: implemented robust Sargan stat; changed to only F-stat, removed chi-sq; +* removed exog option (only orthog works) +* 1.0.16: added clusterised Sargan stat; robust Sargan handles collinearities; +* predict now works with standard SE options plus resids; fixed orthog() +* so it accepts time series operators etc. +* 1.0.17: fixed handling of weights. fw, aw, pw & iw all accepted. +* 1.0.18: fixed bug in robust Sargan code relating to time series variables. 
+* 1.0.19: fixed bugs in reporting ranks of X'X and Z'Z +* fixed bug in reporting presence of constant +* 1.0.20: added GMM option and replaced robust Sargan with (equivalent) J; +* added saved statistics of 1st stage regressions +* 1.0.21: added Cragg HOLS estimator, including allowing empty endog list; +* -regress- syntax now not allowed; revised code searching for "_cons" +* 1.0.22: modified cluster output message; fixed bug in replay for Sargan/Hansen stat; +* exactly identified Sargan/Hansen now exactly zero and p-value not saved as e(); +* cluster multiplier changed to 1 (from buggy multiplier), in keeping with +* eg Wooldridge 2002 p. 193. +* 1.0.23: fixed orthog option to prevent abort when restricted equation is underid. +* 1.0.24: fixed bug if 1st stage regressions yielded missing values for saving in e(). +* 1.0.25: Added Shea version of partial R2 +* 1.0.26: Replaced Shea algorithm with Godfrey algorithm +* 1.0.27: Main call to regress is OLS form if OLS or HOLS is specified; error variance +* in Sargan and C statistics use small-sample adjustment if -small- option is +* specified; dfn of S matrix now correctly divided by sample size +* 1.0.28: HAC covariance estimation implemented +* Symmetrize all matrices before calling syminv +* Added hack to catch F stats that ought to be missing but actually have a +* huge-but-not-missing value +* Fixed dof of F-stat - was using rank of ZZ, should have used rank of XX (couldn't use df_r +* because it isn't always saved. This is because saving df_r triggers small stats +* (t and F) even when -post- is called without dof() option, hence df_r saved only +* with -small- option and hence a separate saved macro Fdf2 is needed. 
+* Added rankS to saved macros +* Fixed trap for "no regressors specified" +* Added trap to catch gmm option with no excluded instruments +* Allow OLS syntax (no endog or excluded IVs specified) +* Fixed error messages and traps for rank-deficient robust cov matrix; includes +* singleton dummy possibility +* Capture error if posting estimated VCV that isn't pos def and report slightly +* more informative error message +* Checks 3 variable lists (endo, inexog, exexog) separately for collinearities +* Added AC (autocorrelation-consistent but conditionally-homoskedastic) option +* Sargan no longer has small-sample correction if -small- option +* robust, cluster, AC, HAC all passed on to first-stage F-stat +* bw must be < T +* 1.0.29 -orthog- also displays Hansen-Sargan of unrestricted equation +* Fixed collinearity check to include nocons as well as hascons +* Fixed small bug in Godfrey-Shea code - macros were global rather than local +* Fixed larger bug in Godfrey-Shea code - was using mixture of sigma-squares from IV and OLS +* with and without small-sample corrections +* Added liml and kclass +* 1.0.30 Changed order of insts macro to match saved matrices S and W +* 2.0.00 Collinearities no longer -qui- +* List of instruments tested in -orthog- option prettified +* 2.0.01 Fixed handling of nocons with no included exogenous, including LIML code +* 2.0.02 Allow C-test if unrestricted equation is just-identified. Implemented by +* saving Hansen-Sargan dof as = 0 in e() if just-identified. 
+* 2.0.03 Added score() option per latest revision to official ivreg +* 2.0.04 Changed score() option to pscore() per new official ivreg +* 2.0.05 Fixed est hold bug in first-stage regressions +* Fixed F-stat finite sample adjustment with cluster option to match official Stata +* Fixed F-stat so that it works with hascons (collinearity with constant is removed) +* Fixed bug in F-stat code - wasn't handling failed posting of vcv +* No longer allows/ignores nonsense options +* 2.0.06 Modified lsStop to sync with official ivreg 5.1.3 +* 2.0.07a Working version of CUE option +* Added sortpreserve, ivar and tvar options +* Fixed smalls bug in calculation of T for AC/HAC - wasn't using the last ob +* in QS kernel, and didn't take account of possible dropped observations +* 2.0.07b Fixed macro bug that truncated long varlists +* 2.0.07c Added dof option. +* Changed display of RMSE so that more digits are displayed (was %8.1g) +* Fixed small bug where cstat was local macro and should have been scalar +* Fixed bug where C stat failed with cluster. NB: wmatrix option and cluster are not compatible! +* 2.0.7d Fixed bug in dof option +* 2.1.0 Added first-stage identification, weak instruments, and redundancy stats +* 2.1.01 Tidying up cue option checks, reporting of cue in output header, etc. +* 2.1.02 Used Poskitt-Skeels (2002) result that C-D eval = cceval / (1-cceval) +* 2.1.03 Added saved lists of separate included and excluded exogenous IVs +* 2.1.04 Added Anderson-Rubin test of signif of endog regressors +* 2.1.05 Fix minor bugs relating to cluster and new first-stage stats +* 2.1.06 Fix bug in cue: capture estimates hold without corresponding capture on estimates unhold +* 2.1.07 Minor fix to ereturn local wexp, promote to version 8.2 +* 2.1.08 Added dofminus option, removed dof option. Added A-R test p-values to e(). +* Minor bug fix to A-R chi2 test - was N chi2, should have been N-L chi2. +* Changed output to remove potentially misleading refs to N-L etc. 
+* Bug fix to rhs count - sometimes regressors could have exact zero coeffs +* Bug fix related to cluster - if user omitted -robust-, orthog would use Sargan and not J +* Changed output of Shea R2 to make clearer that F and p-values do not refer to it +* Improved handling of collinearites to check across inexog, exexog and endo lists +* Total weight statement moved to follow summ command +* Added traps to catch errors if no room to save temporary estimations with _est hold +* Added -savefirst- option. Removed -hascons-, now synonymous with -nocons-. +* 2.1.09 Fixes to dof option with cluster so it no longer mimics incorrect areg behavior +* Local ivreg2_cmd to allow testing under name ivreg2 +* If wmatrix supplied, used (previously not used if non-robust sargan stat generated) +* Allowed OLS using (=) syntax (empty endo and exexog lists) +* Clarified error message when S matrix is not of full rank +* cdchi2p, ardf, ardf_r added to saved macros +* first and ffirst replay() options; DispFirst and DispFFirst separately codes 1st stage output +* Added savefprefix, macro with saved first-stage equation names. +* Added version option. 
+* Added check for duplicate variables to collinearity checks +* Rewrote/simplified Godfrey-Shea partial r2 code +* 2.1.10 Added NOOUTput option +* Fixed rf bug so that first does not trigger unnecessary saved rf +* Fixed cue bug - was not starting with robust 2-step gmm if robust/cluster +* 2.1.11 Dropped incorrect/misleading dofminus adjustments in first-stage output summary +* 2.1.12 Collinearity check now checks across inexog/exexog/endog simultaneously +* 2.1.13 Added check to catch failed first-stage regressions +* Fixed misleading failed C-stat message +* 2.1.14 Fixed mishandling of missing values in AC (non-robust) block +* 2.1.15 Fixed bug in RF - was ignoring weights +* Added -endog- option +* Save W matrix for all cases; ensured copy is posted with wmatrix option so original isn't zapped +* Fixed cue bug - with robust, was entering IV block and overwriting correct VCV +* 2.1.16 Added -fwl- option +* Saved S is now robust cov matrix of orthog conditions if robust, whereas W is possibly non-robust +* weighting matrix used by estmator. inv(S)=W if estimator is efficient GMM. +* Removed pscore option (dropped by official ivreg). +* Fixed bug where -post- would fail because of missing values in vcv +* Remove hascons as synonym for nocons +* OLS now outputs 2nd footer with variable lists +* 2.1.17 Reorganization of code +* Added ll() macro +* Fixed N bug where weights meant a non-integer ob count that was rounded down +* Fixed -fwl- option so it correctly handles weights (must include when partialling-out) +* smatrix option takes over from wmatrix option. Consistent treatment of both. +* Saved smatrix and wmatrix now differ in case of inefficient GMM. +* Added title() and subtitle() options. +* b0 option returns a value for the Sargan/J stat even if exactly id'd. +* (Useful for S-stat = value of GMM objective function.) +* HAC and AC now allowed with LIML and k-class. 
+* Collinearity improvements: bug fixed because collinearity was mistakenly checked across +* inexog/exexog/endog simultaneously; endog predicted exactly by IVs => reclassified as inexog; +* _rmcollright enforces inexog>endo>exexog priority for collinearities, if Stata 9.2 or later. +* K-class, LIML now report Sargan and J. C-stat based on Sargan/J. LIML reports AR if homosked. +* nb: can always easily get a C-stat for LIML based on diff of two AR stats. +* Always save Sargan-Hansen as e(j); also save as e(sargan) if homoskedastic. +* Added Stock-Watson robust SEs options sw() +* 2.1.18 Added Cragg-Donald-Stock-Yogo weak ID statistic critical values to main output +* Save exexog_ct, inexog_ct and endog_ct as macros +* Stock-Watson robust SEs now assume ivar is group variable +* Option -sw- is standard SW. Option -swpsd- is PSD version a la page 6 point 10. +* Added -noid- option. Suppresses all first-stage and identification statistics. +* Internal calls to ivreg2 use noid option. +* Added hyperlinks to ivreg2.hlp and helpfile argument to display routines to enable this. 
+* 2.1.19 Added matrix rearrangement and checks for smatrix and wmatrix options +* Recursive calls to cstat simplified - no matrix rearrangement or separate robust/nonrobust needed +* Reintroduced weak ID stats to ffirst output +* Added robust ID stats to ffirst output for case of single endogenous regressor +* Fixed obscure bug in reporting 1st stage partial r2 - would report zero if no included exogenous vars +* Removed "HOLS" in main output (misleading if, e.g., estimation is AC but not HAC) +* Removed "ML" in main output if no endogenous regressors - now all ML is labelled LIML +* model=gmm is now model=gmm2s; wmatrix estimation is model=gmm +* wmatrix relates to gmm estimator; smatrix relates to gmm var-cov matrix; b0 behavior equiv to wmatrix +* b0 option implies nooutput and noid options +* Added nocollin option to skip collinearity checks +* Fixed minor display bug in ffirst output for endog vars with varnames > 12 characters +* Fixed bug in saved rf and first-stage results for vars with long varnames; uses permname +* Fixed bug in model df - had counted RHS, now calculates rank(V) since latter may be rank-deficient +* Rank of V now saved as macro rankV +* fwl() now allows partialling-out of just constant with _cons +* Added Stock-Wright S statistic (but adds overhead - calls preserve) +* Properties now include svyj. +* Noted only: fwl bug doesn't allow time-series operators. +* 2.1.20 Fixed Stock-Wright S stat bug - didn't allow time-series operators +* 2.1.21 Fixed Stock-Wright S stat to allow for no exog regressors cases +* 2.2.00 CUE partials out exog regressors, estimates endog coeffs, then exog regressors separately - faster +* gmm2s becomes standard option, gmm supported as legacy option +* 2.2.01 Added explanatory messages if gmm2s used. +* States if estimates efficient for/stats consistent for het, AC, etc. +* Fixed small bug that prevented "{help `helpfile'##fwl:fwl}" from displaying when -capture-d. 
+* Error message in footer about insuff rank of S changed to warning message with more informative message. +* Fixed bug in CUE with weights. +* 2.2.02 Removed CUE partialling-out; still available with fwl +* smatrix and wmatrix become documented options. e(model)="gmmw" means GMM with arbitrary W +* 2.2.03 Fixed bug in AC with aweights; was weighting zi'zi but not ei'ei. +* 2.2.04 Added abw code for bw(), removed properties(svyj) +* 2.2.05 Fixed bug in AC; need to clear variable vt1 at start of loop +* If iweights, N (#obs with precision) rounded to nearest integer to mimic official Stata treatment +* and therefore don't need N scalar at all - will be same as N +* Saves fwl_ct as macro. +* -ffirst- output, weak id stat, etc. now adjust for number of partialled-out variables. +* Related changes: df_m, df_r include adjustments for partialled-out variables. +* Option nofwlsmall introduced - suppresses above adjustments. Undocumented in ivreg2.hlp. +* Replaced ID tests based on canon corr with Kleibergen-Paap rk-based stats if not homoskedastic +* Replaced LR ID test stats with LM test stats. +* Checks that -ranktest- is installed. +* 2.2.06 Fixed bug with missing F df when cue called; updated required version of ranktest +* 2.2.07 Modified redundancy test statistic to match standard regression-based LM tests +* Change name of -fwl- option to -partial-. +* Use of b0 means e(model)=CUE. Added informative b0 option titles. b0 generates output but noid. +* Removed check for integer bandwidth if auto option used. +* 2.2.08 Add -nocollin- to internal calls and to -ivreg2_cue- to speed performance. +* 2.2.09 Per msg from Brian Poi, Alastair Hall verifies that Newey-West cited constant of 1.1447 +* is correct. Corrected mata abw() function. Require -ranktest- 1.1.03. +* 2.2.10 Added Angrist-Pischke multivariate f stats. Rewrite of first and ffirst output. +* Added Cragg-Donald to weak ID output even when non-iid. 
+* Fixed small bug in non-robust HAC code whereby extra obs could be used even if dep var missing. +* (required addition of L`tau'.(`s1resid') in creation of second touse variable) +* Fixed bugs that zapped varnames with "_cons" in them +* Changed tvar and ivar setup so that data must be tsset or xtset. +* Fixed bug in redundancy test stat when called by xtivreg2+cluster - no dofminus adj needed in this case +* Changed reporting so that gaps between panels are not reported as such. +* Added check that weight variable is not transformed by partialling out. +* Changed Stock-Wright S statistic so that it uses straight partialling-out of exog regressors +* (had been, in effect, doing 2SGMM partialling-out) +* Fixed bug where dropped collinear endogenous didn't get a warning or listing +* Removed N*CDEV Wald chi-sq statistic from ffirst output (LM stat enough) +* 3.0.00 Fully rewritten and Mata-ized code. Require min Stata 10.1 and ranktest 1.2.00. +* Mata support for Stock-Watson SEs for fixed effects estimator; doesn't support fweights. +* Changed handling of iweights yielding non-integer N so that (unlike official -regress-) all calcs +* for RMSE etc. use non-integer N and N is rounded down only at the end. +* Added support for Thompson/Cameron-Gelbach-Miller 2-level cluster-robust vcvs. +* 3.0.01 Now exits more gracefully if no regressors survive after collinearity checks +* 3.0.02 -capture- instead of -qui- before reduced form to suppress not-full-rank error warning +* Modified Stock-Wright code to partial out all incl Xs first, to reduce possibility of not-full-rank +* omega and missing sstat. Added check within Stock-Wright code to catch not-full-rank omega. +* Fixed bug where detailed first-stage stats with cluster were disrupted if data had been tsset +* using a different variables. +* Fixed bug that didn't allow regression on just a constant. +* Added trap for no observations. +* Added trap for auto bw with panel data - not allowed. 
+* 3.0.03 Fixed bug in m_ivreg210_omega that always used Stock-Watson spectral decomp to create invertible shat +* instead of only when (undocumented) spsd option is called. +* Fixed bug where, if matsize too small, exited with wrong error (mistakenly detected as collinearities) +* Removed inefficient call to -ranktest- that unnecessarily requested stats for all ranks, not just full. +* 3.0.04 Fixed coding error in m_ivreg210_omega for cluster+kernel. Was *vcvo.e[tmatrix[.,1]], should have been (*vcvo.e)[tmatrix[.,1]]. +* Fixed bug whereby clusters defined by strings were not handled correctly. +* Updated ranktest version check +* 3.0.05 Added check to catch unwanted transformations of time or panel variables by partial option. +* 3.0.06 Fixed partial bug - partialcons macro saved =0 unless _cons explicitly in partial() varlist +* 3.0.07 kclass was defaulting to LIML - fixed. +* Renamed spsd option to psda (a=abs) following Stock-Watson 2008. Added psd0 option following Politis 2007. +* Fixed bug that would prevent RF and first-stage with cluster and TS operators if cluster code changed sort order. +* Modified action if S matrix is not full rank and 2-step GMM chosen. Now continue but report problem in footer +* and do not report J stat etc. +* 3.0.08 Fixed cluster+bw; was not using all observations of all panel units if panel was unbalanced. +* Fixed inconsequential bug in m_ivreg210_omega that caused kernel loop to be entered (with no impact) even if kernel=="" +* Fixed small bug that compared bw to T instead of (correctly) to T/delta when checking that bw can't be too long. +* Added dkraay option = cluster on t var + kernel-robust +* Added kiefer option = truncated kernel, bw=T (max), and no robust +* Fixed minor reporting bug that reported time-series gaps in entire panel dataset rather than just portion touse-d. +* Recoded bw and kernel checks into subroutine vkernel. Allow non-integer bandwidth within check as in ranktest. 
+* 3.1.01 First ivreg2 version with accompanying Mata library (shared with -ranktest-). Mata library includes +* struct ms_ivreg210_vcvorthog, m_ivreg210_omega, m_ivreg210_calckw, s_ivreg210_vkernel. +* Fixed bug in 2-way cluster code (now in m_ivreg210_omega in Mata library) - would crash if K>1 (relevant for -ranktest- only). +* 3.1.02 Converted cdsy to Mata code and moved to Mata library. Standardized spelling/caps/etc. of QS as "Quadratic Spectral". +* 3.1.03 Improved partialling out in s_sstat and s_ffirst: replaced qrsolve with invsym. +* 3.1.04 Fixed minor bug in s_crossprod - would crash with L1=0 K1>0, and also with K=0 +* 3.1.05 Fixed minor bug in orthog - wasn't saving est results if eqn w/o suspect instruments did not execute properly +* Fixed minor bug in s_cccollin() - didn't catch perverse case of K1>0 (endog regressors) and L1=0 (no excl IVs) +* 3.1.06 Spelling fix for Danielle kernel, correct error check for bw vs T-1 +* 3.1.07 Fixed bug that would prevent save of e(sample) when partialling out just a constant +* 3.1.08 01Jan14. Fixed reporting bug with 2-way clustering and kernel-robust that would give wrong count for 2nd cluster variable. +* 3.1.09 13July14. _rmcollright under version control has serious bug for v10 and earlier. Replaced with canon corr approach. +* Fixed obscure bug in estimation sample - was not using obs when tsset tvar is missing, even if TS operators not used. +* Fixed bug in auto bw code so now ivreg2 and ivregress agree. Also, ivreg2 auto bw code handles gaps in TS correctly. +* 3.1.10 17Jan15. First ivreg210. Mata library and ranktest now internal with names incorporating "_ivreg210_". +* Fixed bug in collinearity code - was not detecting some collinearities in joint inexog/exog list, +* so added extra call to _rmcoll to catch any remaining ones. 
+ +* Version notes for imported version of ranktest: +* 1.0.00 First distributed version +* 1.0.01 With iweights, rkstat truncates N to mimic official Stata treatment of noninteger iweights +* Added warning if shat/vhat/vlab not of full rank. +* 1.0.02 Added NULLrank option +* Added eq names to saved V and S matrices +* 1.0.03 Added error catching for collinearities between varlists +* Not saving S matrix; V matrix now as in paper (without 1/N factor) +* Statistic, p-value etc set to missing if vcv not of full rank (Assumpt 2 in paper fails) +* 1.0.04 Fixed touse bug - was treating missings as touse-able +* Change some cross-products in robust loops to quadcross +* 1.0.05 Fixed bug with col/row names and ts operators. Added eval to saved matrices. +* 1.1.00 First ssc-ideas version. Added version 9.2 prior to Mata compiled section. +* 1.1.01 Allow non-integer bandwidth +* 1.1.02 Changed calc of yhat, zhat and pihat to avoid needlessly large intermediate matrices +* and to use more accurate qrsolve instead of inverted X'X. +* 1.1.03 Fixed touse bug that didn't catch missing cluster variable +* Fixed cluster bug - data needed to be sorted by cluster for Mata panel functions to work properly +* 1.2.00 Changed reporting so that gaps between panels are not reported as such. +* Added support for tdelta in tsset data. +* Changed tvar and ivar setup so that data must be tsset or xtset. +* Removed unnecessary loops through panel data with spectral kernels +* shat vcv now also saved. 
+* Added support for Thompson/Cameron-Gelbach-Miller 2-level cluster-robust vcv +* Added support for Stock-Watson vcv - but requires data to have FEs partialled out, & doesn't support fweights +* Removed mimicking of Stata mistake of truncated N with iweights to nearest integer +* Fixed small bug with quadratic kernel (wasn't using negative weights) +* Optimised code dealing with time-series data +* 1.2.01 Fixed bug that always used Stock-Watson spectral decomp to create invertible shat +* instead of only when (undocumented) spsd option is called. +* 1.2.02 Fixed bug that did not allow string cluster variables +* 1.2.03 Fixed bug in code for cluster+kernel robust (typo in imported code from ivreg2=>crash) +* 1.2.04 Replaced code for S with ivreg2 code modified to support e matrix (cols > 1) +* Code block (m_omega, m_calckw, struct definition) now shared by ranktest and ivreg2. +* Renamed spsd option to psd following ivreg2 3.0.07 +* Added wf ("weight factor") and statement about sum of weights, as in ivreg2 +* Added dofminus option, as in ivreg2 +* Fixed minor reporting bug - was reporting gaps in entire panel, not just touse-d portion +* Recoded kernel & bw checks to use shared ivreg2 subroutine vkernel +* 1.2.05 Fixed weighting bug introduced in 1.2.04. All weights were affected. +* Was result of incompatibility of code shared with ivreg2. +* 1.3.01 First ranktest version with accompanying Mata library (shared with -ivreg2-). +* Mata library includes struct ms_vcvorthog, m_omega, m_calckw, s_vkernel. +* Fixed bug in 2-way cluster code (now in m_omega in Mata library) - would crash if K>1. +* 1.3.02 Improved partialling out and matrix inversion - switched from qrsolve to invsym. +* Use _makesymmetric() instead of symmetrizing by hand. +* 1.3.03 01Jan14. Fixed reporting bug with 2-way clustering and kernel-robust that would give +* wrong count for 2nd cluster variable. +* 1.3.04 24Aug14. 
Fixed bug in markout - would include obs where some vars were missing + +* Version notes for imported version of Mata library +* 1.1.01 First version of library. +* Contains struct ms_vcvorthog, m_omega, m_calckw, s_vkernel. +* Compiled in Stata 9.2 for compatibility with ranktest 1.3.01 (a 9.2 program). +* 1.1.02 Add routine cdsy. Standardized spelling/caps/etc. of QS as "Quadratic Spectral" +* 1.1.03 Corrected spelling of "Danielle" kernel in m_omega() +* 1.1.04 Fixed weighting bugs in robust and cluster code of m_omega where K>1 +* 1.1.05 Added whichlivreg2(.) to aid in version control +* 1.1.06 Fixed remaining weighting bug (see 1.1.04) in 2-way clustering when interection +* of clustering levels is groups +* 1.1.07 Fixed HAC bug that crashed m_omega(.) when there were no obs for a particular lag diff --git a/110/replication_package/replication/ado/plus/i/ivreg210.sthlp b/110/replication_package/replication/ado/plus/i/ivreg210.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..4b1f310b1518fc86fe92b70b2056535396cf6ed0 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg210.sthlp @@ -0,0 +1,1681 @@ +{smcl} +{* 12Sept2011}{...} +{hline} +help for {hi:ivreg210} +{hline} + +{title:Extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression} + +{p 4}Full syntax + +{p 8 14}{cmd:ivreg210} {it:depvar} [{it:varlist1}] +{cmd:(}{it:varlist2}{cmd:=}{it:varlist_iv}{cmd:)} [{it:weight}] +[{cmd:if} {it:exp}] [{cmd:in} {it:range}] +{bind:[{cmd:,} {cmd:gmm2s}} +{cmd:bw(}{it:#}{cmd:)} +{cmd:kernel(}{it:string}{cmd:)} +{cmd:dkraay(}{it:integer}{cmd:)} +{cmd:kiefer} +{cmd:liml} +{cmd:fuller(}{it:#}{cmd:)} +{cmd:kclass(}{it:#}{cmd:)} +{cmd:coviv} +{cmd:cue} +{cmd:b0}{cmd:(}{it:matrix}{cmd:)} +{cmdab:r:obust} +{cmdab:cl:uster}{cmd:(}{it:varlist}{cmd:)} +{cmd:orthog(}{it:varlist_ex}{cmd:)} +{cmd:endog(}{it:varlist_en}{cmd:)} +{cmdab:red:undant(}{it:varlist_ex}{cmd:)} +{cmd:partial(}{it:varlist}{cmd:)} +{cmdab:sm:all} 
+{cmdab:noc:onstant} {cmdab:h}ascons +{cmd:smatrix}{cmd:(}{it:matrix}{cmd:)} +{cmd:wmatrix}{cmd:(}{it:matrix}{cmd:)} +{cmd:first} {cmd:ffirst} {cmd:savefirst} {cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:rf} {cmd:saverf} {cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:nocollin} {cmd:noid} +{cmdab:l:evel}{cmd:(}{it:#}{cmd:)} +{cmdab:nohe:ader} +{cmdab:nofo:oter} +{cmdab:ef:orm}{cmd:(}{it:string}{cmd:)} +{cmdab:dep:name}{cmd:(}{it:varname}{cmd:)} +{bind:{cmd:plus} ]} + +{p 4}Replay syntax + +{p 8 14}{cmd:ivreg210} +{bind:[{cmd:,} {cmd:first}} +{cmd:ffirst} {cmd:rf} +{cmdab:l:evel}{cmd:(}{it:#}{cmd:)} +{cmdab:nohe:ader} +{cmdab:nofo:oter} +{cmdab:ef:orm}{cmd:(}{it:string}{cmd:)} +{cmdab:dep:name}{cmd:(}{it:varname}{cmd:)} +{cmd:plus} ]} + +{p 4}Version syntax + +{p 8 14}{cmd:ivreg210}, {cmd:version} + +{p}{cmd:ivreg210} is compatible with Stata version 10.1 or later. + +{p}{cmd:ivreg210} may be used with time-series or panel data, +in which case the data must be {cmd:tsset} +before using {cmd:ivreg210}; see help {help tsset}. + +{p}All {it:varlists} may contain time-series operators, +but factor variables are not currently supported; +see help {help varlist}. + +{p}{cmd:by}, {cmd:rolling}, {cmd:statsby}, {cmd:xi}, +{cmd:bootstrap} and {cmd:jackknife} are allowed; see help {help prefix}. + +{p}{cmd:aweight}s, {cmd:fweight}s, {cmd:iweight}s and {cmd:pweight}s +are allowed; see help {help weights}. + +{p}The syntax of {help predict} following {cmd:ivreg210} is + +{p 8 16}{cmd:predict} [{it:type}] {it:newvarname} [{cmd:if} {it:exp}] +[{cmd:in} {it:range}] [{cmd:,} {it:statistic}] + +{p}where {it:statistic} is + +{p 8 23}{cmd:xb}{space 11}fitted values; the default{p_end} +{p 8 23}{cmdab:r:esiduals}{space 4}residuals{p_end} +{p 8 23}{cmd:stdp}{space 9}standard error of the prediction{p_end} + +{p}These statistics are available both in and out of sample; +type "{cmd:predict} {it:...} {cmd:if e(sample)} {it:...}" +if wanted only for the estimation sample. 
+ +{title:Contents} +{p 2}{help ivreg2##s_description:Description}{p_end} +{p 2}{help ivreg2##s_robust:Robust, cluster and 2-way cluster, AC, HAC, and cluster+HAC SEs and statistics}{p_end} +{p 2}{help ivreg2##s_gmm:GMM estimation}{p_end} +{p 2}{help ivreg2##s_liml:LIML, k-class and GMM-CUE estimation}{p_end} +{p 2}{help ivreg2##s_sumopt:Summary of robust, HAC, AC, GMM, LIML and CUE options}{p_end} +{p 2}{help ivreg2##s_overid:Testing overidentifying restrictions}{p_end} +{p 2}{help ivreg2##s_endog:Testing subsets of regressors and instruments for endogeneity}{p_end} +{p 2}{help ivreg2##s_relevance:Tests of under- and weak identification}{p_end} +{p 2}{help ivreg2##s_redundancy:Testing instrument redundancy}{p_end} +{p 2}{help ivreg2##s_first:First-stage regressions, identification, and weak-id-robust inference}{p_end} +{p 2}{help ivreg2##s_rf:Reduced form estimates}{p_end} +{p 2}{help ivreg2##s_partial:Partialling-out exogenous regressors}{p_end} +{p 2}{help ivreg2##s_ols:OLS and Heteroskedastic OLS (HOLS) estimation}{p_end} +{p 2}{help ivreg2##s_collin:Collinearities}{p_end} +{p 2}{help ivreg2##s_speed:Speed options: nocollin and noid}{p_end} +{p 2}{help ivreg2##s_small:Small sample corrections}{p_end} +{p 2}{help ivreg2##s_options:Options summary}{p_end} +{p 2}{help ivreg2##s_macros:Remarks and saved results}{p_end} +{p 2}{help ivreg2##s_examples:Examples}{p_end} +{p 2}{help ivreg2##s_refs:References}{p_end} +{p 2}{help ivreg2##s_acknow:Acknowledgements}{p_end} +{p 2}{help ivreg2##s_citation:Authors}{p_end} +{p 2}{help ivreg2##s_citation:Citation of ivreg210}{p_end} + +{marker s_description}{title:Description} + +{p}{cmd:ivreg210} implements a range of single-equation estimation methods +for the linear regression model: OLS, instrumental +variables (IV, also known as two-stage least squares, 2SLS), +the generalized method of moments (GMM), +limited-information maximum likelihood (LIML), and k-class estimators. 
+In the language of IV/GMM, {it:varlist1} are the exogenous +regressors or "included instruments", +{it:varlist_iv} are the exogenous variables excluded +from the regression or "excluded instruments", +and {it:varlist2} the endogenous regressors that are being "instrumented". + +{p}{cmd:ivreg210} will also estimate linear regression models using +robust (heteroskedastic-consistent), +autocorrelation-consistent (AC), +heteroskedastic and autocorrelation-consistent (HAC) +and cluster-robust variance estimates. + +{p}{cmd:ivreg210} is an alternative to Stata's official {cmd:ivregress}. +Its features include: +two-step feasible GMM estimation ({cmd:gmm2s} option) +and continuously-updated GMM estimation ({cmd:cue} option); +LIML and k-class estimation; +automatic output of overidentification and underidentification test statistics; +C statistic test of exogeneity of subsets of instruments +({cmd:orthog()} option); +endogeneity tests of endogenous regressors +({cmd:endog()} option); +test of instrument redundancy +({cmd:redundant()} option); +kernel-based autocorrelation-consistent (AC) +and heteroskedastic and autocorrelation consistent (HAC) standard errors +and covariance estimation ({cmd:bw(}{it:#}{cmd:)} option), +with user-specified choice of kernel ({cmd:kernel()} option); +two-level {cmd:cluster}-robust standard errors and statistics; +default reporting of large-sample statistics +(z and chi-squared rather than t and F); +{cmd:small} option to report small-sample statistics; +first-stage regressions reported with various tests and statistics for +identification and instrument relevance; +{cmd:ffirst} option to report only these identification statistics +and not the first-stage regression results themselves. +{cmd:ivreg210} can also be used for ordinary least squares (OLS) estimation +using the same command syntax as official {cmd:regress} and {cmd:newey}. 
+ +{marker s_robust}{dlgtab:Robust, cluster and 2-level cluster, AC, HAC, and cluster+HAC SEs and statistics} + +{p}The standard errors and test statistics reported by {cmd:ivreg210} can be made consistent +to a variety of violations of the assumption of i.i.d. errors. +When these options are combined with +either the {cmd:gmm2s} or {cmd:cue} options (see below), +the parameter estimators reported are also efficient +in the presence of the same violation of i.i.d. errors. + +{p}The options for SEs and statistics are:{break} +{bind:(1) {cmd:robust}} causes {cmd:ivreg210} to report SEs and statistics that are +robust to the presence of arbitrary heteroskedasticity.{break} +{bind:(2) {cmd:cluster}({it:varname})} SEs and statistics are robust to both +arbitrary heteroskedasticity and arbitrary intra-group correlation, +where {it:varname} identifies the group. +See the relevant Stata manual entries on obtaining robust covariance estimates +for further details.{break} +{bind:(3) {cmd:cluster}({it:varname1 varname2})} provides 2-way clustered SEs +and statistics (Cameron et al. 2006, Thompson 2009) +that are robust to arbitrary heteroskedasticity and intra-group correlation +with respect to 2 non-nested categories defined by {it:varname1} and {it:varname2}. +See below for a detailed description.{break} +{bind:(4) {cmd:bw(}{it:#}{cmd:)}} requests AC SEs and statistics that are +robust to arbitrary autocorrelation.{break} +{bind:(5) {cmd:bw(}{it:#}{cmd:)}} combined with {cmd:robust} +requests HAC SEs and statistics that are +robust to both arbitrary heteroskedasticity and arbitrary autocorrelation.{break} +{bind:(6) {cmd:bw(}{it:#}{cmd:)}} combined with {cmd:cluster}({it:varname}) +is allowed with either 1- or 2-level clustering if the data are panel data +that are {cmd:tsset} on the time variable {it:varname}. 
+Following Driscoll and Kraay (1998), +the SEs and statistics reported will be robust to disturbances +that are common to panel units and that are persistent, i.e., autocorrelated.{break} +{bind:(7) {cmd:dkraay(}{it:#}{cmd:)}} is a shortcut for the Driscoll-Kraay SEs +for panel data in (6). +It is equivalent to clustering on the {cmd:tsset} time variable +and the bandwidth supplied as {it:#}. +The default Bartlett kernel can be overridden with the {cmd:kernel} option.{break} +{bind:(8) {cmd:kiefer}} implements SEs and statistics for panel data +that are robust to arbitrary intra-group autocorrelation +(but {it:not} heteroskedasticity) as per Kiefer (1980). +It is equivalent to specifying the truncated kernel with {cmd:kernel(tru)} +and {cmd:bw(}{it:#}{cmd:)} where {it:#} is the full length of the panel. + +{p}Details: + +{p}{cmd:cluster}({it:varname1 varname2}) provides 2-way cluster-robust SEs +and statistics as proposed by Cameron, Gelbach and Miller (2006) and Thompson (2009). +"Two-way cluster-robust" means the SEs and statistics +are robust to arbitrary within-group correlation in two distinct non-nested categories +defined by {it:varname1} and {it:varname2}. +A typical application would be panel data where one "category" is the panel +and the other "category" is time; +the resulting SEs are robust +to arbitrary within-panel autocorrelation (clustering on panel id) +and to arbitrary contemporaneous cross-panel correlation (clustering on time). +There is no point in using 2-way cluster-robust SEs if the categories are nested, +because the resulting SEs are equivalent to clustering on the larger category. +{it:varname1} and {it:varname2} do not have to +uniquely identify observations. +The order of {it:varname1} and {it:varname2} does not matter for the results, +but processing may be faster if the category with the larger number of categories +(typically the panel dimension) is listed first. 
+ +{p}Cameron, Gelbach and Miller (2006) show how this approach can accommodate +multi-way clustering, where the number of different non-nested categories is arbitrary. +Their Stata command {cmd:cgmreg} implements 2-way and multi-way clustering +for OLS estimation. +The two-way clustered variance-covariance estimator +is calculated using 3 different VCEs: one clustered on {it:varname1}, +the second clustered on {it:varname2}, and the third clustered on the +intersection of {it:varname1} and {it:varname2}. +Cameron et al. (2006, pp. 8-9) discuss two possible small-sample adjustments +using the number of clusters in each category. +{cmd:cgmreg} uses one method (adjusting the 3 VCEs separately based on +the number of clusters in the categories VCE clusters on); +{cmd:ivreg210} uses the second (adjusting the final 2-way cluster-robust VCE +using the smaller of the two numbers of clusters). +For this reason, {cmd:ivreg210} and {cmd:cgmreg} will produce slightly different SEs. +See also {help ivreg2##s_small:small sample corrections} below. + +{p}{cmd:ivreg210} allows a variety of options for kernel-based HAC and AC estimation. +The {cmd:bw(}{it:#}{cmd:)} option sets the bandwidth used in the estimation +and {cmd:kernel(}{it:string}{cmd:)} is the kernel used; +the default kernel is the Bartlett kernel, +also known in econometrics as Newey-West (see help {help newey}). +The full list of kernels available is (abbreviations in parentheses): +Bartlett (bar); Truncated (tru); Parzen (par); Tukey-Hanning (thann); +Tukey-Hamming (thamm); Daniell (dan); Tent (ten); and Quadratic-Spectral (qua or qs). +When using the Bartlett, Parzen, or Quadratic spectral kernels, the automatic +bandwidth selection procedure of Newey and West (1994) can be chosen +by specifying {cmd:bw(}{it:auto}{cmd:)}. +{cmd:ivreg210} can also be used for kernel-based estimation +with panel data, i.e., a cross-section of time series. 
+Before using {cmd:ivreg210} for kernel-based estimation +of time series or panel data, +the data must be {cmd:tsset}; see help {help tsset}. + +{p}Following Driscoll and Kraay (1998), +{cmd:bw(}{it:#}{cmd:)} combined with {cmd:cluster}({it:varname}) +and applied to panel data produces SEs that are +robust to arbitrary common autocorrelated disturbances. +The data must be {cmd:tsset} with the time variable specified as {it:varname}. +Driscoll-Kraay SEs also can be specified using the {cmd:dkraay(}{it:#}{cmd:)} option, +where {it:#} is the bandwidth. +The default Bartlett kernel can be overridden with the {cmd:kernel} option. +Note that the Driscoll-Kraay variance-covariance estimator is a large-T estimator, +i.e., the panel should have a long-ish time-series dimension. + +{p}Used with 2-way clustering as per Thompson (2009), +{cmd:bw(}{it:#}{cmd:)} combined with {cmd:cluster}({it:varname}) +provides SEs and statistics that are robust +to autocorrelated within-panel disturbances (clustering on panel id) +and to autocorrelated across-panel disturbances (clustering on time +combined with kernel-based HAC). +The approach proposed by Thompson (2009) can be implemented in {cmd:ivreg210} +by choosing the truncated kernel {cmd:kernel(}{it:tru}{cmd:)} +and {cmd:bw(}{it:#}{cmd:)}, where the researcher knows or assumes +that the common autocorrelated disturbances can be ignored after {it:#} periods. + +{p}{cmd:Important:} Users should be aware of the asymptotic requirements +for the consistency of the chosen VCE. +In particular: consistency of the 1-way cluster-robust VCE requires +the number of clusters to go off to infinity; +consistency of the 2-way cluster-robust VCE requires the numbers of +clusters in both categories to go off to infinity; +consistency of kernel-robust VCEs requires the numbers of +observations in the time dimension to go off to infinity. +See Angrist and Pischke (2009), Cameron et al. 
(2006) and Thompson (2009)
+for detailed discussions of the performance of the cluster-robust VCE
+when the number of clusters is small.
+
+{marker s_gmm}{dlgtab:GMM estimation}
+
+{p}When combined with the above options, the {cmd:gmm2s} option generates
+efficient estimates of the coefficients as well as consistent
+estimates of the standard errors.
+The {cmd:gmm2s} option implements the two-step efficient
+generalized method of moments (GMM) estimator.
+The efficient GMM estimator minimizes the GMM criterion function
+J=N*g'*W*g, where N is the sample size,
+g are the orthogonality or moment conditions
+(specifying that all the exogenous variables, or instruments,
+in the equation are uncorrelated with the error term)
+and W is a weighting matrix.
+In two-step efficient GMM, the efficient or optimal weighting matrix
+is the inverse of an estimate of the covariance matrix of orthogonality conditions.
+The efficiency gains of this estimator relative to the
+traditional IV/2SLS estimator derive from the use of the optimal
+weighting matrix, the overidentifying restrictions of the model,
+and the relaxation of the i.i.d. assumption.
+For an exactly-identified model,
+the efficient GMM and traditional IV/2SLS estimators coincide,
+and under the assumptions of conditional homoskedasticity and independence,
+the efficient GMM estimator is the traditional IV/2SLS estimator.
+For further details, see Hayashi (2000), pp. 206-13 and 226-27.
+
+{p}The {cmd:wmatrix} option allows the user to specify a weighting matrix
+rather than computing the optimal weighting matrix.
+Estimation with the {cmd:wmatrix} option yields a possibly inefficient GMM estimator.
+{cmd:ivreg210} will use this inefficient estimator as the first-step GMM estimator
+in two-step efficient GMM when combined with the {cmd:gmm2s} option;
+otherwise, {cmd:ivreg210} reports the regression results
+using this inefficient GMM estimator. 
+
+{p}The {cmd:smatrix} option allows the user to directly
+specify the matrix S, the covariance matrix of orthogonality conditions.
+{cmd:ivreg210} will use this matrix in the calculation of the variance-covariance
+matrix of the estimator, the J statistic,
+and, if the {cmd:gmm2s} option is specified,
+the two-step efficient GMM coefficients.
+The {cmd:smatrix} can be useful for guaranteeing a positive test statistic
+in user-specified "GMM-distance tests" (see {help ivreg2##s_endog:below}).
+For further details, see Hayashi (2000), pp. 220-24.
+
+{marker s_liml}{dlgtab:LIML, k-class and GMM-CUE estimation}
+
+{marker liml}{p} Maximum-likelihood estimation of a single equation of this form
+(endogenous RHS variables and excluded instruments)
+is known as limited-information maximum likelihood or LIML.
+The overidentifying restrictions test
+reported after LIML estimation is the Anderson-Rubin (1950) overidentification
+statistic in a homoskedastic context.
+LIML, OLS and IV/2SLS are examples of k-class estimators.
+LIML is a k-class estimator with k=the LIML eigenvalue lambda;
+2SLS is a k-class estimator with k=1;
+OLS is a k-class estimator with k=0.
+Estimators based on other values of k have been proposed.
+Fuller's modified LIML (available with the {cmd:fuller(}{it:#}{cmd:)} option)
+sets k = lambda - alpha/(N-L), where lambda is the LIML eigenvalue,
+L = number of instruments (L1 excluded and L2 included),
+and the Fuller parameter alpha is a user-specified positive constant.
+Nagar's bias-adjusted 2SLS estimator can be obtained with the
+{cmd:kclass(}{it:#}{cmd:)} option by setting
+k = 1 + (L-K)/N, where L-K = number of overidentifying restrictions,
+K = number of regressors (K1 endogenous and K2=L2 exogenous)
+and N = the sample size.
+For a discussion of LIML and k-class estimators,
+see Davidson and MacKinnon (1993, pp. 644-51). 
+ +{p} The GMM generalization of the LIML estimator +to the case of possibly heteroskedastic +and autocorrelated disturbances +is the "continuously-updated" GMM estimator or CUE +of Hansen, Heaton and Yaron (1996). +The CUE estimator directly maximizes the GMM objective function +J=N*g'*W(b_cue)*g, where W(b_cue) is an optimal weighting matrix +that depends on the estimated coefficients b_cue. +{cmd:cue}, combined with {cmd:robust}, {cmd:cluster}, and/or {cmd:bw}, +generates coefficient estimates that are efficient in the presence +of the corresponding deviations from homoskedasticity. +Specifying {cmd:cue} with no other options +is equivalent to the combination of the options {cmd:liml} and {cmd:coviv}. +The CUE estimator requires numerical optimization methods, +and the implementation here uses Mata's {cmd:optimize} routine. +The starting values are either IV or two-step efficient GMM +coefficient estimates. +If the user wants to evaluate the CUE objective function at +an arbitrary user-defined coefficient vector instead of having {cmd:ivreg210} +find the coefficient vector that minimizes the objective function, +the {cmd:b0(}{it:matrix}{cmd:)} option can be used. +The value of the CUE objective function at {cmd:b0} +is the Sargan or Hansen J statistic reported in the output. 
+
+{marker s_sumopt}{dlgtab:Summary of robust, HAC, AC, GMM, LIML and CUE options}
+
+
+
+Estimator {col 20}No VCE option specified {col 65}VCE option
+ option {col 60}{cmd:robust}, {cmd:cluster}, {cmd:bw}, {cmd:kernel}
+{hline}
+(none){col 15}IV/2SLS{col 60}IV/2SLS with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:liml}{col 15}LIML{col 60}LIML with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:gmm2s}{col 15}IV/2SLS{col 60}Two-step GMM with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:cue}{col 15}LIML{col 60}CUE GMM with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:kclass}{col 15}k-class estimator{col 60}k-class estimator with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:wmatrix}{col 15}Possibly inefficient GMM{col 60}Ineff GMM with
+{col 15}SEs consistent under homoskedasticity{col 60}robust SEs
+
+{cmd:gmm2s} + {col 15}Two-step GMM{col 60}Two-step GMM with
+{cmd:wmatrix}{col 15}with user-specified first step{col 60}robust SEs
+{col 15}SEs consistent under homoskedasticity
+
+
+{p}With the {cmd:bw} or {cmd:bw} and {cmd:kernel} VCE options,
+SEs are autocorrelation-robust (AC).
+Combining the {cmd:robust} option with {cmd:bw}, SEs are heteroskedasticity- and
+autocorrelation-robust (HAC).
+
+{p}For further details, see Hayashi (2000), pp. 206-13 and 226-27
+(on GMM estimation), Wooldridge (2002), p. 193 (on cluster-robust GMM),
+and Hayashi (2000), pp. 406-10 or Cushing and McGarvey (1999)
+(on kernel-based covariance estimation).
+
+{marker s_overid}{marker overidtests}{dlgtab:Testing overidentifying restrictions}
+
+{p}The Sargan-Hansen test is a test of overidentifying restrictions.
+The joint null hypothesis is that the instruments are valid
+instruments, i.e., uncorrelated with the error term,
+and that the excluded instruments are correctly excluded from the estimated equation. 
+Under the null, the test statistic is distributed as chi-squared +in the number of (L-K) overidentifying restrictions. +A rejection casts doubt on the validity of the instruments. +For the efficient GMM estimator, the test statistic is +Hansen's J statistic, the minimized value of the GMM criterion function. +For the 2SLS estimator, the test statistic is Sargan's statistic, +typically calculated as N*R-squared from a regression of the IV residuals +on the full set of instruments. +Under the assumption of conditional homoskedasticity, +Hansen's J statistic becomes Sargan's statistic. +The J statistic is consistent in the presence of heteroskedasticity +and (for HAC-consistent estimation) autocorrelation; +Sargan's statistic is consistent if the disturbance is homoskedastic +and (for AC-consistent estimation) if it is also autocorrelated. +With {cmd:robust}, {cmd:bw} and/or {cmd:cluster}, +Hansen's J statistic is reported. +In the latter case the statistic allows observations +to be correlated within groups. +For further discussion see e.g. Hayashi (2000, pp. 227-8, 407, 417). + +{p}The Sargan statistic can also be calculated after +{cmd:ivreg} or {cmd:ivreg210} by the command {cmd:overid}. +The features of {cmd:ivreg210} that are unavailable in {cmd:overid} +are the J statistic and the C statistic; +the {cmd:overid} options unavailable in {cmd:ivreg210} +are various small-sample and pseudo-F versions of Sargan's statistic +and its close relative, Basmann's statistic. +See help {help overid} (if installed). + +{marker s_endog}{dlgtab:Testing subsets of regressors and instruments for endogeneity} + +{marker ctest}{p}The C statistic +(also known as a "GMM distance" +or "difference-in-Sargan" statistic) +implemented using the {cmd:orthog} option, +allows a test of a subset of the orthogonality conditions, i.e., +it is a test of the exogeneity of one or more instruments. 
+It is defined as
+the difference of the Sargan-Hansen statistic
+of the equation with the smaller set of instruments
+(valid under both the null and alternative hypotheses)
+and the equation with the full set of instruments,
+i.e., including the instruments whose validity is suspect.
+Under the null hypothesis that
+both the smaller set of instruments
+and the additional, suspect instruments are valid,
+the C statistic is distributed as chi-squared
+in the number of instruments tested.
+Note that failure to reject the null hypothesis
+requires that the full set of orthogonality conditions be valid;
+the C statistic and the Sargan-Hansen test statistics
+for the equations with both the smaller and full set of instruments
+should all be small.
+The instruments tested may be either excluded or included exogenous variables.
+If excluded exogenous variables are being tested,
+the equation that does not use these orthogonality conditions
+omits the suspect instruments from the excluded instruments.
+If included exogenous variables are being tested,
+the equation that does not use these orthogonality conditions
+treats the suspect instruments as included endogenous variables.
+To guarantee that the C statistic is non-negative in finite samples,
+the estimated covariance matrix of the full set orthogonality conditions
+is used to calculate both Sargan-Hansen statistics
+(in the case of simple IV/2SLS, this amounts to using the MSE
+from the unrestricted equation to calculate both Sargan statistics).
+If estimation is by LIML, the C statistic reported
+is now based on the Sargan-Hansen test statistics from
+the restricted and unrestricted equation.
+For further discussion, see Hayashi (2000), pp. 218-22 and pp. 232-34.
+
+{marker endogtest}{p}Endogeneity tests of one or more endogenous regressors
+can be implemented using the {cmd:endog} option. 
+Under the null hypothesis that the specified endogenous regressors
+can actually be treated as exogenous, the test statistic is distributed
+as chi-squared with degrees of freedom equal to the number of regressors tested.
+The endogeneity test implemented by {cmd:ivreg210} is, like the C statistic,
+defined as the difference of two Sargan-Hansen statistics:
+one for the equation with the smaller set of instruments,
+where the suspect regressor(s) are treated as endogenous,
+and one for the equation with the larger set of instruments,
+where the suspect regressors are treated as exogenous.
+Also like the C statistic, the estimated covariance matrix used
+guarantees a non-negative test statistic.
+Under conditional homoskedasticity,
+this endogeneity test statistic is numerically equal to
+a Hausman test statistic; see Hayashi (2000, pp. 233-34).
+The endogeneity test statistic can also be calculated after
+{cmd:ivreg} or {cmd:ivreg210} by the command {cmd:ivendog}.
+Unlike the Durbin-Wu-Hausman tests reported by {cmd:ivendog},
+the {cmd:endog} option of {cmd:ivreg210} can report test statistics
+that are robust to various violations of conditional homoskedasticity;
+the {cmd:ivendog} option unavailable in {cmd:ivreg210}
+is the Wu-Hausman F-test version of the endogeneity test.
+See help {help ivendog} (if installed).
+
+{marker s_relevance}{dlgtab:Tests of under- and weak identification}
+
+{marker idtest}{p}{cmd:ivreg210} automatically reports tests of
+both underidentification and weak identification.
+The underidentification test is an LM test of whether the equation is identified,
+i.e., that the excluded instruments are "relevant",
+meaning correlated with the endogenous regressors.
+The test is essentially the test of the rank of a matrix:
+under the null hypothesis that the equation is underidentified,
+the matrix of reduced form coefficients on the L1 excluded instruments
+has rank=K1-1 where K1=number of endogenous regressors. 
+Under the null,
+the statistic is distributed as chi-squared
+with degrees of freedom=(L1-K1+1).
+A rejection of the null indicates that the matrix is full column rank,
+i.e., the model is identified.
+
+{p}For a test of whether a particular endogenous regressor alone is identified,
+see the discussion {help ivreg2##apstats:below} of the Angrist-Pischke (2009) procedure.
+
+{p}When errors are assumed to be i.i.d.,
+{cmd:ivreg210} automatically reports an LM version of
+the Anderson (1951) canonical correlations test.
+Denoting the minimum eigenvalue of the canonical correlations as CCEV,
+the smallest canonical correlation between the K1 endogenous regressors
+and the L1 excluded instruments
+(after partialling out the K2=L2 exogenous regressors)
+is sqrt(CCEV),
+and the Anderson LM test statistic is N*CCEV,
+i.e., N times the square of the smallest canonical correlation.
+With the {cmd:first} or {cmd:ffirst} options,
+{cmd:ivreg210} also reports the closely-related
+Cragg-Donald (1993) Wald test statistic.
+Again assuming i.i.d. errors,
+and denoting the minimum eigenvalue of the Cragg-Donald statistic as CDEV,
+CDEV=CCEV/(1-CCEV),
+and the Cragg-Donald Wald statistic is N*CDEV.
+Like the Anderson LM statistic, the Cragg-Donald Wald statistic
+is distributed as chi-squared with (L1-K1+1) degrees of freedom.
+Note that a result of rejection of the null
+should be treated with caution,
+because weak instrument problems may still be present.
+See Hall et al. (1996) for a discussion of this test,
+and below for discussion of testing for the presence of weak instruments.
+
+{p}When the i.i.d. assumption is dropped
+and {cmd:ivreg210} reports heteroskedastic, AC, HAC
+or cluster-robust statistics,
+the Anderson LM and Cragg-Donald Wald statistics are no longer valid.
+In these cases, {cmd:ivreg210} reports the LM and Wald versions
+of the Kleibergen-Paap (2006) rk statistic,
+also distributed as chi-squared with (L1-K1+1) degrees of freedom. 
+The rk statistic can be seen as a generalization of these tests +to the case of non-i.i.d. errors; +see Kleibergen and Paap (2006) for discussion, +and Kleibergen and Schaffer (2007) for a Stata implementation, {cmd:ranktest}. +{cmd:ivreg210} requires {cmd:ranktest} to be installed, +and will prompt the user to install it if necessary. +If {cmd:ivreg210} is invoked with the {cmd:robust} option, +the rk underidentification test statistics will be heteroskedastic-robust, +and similarly with {cmd:bw} and {cmd:cluster}. + +{marker widtest}{p}"Weak identification" arises when the excluded instruments are correlated +with the endogenous regressors, but only weakly. +Estimators can perform poorly when instruments are weak, +and different estimators are more robust to weak instruments (e.g., LIML) +than others (e.g., IV); +see, e.g., Stock and Yogo (2002, 2005) for further discussion. +When errors are assumed to be i.i.d., +the test for weak identification automatically reported +by {cmd:ivreg210} is an F version of the Cragg-Donald Wald statistic, (N-L)/L1*CDEV, +where L is the number of instruments and L1 is the number of excluded instruments. +Stock and Yogo (2005) have compiled critical values +for the Cragg-Donald F statistic for +several different estimators (IV, LIML, Fuller-LIML), +several different definitions of "perform poorly" (based on bias and test size), +and a range of configurations (up to 100 excluded instruments +and up to 2 or 3 endogenous regressors, +depending on the estimator). +{cmd:ivreg210} will report the Stock-Yogo critical values +if these are available; +missing values mean that the critical values +haven't been tabulated or aren't applicable. +See Stock and Yogo (2002, 2005) for details. + +{p}When the i.i.d. assumption is dropped +and {cmd:ivreg210} is invoked with the {cmd:robust}, {cmd:bw} or {cmd:cluster} options, +the Cragg-Donald-based weak instruments test is no longer valid. 
+{cmd:ivreg210} instead reports a correspondingly-robust +Kleibergen-Paap Wald rk F statistic. +The degrees of freedom adjustment for the rk statistic is (N-L)/L1, +as with the Cragg-Donald F statistic, +except in the cluster-robust case, +when the adjustment is N/(N-1) * (N_clust-1)/N_clust, +following the standard Stata small-sample adjustment for cluster-robust. In the case of two-way clustering, N_clust is the minimum of N_clust1 and N_clust2. +The critical values reported by {cmd:ivreg210} for the Kleibergen-Paap statistic +are the Stock-Yogo critical values for the Cragg-Donald i.i.d. case. +The critical values reported with 2-step GMM +are the Stock-Yogo IV critical values, +and the critical values reported with CUE +are the LIML critical values. + +{marker s_redundancy}{dlgtab:Testing instrument redundancy} + +{marker redtest}{p}The {cmd:redundant} option allows a test of +whether a subset of excluded instruments is "redundant". +Excluded instruments are redundant if the asymptotic efficiency +of the estimation is not improved by using them. +Breusch et al. (1999) show that the condition for the redundancy of a set of instruments +can be stated in several equivalent ways: +e.g., in the reduced form regressions of the endogenous regressors +on the full set of instruments, +the redundant instruments have statistically insignificant coefficients; +or the partial correlations between the endogenous regressors +and the instruments in question are zero. +{cmd:ivreg210} uses a formulation based on testing the rank +of the matrix cross-product between the endogenous regressors +and the possibly-redundant instruments after both have +all other instruments partialled-out; +{cmd:ranktest} is used to test whether the matrix has zero rank. +The test statistic is an LM test +and numerically equivalent to a regression-based LM test. 
+Under the null that the specified instruments are redundant,
+the statistic is distributed as chi-squared
+with degrees of freedom=(#endogenous regressors)*(#instruments tested).
+Rejection of the null indicates that
+the instruments are not redundant.
+When the i.i.d. assumption is dropped
+and {cmd:ivreg210} reports heteroskedastic, AC, HAC
+or cluster-robust statistics,
+the redundancy test statistic is similarly robust.
+See Baum et al. (2007) for further discussion.
+
+{p}Calculation and reporting of all underidentification
+and weak identification statistics
+can be suppressed with the {cmd:noid} option.
+
+{marker s_first}{dlgtab:First-stage regressions, identification, and weak-id-robust inference}
+
+{marker apstats}{p}The {cmd:first} and {cmd:ffirst} options report
+various first-stage results and identification statistics.
+Tests of both underidentification and weak identification are reported
+for each endogenous regressor separately,
+using the method described by Angrist and Pischke (2009), pp. 217-18
+(see also the note on their "Mostly Harmless Econometrics"
+{browse "http://www.mostlyharmlesseconometrics.com/2009/10/multivariate-first-stage-f-not/" :blog}).
+
+{p}The Angrist-Pischke (AP) first-stage chi-squared and F statistics
+are tests of underidentification and weak identification, respectively,
+of individual endogenous regressors.
+They are constructed by "partialling-out" linear projections of the
+remaining endogenous regressors.
+The AP chi-squared Wald statistic is distributed as chi2(L1-K1+1)
+under the null that the particular endogenous regressor
+in question is unidentified.
+In the special case of a single endogenous regressor,
+the AP statistic reported is identical to underidentification statistics reported
+in the {cmd:ffirst} output,
+namely the Cragg-Donald Wald statistic (if i.i.d.)
+or the Kleibergen-Paap rk Wald statistic (if robust, cluster-robust, AC or HAC
+statistics have been requested);
+see {help ivreg2##idtest:above}. 
+Note the difference in the null hypotheses if there are two or more endogenous regressors: +the AP test will fail to reject if a particular endogenous regressor is unidentified, +whereas the Anderson/Cragg-Donald/Kleibergen-Paap tests of underidentification +will fail to reject if {it:any} of the endogenous regressors is unidentified. + +{p}The AP first-stage F statistic is the F form of the same test statistic. +It can be used as a diagnostic for whether a particular endogenous regressor +is "weakly identified" (see {help ivreg2##widtest:above}). +Critical values for the AP first-stage F as a test of weak identification are not available, +but the test statistic can be compared to the Stock-Yogo (2002, 2005) critical +values for the Cragg-Donald F statistic with K1=1. + +{p}The first-stage results are always reported with small-sample statistics, +to be consistent with the recommended use of the first-stage F-test as a diagnostic. +If the estimated equation is reported with robust standard errors, +the first-stage F-test is also robust. + +{p}A full set of first-stage statistics for each of the K1 endogenous regressors +is saved in the matrix e(first). +These include (a) the AP F and chi-squared statistics; (b) the "partial R-squared" +(squared partial correlation) corresponding to the AP statistics; +(c) Shea's (1997) partial R-squared measure (closely related to the AP statistic, +but not amenable to formal testing); (d) the simple F and partial R-squared +statistics for each of the first-stage equations, +with no adjustments if there is more than one endogenous regressor. +In the special case of a single endogenous regressor, +these F statistics and partial R-squareds are identical. + +{marker wirobust}{p}The first-stage output also includes +two statistics that provide weak-instrument robust inference +for testing the significance of the endogenous regressors in the structural equation being estimated. 
+The first statistic is the Anderson-Rubin (1949) test
+(not to be confused with the Anderson-Rubin overidentification test for LIML estimation;
+see {help ivreg2##s_liml:above}).
+The second is the closely related Stock-Wright (2000) S statistic.
+The null hypothesis tested in both cases is that
+the coefficients of the endogenous regressors in the structural equation are jointly equal to zero,
+and, in addition, that the overidentifying restrictions are valid.
+Both tests are robust to the presence of weak instruments.
+The tests are equivalent to estimating the reduced form of the equation
+(with the full set of instruments as regressors)
+and testing that the coefficients of the excluded instruments are jointly equal to zero.
+In the form reported by {cmd:ivreg210}, the Anderson-Rubin statistic is a Wald test
+and the Stock-Wright S statistic is a GMM-distance test.
+Both statistics are distributed as chi-squared with L1 degrees of freedom,
+where L1=number of excluded instruments.
+The traditional F-stat version of the Anderson-Rubin test is also reported.
+See Stock and Wright (2000), Dufour (2003), Chernozhukov and Hansen (2005) and Kleibergen (2007)
+for further discussion.
+For related alternative test statistics that are also robust to weak instruments,
+see {help condivreg} and {help rivtest},
+and the corresponding discussions
+in Moreira and Poi (2003) and Mikusheva and Poi (2006),
+and in Finlay and Magnusson (2009), respectively.
+
+{p}The {cmd:savefirst} option requests that the individual first-stage regressions
+be saved for later access using the {cmd:estimates} command.
+If saved, they can also be displayed using {cmd:first} or {cmd:ffirst} and the {cmd:ivreg210} replay syntax.
+The regressions are saved with the prefix "_ivreg2_",
+unless the user specifies an alternative prefix with the
+{cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} option. 
+ +{marker s_rf}{dlgtab:Reduced form estimates} + +{p}The {cmd:rf} option requests that the reduced form estimation of the equation be displayed. +The {cmd:saverf} option requests that the reduced form estimation is saved +for later access using the {cmd:estimates} command. +If saved, it can also be displayed using the {cmd:rf} and the {cmd:ivreg210} replay syntax. +The regression is saved with the prefix "_ivreg2_", +unless the user specifies an alternative prefix with the +{cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} option. + +{marker s_partial}{dlgtab:Partialling-out exogenous regressors} + +{marker partial}{p}The {cmd:partial(}{it:varlist}{cmd:)} option requests that +the exogenous regressors in {it:varlist} are "partialled out" +from all the other variables (other regressors and excluded instruments) in the estimation. +If the equation includes a constant, it is also automatically partialled out as well. +The coefficients corresponding to the regressors in {it:varlist} are not calculated. +By the Frisch-Waugh-Lovell (FWL) theorem, in IV, +two-step GMM and LIML estimation the coefficients for the remaining regressors +are the same as those that would be obtained if the variables were not partialled out. +(NB: this does not hold for CUE or GMM iterated more than two steps.) +The {cmd:partial} option is most useful when using {cmd:cluster} +and #clusters < (#exogenous regressors + #excluded instruments). +In these circumstances, +the covariance matrix of orthogonality conditions S is not of full rank, +and efficient GMM and overidentification tests are infeasible +since the optimal weighting matrix W = {bind:S^-1} +cannot be calculated. +The problem can be addressed by using {cmd:partial} +to partial out enough exogenous regressors for S to have full rank. +A similar problem arises when the regressors include a variable that is a singleton dummy, +i.e., a variable with one 1 and N-1 zeros or vice versa, +if a robust covariance matrix is requested. 
+The singleton dummy causes the robust covariance matrix estimator
+to be less than full rank.
+In this case, partialling-out the variable with the singleton dummy solves the problem.
+Specifying {cmd:partial(_cons)} will cause just the constant to be partialled-out,
+i.e., the equation will be estimated in deviations-from-means form.
+When {cmd:ivreg210} is invoked with {cmd:partial},
+it reports test statistics with the same small-sample adjustments
+as if estimating without {cmd:partial}.
+Note that after estimation using the {cmd:partial} option,
+the post-estimation {cmd:predict} can be used only to generate residuals,
+and that in the current implementation,
+{cmd:partial} is not compatible with endogenous variables or instruments (included or excluded)
+that use time-series operators.
+
+{marker s_ols}{dlgtab:OLS and Heteroskedastic OLS (HOLS) estimation}
+
+{p}{cmd:ivreg210} also allows straightforward OLS estimation
+by using the same syntax as {cmd:regress}, i.e.,
+{it:ivreg210 depvar varlist1}.
+This can be useful if the user wishes to use one of the
+features of {cmd:ivreg210} in OLS regression, e.g., AC or
+HAC standard errors.
+
+{p}If the list of endogenous variables {it:varlist2} is empty
+but the list of excluded instruments {it:varlist_iv} is not,
+and the option {cmd:gmm2s} is specified,
+{cmd:ivreg210} calculates Cragg's "heteroskedastic OLS" (HOLS) estimator,
+an estimator that is more efficient than OLS
+in the presence of heteroskedasticity of unknown form
+(see Davidson and MacKinnon (1993), pp. 599-600). 
+If the option {cmd:bw(}{it:#}{cmd:)} is specified, +the HOLS estimator is efficient in the presence of +arbitrary autocorrelation; +if both {cmd:bw(}{it:#}{cmd:)} and {cmd:robust} are specified +the HOLS estimator is efficient in the presence of +arbitrary heteroskedasticity and autocorrelation; +and if {cmd:cluster(}{it:varlist}{cmd:)} is used, +the HOLS estimator is efficient in the presence of +arbitrary heteroskedasticity and within-group correlation. +The efficiency gains of HOLS derive from the orthogonality conditions +of the excluded instruments listed in {it:varlist_iv}. +If no endogenous variables are specified and {cmd:gmm2s} is not specified, +{cmd:ivreg210} reports standard OLS coefficients. +The Sargan-Hansen statistic reported +when the list of endogenous variables {it:varlist2} is empty +is a Lagrange multiplier (LM) test +of the hypothesis that the excluded instruments {it:varlist_iv} are +correctly excluded from the restricted model. +If the estimation is LIML, the LM statistic reported +is now based on the Sargan-Hansen test statistics from +the restricted and unrestricted equation. +For more on LM tests, see e.g. Wooldridge (2002), pp. 58-60. +Note that because the approach of the HOLS estimator +has applications beyond heteroskedastic disturbances, +and to avoid confusion concerning the robustness of the estimates, +the estimators presented above as "HOLS" +are described in the output of {cmd:ivreg210} +as "2-Step GMM", "CUE", etc., as appropriate. + +{marker s_collin}{dlgtab:Collinearities} + +{p}{cmd:ivreg210} checks the lists of included instruments, +excluded instruments, and endogenous regressors +for collinearities and duplicates. If an endogenous regressor is +collinear with the instruments, it is reclassified as exogenous. If any +endogenous regressors are collinear with each other, some are dropped. +If there are any collinearities among the instruments, some are dropped. 
+In Stata 9+, excluded instruments are dropped before included instruments.
+If any variables are dropped, a list of their names is saved
+in the macros {cmd:e(collin)} and/or {cmd:e(dups)}.
+Lists of the included and excluded instruments
+and the endogenous regressors with collinear variables and duplicates removed
+are also saved in macros with "1" appended
+to the corresponding macro names.
+
+{p}Collinearity checks can be suppressed with the {cmd:nocollin} option.
+
+{marker s_speed}{dlgtab:Speed options: nocollin and noid}
+
+{p}Two options are available for speeding execution.
+{cmd:nocollin} specifies that the collinearity checks not be performed.
+{cmd:noid} suspends calculation and reporting of
+the underidentification and weak identification statistics
+in the main output.
+
+{marker s_small}{dlgtab:Small sample corrections}
+
+{p}Mean square error = sqrt(RSS/(N-K)) if {cmd:small}, = sqrt(RSS/N) otherwise.
+
+{p}If {cmd:robust} is chosen, the finite sample adjustment
+(see {hi:[R] regress}) to the robust variance-covariance matrix
+qc = N/(N-K) if {cmd:small}, qc = 1 otherwise.
+
+{p}If {cmd:cluster} is chosen, the finite sample adjustment
+qc = (N-1)/(N-K)*M/(M-1) if {cmd:small}, where M=number of clusters,
+qc = 1 otherwise.
+If 2-way clustering is used, M=min(M1,M2),
+where M1=number of clusters in group 1
+and M2=number of clusters in group 2.
+
+{p}The Sargan and C (difference-in-Sargan) statistics use
+error variance = RSS/N, i.e., there is no small sample correction.
+
+{p}A full discussion of these computations and related topics
+can be found in Baum, Schaffer, and Stillman (2003) and Baum, Schaffer and
+Stillman (2007). Some features of the program postdate the former article and are described in the latter paper.
+Some features, such as two-way clustering, postdate the latter article as well.
+
+
+{marker s_options}{title:Options summary}
+
+{p 0 4}{cmd:gmm2s} requests the two-step efficient GMM estimator. 
+If no endogenous variables are specified, the estimator is Cragg's HOLS estimator. + +{p 0 4}{cmd:liml} requests the limited-information maximum likelihood estimator. + +{p 0 4}{cmd:fuller(}{it:#}{cmd:)} specifies that Fuller's modified LIML estimator +is calculated using the user-supplied Fuller parameter alpha, +a non-negative number. +Alpha=1 has been suggested as a good choice. + +{p 0 4}{cmd:kclass(}{it:#}{cmd:)} specifies that a general k-class estimator is calculated +using the user-supplied #, a non-negative number. + +{p 0 4}{cmd:coviv} specifies that the matrix used to calculate the +covariance matrix for the LIML or k-class estimator +is based on the 2SLS matrix, i.e., with k=1. +In this case the covariance matrix will differ from that calculated for the 2SLS +estimator only because the estimate of the error variance will differ. +The default is for the covariance matrix to be based on the LIML or k-class matrix. + +{p 0 4}{cmd:cue} requests the GMM continuously-updated estimator (CUE). + +{p 0 4}{cmd:b0(}{it:matrix}{cmd:)} specifies that the J statistic +(i.e., the value of the CUE objective function) +should be calculated for an arbitrary coefficient vector {cmd:b0}. +That vector must be provided as a matrix with appropriate row and column names. +Under- and weak-identification statistics are not reported +in the output. + +{p 0 4}{cmd:robust} specifies that the Eicker/Huber/White/sandwich estimator of +variance is to be used in place of the traditional calculation. {cmd:robust} +combined with {cmd:cluster()} further allows residuals which are not +independent within cluster (although they must be independent between +clusters). See {hi:[U] Obtaining robust variance estimates}. + +{p 0 4}{cmd:cluster}{cmd:(}{it:varlist}{cmd:)} specifies that the observations +are independent across groups (clusters) but not necessarily independent +within groups. 
+With 1-way clustering, {cmd:cluster}{cmd:(}{it:varname}{cmd:)}
+specifies to which group each observation
+belongs; e.g., {cmd:cluster(personid)} in data with repeated observations on
+individuals.
+With 2-way clustering, {cmd:cluster}{cmd:(}{it:varname1 varname2}{cmd:)}
+specifies the two (non-nested) groups to which each observation belongs.
+Specifying {cmd:cluster()} implies {cmd:robust}.
+
+{p 0 4}{cmd:bw(}{it:#}{cmd:)} implements AC or HAC covariance estimation
+with bandwidth equal to {it:#}, where {it:#} is an integer greater than zero.
+Specifying {cmd:robust} implements HAC covariance estimation;
+omitting it implements AC covariance estimation.
+If the Bartlett (default), Parzen or Quadratic Spectral kernels are selected,
+the value {cmd:auto} may be given (rather than an integer)
+to invoke Newey and West's (1994) automatic bandwidth selection procedure.
+
+{p 0 4}{cmd:kernel(}{it:string}{cmd:)} specifies the kernel
+to be used for AC and HAC covariance estimation;
+the default kernel is Bartlett (also known in econometrics
+as Newey-West).
+The full list of kernels available is (abbreviations in parentheses):
+Bartlett (bar); Truncated (tru); Parzen (par); Tukey-Hanning (thann);
+Tukey-Hamming (thamm); Daniell (dan); Tent (ten); and Quadratic-Spectral (qua or qs).
+
+{p 4 4}Note: in the cases of the Bartlett, Parzen,
+and Tukey-Hanning/Hamming kernels, the number of lags used
+to construct the kernel estimate equals the bandwidth minus one.
+Stata's official {cmd:newey} implements
+HAC standard errors based on the Bartlett kernel,
+and requires the user to specify
+the maximum number of lags used and not the bandwidth;
+see help {help newey}.
+If these kernels are used with {cmd:bw(1)},
+no lags are used and {cmd:ivreg210} will report the usual
+Eicker/Huber/White/sandwich variance estimates.
+
+{p 0 4}{cmd:wmatrix(}{it:matrix}{cmd:)} specifies a user-supplied weighting matrix
+in place of the computed optimal weighting matrix. 
+The matrix must be positive definite.
+The user-supplied matrix must have the same row and column names
+as the instrument variables in the regression model (or a subset thereof).
+
+{p 0 4}{cmd:smatrix(}{it:matrix}{cmd:)} specifies a user-supplied covariance matrix
+of the orthogonality conditions to be used in calculating the covariance matrix of the estimator.
+The matrix must be positive definite.
+The user-supplied matrix must have the same row and column names
+as the instrument variables in the regression model (or a subset thereof).
+
+{p 0 4}{cmd:orthog}{cmd:(}{it:varlist_ex}{cmd:)} requests that a C-statistic
+be calculated as a test of the exogeneity of the instruments in {it:varlist_ex}.
+These may be either included or excluded exogenous variables.
+The standard order condition for identification applies:
+the restricted equation that does not use these variables
+as exogenous instruments must still be identified.
+
+{p 0 4}{cmd:endog}{cmd:(}{it:varlist_en}{cmd:)} requests that a C-statistic
+be calculated as a test of the endogeneity
+of the endogenous regressors in {it:varlist_en}.
+
+{p 0 4}{cmd:redundant}{cmd:(}{it:varlist_ex}{cmd:)} requests an LM test
+of the redundancy of the instruments in {it:varlist_ex}.
+These must be excluded exogenous variables.
+The standard order condition for identification applies:
+the restricted equation that does not use these variables
+as exogenous instruments must still be identified.
+
+{p 0 4}{cmd:small} requests that small-sample statistics (F and t-statistics)
+be reported instead of large-sample statistics (chi-squared and z-statistics).
+Large-sample statistics are the default.
+The exception is the statistic for the significance of the regression,
+which is always reported as a small-sample F statistic.
+
+{p 0 4}{cmd:noconstant} suppresses the constant term (intercept) in the
+regression. 
If {cmd:noconstant} is specified, the constant term is excluded
+from both the final regression and the first-stage regression. To include a
+constant in the first-stage when {cmd:noconstant} is specified, explicitly
+include a variable containing all 1's in {it:varlist_iv}.
+
+{p 0 4}{cmd:first} requests that the full first-stage regression results be displayed,
+along with the associated diagnostic and identification statistics.
+
+{p 0 4}{cmd:ffirst} requests the first-stage diagnostic and identification statistics.
+The results are saved in various e() macros.
+
+{p 0 4}{cmd:nocollin} suppresses the checks for collinearities
+and duplicate variables.
+
+{p 0 4}{cmd:noid} suppresses the calculation and reporting
+of underidentification and weak identification statistics.
+
+{p 0 4}{cmd:savefirst} requests that the first-stage regression results
+are saved for later access using the {cmd:estimates} command.
+The names under which the first-stage regressions are saved
+are the names of the endogenous regressors prefixed by "_ivreg2_".
+If these use Stata's time-series operators,
+the "." is replaced by a "_".
+The maximum number of first-stage estimation results that can be saved
+depends on how many other estimation results the user has already saved
+and on the maximum supported by Stata.
+
+{p 0 4}{cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} requests that
+the first-stage regression results be saved using the user-specified prefix
+instead of the default "_ivreg2_".
+
+{p 0 4}{cmd:rf} requests that the reduced-form estimation of the equation
+be displayed.
+
+{p 0 4}{cmd:saverf} requests that the reduced-form estimation of the equation
+be saved for later access using the {cmd:estimates} command.
+The estimation is stored under the name of the dependent variable
+prefixed by "_ivreg2_".
+If this uses Stata's time-series operators,
+the "." is replaced by a "_". 
+
+{p 0 4}{cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} requests that
+the reduced-form estimation be saved using the user-specified prefix
+instead of the default "_ivreg2_".
+
+{p 0 4}{cmd:partial(}{it:varlist}{cmd:)} requests that
+the exogenous regressors in {it:varlist} be partialled out
+from the other variables in the equation.
+If the equation includes a constant,
+it is automatically partialled out as well.
+The coefficients corresponding to the regressors in {it:varlist}
+are not calculated.
+
+{p 0 4}{cmd:level(}{it:#}{cmd:)} specifies the confidence level, in percent,
+for confidence intervals of the coefficients; see help {help level}.
+
+{p 0 4}{cmd:noheader}, {cmd:eform()}, {cmd:depname()} and {cmd:plus}
+are for ado-file writers; see {hi:[R] ivreg} and {hi:[R] regress}.
+
+{p 0 4}{cmd:nofooter} suppresses the display of the footer containing
+identification and overidentification statistics,
+exogeneity and endogeneity tests,
+lists of endogenous variables and instruments, etc.
+
+{p 0 4}{cmd:version} causes {cmd:ivreg210} to display its current version number
+and to leave it in the macro {cmd:e(version)}.
+It cannot be used with any other options,
+and will clear any existing {cmd:e()} saved results.
+
+{marker s_macros}{title:Remarks and saved results}
+
+{p}{cmd:ivreg210} does not report an ANOVA table.
+Instead, it reports the RSS and both the centered and uncentered TSS.
+It also reports both the centered and uncentered R-squared.
+NB: the TSS and R-squared reported by official {cmd:ivreg} is centered
+if a constant is included in the regression, and uncentered otherwise. 
+ +{p}{cmd:ivreg210} saves the following results in {cmd:e()}: + +Scalars +{col 4}{cmd:e(N)}{col 18}Number of observations +{col 4}{cmd:e(yy)}{col 18}Total sum of squares (SS), uncentered (y'y) +{col 4}{cmd:e(yyc)}{col 18}Total SS, centered (y'y - ((1'y)^2)/n) +{col 4}{cmd:e(rss)}{col 18}Residual SS +{col 4}{cmd:e(mss)}{col 18}Model SS =yyc-rss if the eqn has a constant, =yy-rss otherwise +{col 4}{cmd:e(df_m)}{col 18}Model degrees of freedom +{col 4}{cmd:e(df_r)}{col 18}Residual degrees of freedom +{col 4}{cmd:e(r2u)}{col 18}Uncentered R-squared, 1-rss/yy +{col 4}{cmd:e(r2c)}{col 18}Centered R-squared, 1-rss/yyc +{col 4}{cmd:e(r2)}{col 18}Centered R-squared if the eqn has a constant, uncentered otherwise +{col 4}{cmd:e(r2_a)}{col 18}Adjusted R-squared +{col 4}{cmd:e(ll)}{col 18}Log likelihood +{col 4}{cmd:e(rankxx)}{col 18}Rank of the matrix of observations on rhs variables=K +{col 4}{cmd:e(rankzz)}{col 18}Rank of the matrix of observations on instruments=L +{col 4}{cmd:e(rankV)}{col 18}Rank of covariance matrix V of coefficients +{col 4}{cmd:e(rankS)}{col 18}Rank of covariance matrix S of orthogonality conditions +{col 4}{cmd:e(rmse)}{col 18}root mean square error=sqrt(rss/(N-K)) if -small-, =sqrt(rss/N) if not +{col 4}{cmd:e(F)}{col 18}F statistic +{col 4}{cmd:e(N_clust)}{col 18}Number of clusters (or min(N_clust1,N_clust2) if 2-way clustering) +{col 4}{cmd:e(N_clust1)}{col 18}Number of clusters in dimension 1 (if 2-way clustering) +{col 4}{cmd:e(N_clust2)}{col 18}Number of clusters in dimension 2 (if 2-way clustering) +{col 4}{cmd:e(bw)}{col 18}Bandwidth +{col 4}{cmd:e(lambda)}{col 18}LIML eigenvalue +{col 4}{cmd:e(kclass)}{col 18}k in k-class estimation +{col 4}{cmd:e(fuller)}{col 18}Fuller parameter alpha +{col 4}{cmd:e(sargan)}{col 18}Sargan statistic +{col 4}{cmd:e(sarganp)}{col 18}p-value of Sargan statistic +{col 4}{cmd:e(sargandf)}{col 18}dof of Sargan statistic = degree of overidentification = L-K +{col 4}{cmd:e(j)}{col 18}Hansen J statistic +{col 
4}{cmd:e(jp)}{col 18}p-value of Hansen J statistic +{col 4}{cmd:e(jdf)}{col 18}dof of Hansen J statistic = degree of overidentification = L-K +{col 4}{cmd:e(arubin)}{col 18}Anderson-Rubin overidentification LR statistic N*ln(lambda) +{col 4}{cmd:e(arubinp)}{col 18}p-value of Anderson-Rubin overidentification LR statistic +{col 4}{cmd:e(arubin_lin)}{col 18}Anderson-Rubin linearized overidentification statistic N*(lambda-1) +{col 4}{cmd:e(arubin_linp)}{col 18}p-value of Anderson-Rubin linearized overidentification statistic +{col 4}{cmd:e(arubindf)}{col 18}dof of A-R overid statistic = degree of overidentification = L-K +{col 4}{cmd:e(idstat)}{col 18}LM test statistic for underidentification (Anderson or Kleibergen-Paap) +{col 4}{cmd:e(idp)}{col 18}p-value of underidentification LM statistic +{col 4}{cmd:e(iddf)}{col 18}dof of underidentification LM statistic +{col 4}{cmd:e(widstat)}{col 18}F statistic for weak identification (Cragg-Donald or Kleibergen-Paap) +{col 4}{cmd:e(arf)}{col 18}Anderson-Rubin F-test of significance of endogenous regressors +{col 4}{cmd:e(arfp)}{col 18}p-value of Anderson-Rubin F-test of endogenous regressors +{col 4}{cmd:e(archi2)}{col 18}Anderson-Rubin chi-sq test of significance of endogenous regressors +{col 4}{cmd:e(archi2p)}{col 18}p-value of Anderson-Rubin chi-sq test of endogenous regressors +{col 4}{cmd:e(ardf)}{col 18}degrees of freedom of Anderson-Rubin tests of endogenous regressors +{col 4}{cmd:e(ardf_r)}{col 18}denominator degrees of freedom of AR F-test of endogenous regressors +{col 4}{cmd:e(redstat)}{col 18}LM statistic for instrument redundancy +{col 4}{cmd:e(redp)}{col 18}p-value of LM statistic for instrument redundancy +{col 4}{cmd:e(reddf)}{col 18}dof of LM statistic for instrument redundancy +{col 4}{cmd:e(cstat)}{col 18}C-statistic +{col 4}{cmd:e(cstatp)}{col 18}p-value of C-statistic +{col 4}{cmd:e(cstatdf)}{col 18}Degrees of freedom of C-statistic +{col 4}{cmd:e(cons)}{col 18}1 when equation has a Stata-supplied 
constant; 0 otherwise +{col 4}{cmd:e(partialcons)}{col 18}as above but prior to partialling-out (see {cmd:e(partial)}) +{col 4}{cmd:e(partial_ct)}{col 18}Number of partialled-out variables (see {cmd:e(partial)}) + +Macros +{col 4}{cmd:e(cmd)}{col 18}ivreg210 +{col 4}{cmd:e(cmdline)}{col 18}Command line invoking ivreg210 +{col 4}{cmd:e(version)}{col 18}Version number of ivreg210 +{col 4}{cmd:e(model)}{col 18}ols, iv, gmm, liml, or kclass +{col 4}{cmd:e(depvar)}{col 18}Name of dependent variable +{col 4}{cmd:e(instd)}{col 18}Instrumented (RHS endogenous) variables +{col 4}{cmd:e(insts)}{col 18}Instruments +{col 4}{cmd:e(inexog)}{col 18}Included instruments (regressors) +{col 4}{cmd:e(exexog)}{col 18}Excluded instruments +{col 4}{cmd:e(collin)}{col 18}Variables dropped because of collinearities +{col 4}{cmd:e(dups)}{col 18}Duplicate variables +{col 4}{cmd:e(ecollin)}{col 18}Endogenous variables reclassified as exogenous because of +{col 20}collinearities with instruments +{col 4}{cmd:e(clist)}{col 18}Instruments tested for orthogonality +{col 4}{cmd:e(redlist)}{col 18}Instruments tested for redundancy +{col 4}{cmd:e(partial)}{col 18}Partialled-out exogenous regressors +{col 4}{cmd:e(small)}{col 18}small +{col 4}{cmd:e(wtype)}{col 18}weight type +{col 4}{cmd:e(wexp)}{col 18}weight expression +{col 4}{cmd:e(clustvar)}{col 18}Name of cluster variable +{col 4}{cmd:e(vcetype)}{col 18}Covariance estimation method +{col 4}{cmd:e(kernel)}{col 18}Kernel +{col 4}{cmd:e(tvar)}{col 18}Time variable +{col 4}{cmd:e(ivar)}{col 18}Panel variable +{col 4}{cmd:e(firsteqs)}{col 18}Names of stored first-stage equations +{col 4}{cmd:e(rfeq)}{col 18}Name of stored reduced-form equation +{col 4}{cmd:e(predict)}{col 18}Program used to implement predict + +Matrices +{col 4}{cmd:e(b)}{col 18}Coefficient vector +{col 4}{cmd:e(V)}{col 18}Variance-covariance matrix of the estimators +{col 4}{cmd:e(S)}{col 18}Covariance matrix of orthogonality conditions +{col 4}{cmd:e(W)}{col 18}GMM weighting 
matrix (=inverse of S if efficient GMM estimator) +{col 4}{cmd:e(first)}{col 18}First-stage regression results +{col 4}{cmd:e(ccev)}{col 18}Eigenvalues corresponding to the Anderson canonical correlations test +{col 4}{cmd:e(cdev)}{col 18}Eigenvalues corresponding to the Cragg-Donald test + +Functions +{col 4}{cmd:e(sample)}{col 18}Marks estimation sample + + + +{marker s_examples}{title:Examples} + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/hayashi/griliches76.dta" : . use http://fmwww.bc.edu/ec-p/data/hayashi/griliches76.dta }{p_end} +{p 8 12}(Wages of Very Young Men, Zvi Griliches, J.Pol.Ec. 1976) + +{p 8 12}{stata "xi i.year" : . xi i.year} + +{col 0}(Instrumental variables. Examples follow Hayashi 2000, p. 255.) + +{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt)" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt)} + +{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), small ffirst" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), small ffirst} + +{col 0}(Testing for the presence of heteroskedasticity in IV/GMM estimation) + +{p 8 12}{stata "ivhettest, fitlev" : . ivhettest, fitlev} + +{col 0}(Two-step GMM efficient in the presence of arbitrary heteroskedasticity) + +{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust} + +{p 0}(GMM with user-specified first-step weighting matrix or matrix of orthogonality conditions) + +{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), robust" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), robust} + +{p 8 12}{stata "predict double uhat if e(sample), resid" : . predict double uhat if e(sample), resid} + +{p 8 12}{stata "mat accum S = `e(insts)' [iw=uhat^2]" : . mat accum S = `e(insts)' [iw=uhat^2]} + +{p 8 12}{stata "mat S = 1/`e(N)' * S" : . 
mat S = 1/`e(N)' * S} + +{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust smatrix(S)" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust smatrix(S)} + +{p 8 12}{stata "mat W = invsym(S)" : . mat W = invsym(S)} + +{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust wmatrix(W)" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust wmatrix(W)} + +{p 0}(Equivalence of J statistic and Wald tests of included regressors, irrespective of instrument choice (Ahn, 1997)) + +{p 8 12}{stata "ivreg210 lw (iq=med kww age), gmm2s" : . ivreg210 lw (iq=med kww age), gmm2s} + +{p 8 12}{stata "mat S0 = e(S)" : . mat S0 = e(S)} + +{p 8 12}{stata "qui ivreg210 lw (iq=kww) med age, gmm2s smatrix(S0)" : . qui ivreg210 lw (iq=kww) med age, gmm2s smatrix(S0)} + +{p 8 12}{stata "test med age" : . test med age} + +{p 8 12}{stata "qui ivreg210 lw (iq=med) kww age, gmm2s smatrix(S0)" : . qui ivreg210 lw (iq=med) kww age, gmm2s smatrix(S0)} + +{p 8 12}{stata "test kww age" : . test kww age} + +{p 8 12}{stata "qui ivreg210 lw (iq=age) med kww, gmm2s smatrix(S0)" : . qui ivreg210 lw (iq=age) med kww, gmm2s smatrix(S0)} + +{p 8 12}{stata "test med kww" : . test med kww} + +{p 0}(Continuously-updated GMM (CUE) efficient in the presence of arbitrary heteroskedasticity. NB: may require 30+ iterations.) + +{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), cue robust" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), cue robust} + +{col 0}(Sargan-Basmann tests of overidentifying restrictions for IV estimation) + +{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt)" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt)} + +{p 8 12}{stata "overid, all" : . 
overid, all}
+
+{col 0}(Tests of exogeneity and endogeneity)
+
+{col 0}(Test the exogeneity of one regressor)
+
+{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s orthog(s)" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s orthog(s)}
+
+{col 0}(Test the exogeneity of two excluded instruments)
+
+{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s orthog(age mrt)" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s orthog(age mrt)}
+
+{col 0}(Frisch-Waugh-Lovell (FWL): equivalence of estimations with and without partialling-out)
+
+{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year)" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year)}
+
+{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year) partial(_I*)" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year) partial(_I*)}
+
+{col 0}({cmd:partial()}: efficient GMM with #clusters<#instruments feasible after partialling-out)
+
+{p 8 12}{stata "ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year) partial(_I*) gmm2s" : . ivreg210 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year) partial(_I*) gmm2s}
+
+{col 0}(Examples following Wooldridge 2002, pp.59, 61)
+
+{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta" : . use http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta }
+
+{col 0}(Equivalence of DWH endogeneity test when regressor is endogenous...)
+
+{p 8 12}{stata "ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6)" : . ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6)}
+
+{p 8 12}{stata "ivendog educ" :. ivendog educ}
+
+{col 0}(... endogeneity test using the {cmd:endog} option)
+
+{p 8 12}{stata "ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), endog(educ)" : . 
ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), endog(educ)} + +{col 0}(...and C-test of exogeneity when regressor is exogenous, using the {cmd:orthog} option) + +{p 8 12}{stata "ivreg210 lwage exper expersq educ (=age kidslt6 kidsge6), orthog(educ)" : . ivreg210 lwage exper expersq educ (=age kidslt6 kidsge6), orthog(educ)} + +{col 0}(Heteroskedastic Ordinary Least Squares, HOLS) + +{p 8 12}{stata "ivreg210 lwage exper expersq educ (=age kidslt6 kidsge6), gmm2s" : . ivreg210 lwage exper expersq educ (=age kidslt6 kidsge6), gmm2s} + +{col 0}(Equivalence of Cragg-Donald Wald F statistic and F-test from first-stage regression +{col 0}in special case of single endogenous regressor. Also illustrates {cmd:savefirst} option.) + +{p 8 12}{stata "ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), savefirst" : . ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), savefirst} + +{p 8 12}{stata "di e(widstat)" : . di e(widstat)} + +{p 8 12}{stata "estimates restore _ivreg2_educ" : . estimates restore _ivreg2_educ} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(F)" : . di r(F)} + +{col 0}(Equivalence of Kleibergen-Paap robust rk Wald F statistic and F-test from first-stage +{col 0}regression in special case of single endogenous regressor.) + +{p 8 12}{stata "ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust savefirst" : . ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust savefirst} + +{p 8 12}{stata "di e(widstat)" : . di e(widstat)} + +{p 8 12}{stata "estimates restore _ivreg2_educ" : . estimates restore _ivreg2_educ} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(F)" : . di r(F)} + +{col 0}(Equivalence of Kleibergen-Paap robust rk LM statistic for identification and LM test +{col 0}of joint significance of excluded instruments in first-stage regression in special +{col 0}case of single endogenous regressor. 
Also illustrates use of {cmd:ivreg210} to perform an
+{col 0}LM test in OLS estimation.)
+
+{p 8 12}{stata "ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust" : . ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust}
+
+{p 8 12}{stata "di e(idstat)" : . di e(idstat)}
+
+{p 8 12}{stata "ivreg210 educ exper expersq (=age kidslt6 kidsge6) if e(sample), robust" : . ivreg210 educ exper expersq (=age kidslt6 kidsge6) if e(sample), robust}
+
+{p 8 12}{stata "di e(j)" : . di e(j)}
+
+{col 0}(Equivalence of an LM test of an excluded instrument for redundancy and an LM test of
+{col 0}significance from first-stage regression in special case of single endogenous regressor.)
+
+{p 8 12}{stata "ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust redundant(age)" : . ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust redundant(age)}
+
+{p 8 12}{stata "di e(redstat)" : . di e(redstat)}
+
+{p 8 12}{stata "ivreg210 educ exper expersq kidslt6 kidsge6 (=age) if e(sample), robust" : . ivreg210 educ exper expersq kidslt6 kidsge6 (=age) if e(sample), robust}
+
+{p 8 12}{stata "di e(j)" : . di e(j)}
+
+{col 0}(Weak-instrument robust inference: Anderson-Rubin Wald F and chi-sq and
+{col 0}Stock-Wright S statistics. Also illustrates use of {cmd:saverf} option.)
+
+{p 8 12}{stata "ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust ffirst saverf" : . ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust ffirst saverf}
+
+{p 8 12}{stata "di e(arf)" : . di e(arf)}
+
+{p 8 12}{stata "di e(archi2)" : . di e(archi2)}
+
+{p 8 12}{stata "di e(sstat)" : . di e(sstat)}
+
+{col 0}(Obtaining the Anderson-Rubin Wald F statistic from the reduced-form estimation)
+
+{p 8 12}{stata "estimates restore _ivreg2_lwage" : . estimates restore _ivreg2_lwage}
+
+{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6}
+
+{p 8 12}{stata "di r(F)" : . 
di r(F)} + +{col 0}(Obtaining the Anderson-Rubin Wald chi-sq statistic from the reduced-form estimation. +{col 0}Use {cmd:ivreg210} without {cmd:small} to obtain large-sample test statistic.) + +{p 8 12}{stata "ivreg210 lwage exper expersq age kidslt6 kidsge6, robust" : . ivreg210 lwage exper expersq age kidslt6 kidsge6, robust} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(chi2)" : . di r(chi2)} + +{col 0}(Obtaining the Stock-Wright S statistic as the value of the GMM CUE objective function. +{col 0}Also illustrates use of {cmd:b0} option. Coefficients on included exogenous regressors +{col 0}are OLS coefficients, which is equivalent to partialling them out before obtaining +{col 0}the value of the CUE objective function.) + +{p 8 12}{stata "mat b = 0" : . mat b = 0} + +{p 8 12}{stata "mat colnames b = educ" : . mat colnames b = educ} + +{p 8 12}{stata "qui ivreg210 lwage exper expersq" : . qui ivreg210 lwage exper expersq} + +{p 8 12}{stata "mat b = b, e(b)" : . mat b = b, e(b)} + +{p 8 12}{stata "ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust b0(b)" : . ivreg210 lwage exper expersq (educ=age kidslt6 kidsge6), robust b0(b)} + +{p 8 12}{stata "di e(j)" : . di e(j)} + +{col 0}(LIML and k-class estimation using Klein data) + +{col 9}{stata "webuse klein" :. webuse klein} +{col 9}{stata "tsset yr" :. tsset yr} + +{col 0}(LIML estimates of Klein's consumption function) + +{p 8 12}{stata "ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), liml" :. ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), liml} + +{col 0}(Equivalence of LIML and CUE+homoskedasticity+independence) + +{p 8 12}{stata "ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), liml coviv" :. 
ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), liml coviv} + +{p 8 12}{stata "ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), cue" :. ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), cue} + +{col 0}(Fuller's modified LIML with alpha=1) + +{p 8 12}{stata "ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), fuller(1)" :. ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), fuller(1)} + +{col 0}(k-class estimation with Nagar's bias-adjusted IV, k=1+(L-K)/N=1+4/21=1.19) + +{p 8 12}{stata "ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), kclass(1.19)" :. ivreg210 consump L.profits (profits wagetot = govt taxnetx year wagegovt capital1 L.totinc), kclass(1.19)} + +{col 0}(Kernel-based covariance estimation using time-series data) + +{col 9}{stata "use http://fmwww.bc.edu/ec-p/data/wooldridge/phillips.dta" :. use http://fmwww.bc.edu/ec-p/data/wooldridge/phillips.dta} +{col 9}{stata "tsset year, yearly" :. tsset year, yearly} + +{col 0}(Autocorrelation-consistent (AC) inference in an OLS Regression) + +{p 8 12}{stata "ivreg210 cinf unem, bw(3)" :. ivreg210 cinf unem, bw(3)} + +{p 8 12}{stata "ivreg210 cinf unem, kernel(qs) bw(auto)" :. ivreg210 cinf unem, kernel(qs) bw(auto)} + +{col 0}(Heteroskedastic and autocorrelation-consistent (HAC) inference in an OLS regression) + +{p 8 12}{stata "ivreg210 cinf unem, bw(3) kernel(bartlett) robust small" :. ivreg210 cinf unem, bw(3) kernel(bartlett) robust small} + +{p 8 12}{stata "newey cinf unem, lag(2)" :. newey cinf unem, lag(2)} + +{col 0}(AC and HAC in IV and GMM estimation) + +{p 8 12}{stata "ivreg210 cinf (unem = l(1/3).unem), bw(3)" :. ivreg210 cinf (unem = l(1/3).unem), bw(3)} + +{p 8 12}{stata "ivreg210 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(thann)" :. 
ivreg210 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(thann)}
+
+{p 8 12}{stata "ivreg210 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(qs) robust orthog(l1.unem)" :. ivreg210 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(qs) robust orthog(l1.unem)}
+
+{col 0}(Examples using Large N, Small T Panel Data)
+
+{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/macro/abdata.dta" : . use http://fmwww.bc.edu/ec-p/data/macro/abdata.dta }{p_end}
+{p 8 12}{stata "tsset id year" :. tsset id year}
+
+{col 0}(Two-step effic. GMM in the presence of arbitrary heteroskedasticity and autocorrelation)
+
+{p 8 12}{stata "ivreg210 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), gmm2s cluster(id)": . ivreg210 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), gmm2s cluster(id)}
+
+{col 0}(Kiefer (1980) SEs - robust to arbitrary serial correlation but not heteroskedasticity)
+
+{p 8 12}{stata "ivreg210 n w k, kiefer": . ivreg210 n w k, kiefer}
+
+{p 8 12}{stata "ivreg210 n w k, bw(9) kernel(tru)": . ivreg210 n w k, bw(9) kernel(tru)}
+
+{col 0}(Equivalence of cluster-robust and kernel-robust with truncated kernel and max bandwidth)
+
+{p 8 12}{stata "ivreg210 n w k, cluster(id)": . ivreg210 n w k, cluster(id)}
+
+{p 8 12}{stata "ivreg210 n w k, bw(9) kernel(tru) robust": . ivreg210 n w k, bw(9) kernel(tru) robust}
+
+{col 0}(Examples using Small N, Large T Panel Data. NB: T is actually not very large - only
+{col 0}20 - so results should be interpreted with caution)
+
+{p 8 12}{stata "webuse grunfeld" : . webuse grunfeld }{p_end}
+{p 8 12}{stata "tsset" : . tsset }{p_end}
+
+{col 0}(Autocorrelation-consistent (AC) inference)
+
+{p 8 12}{stata "ivreg210 invest mvalue kstock, bw(1) kernel(tru)": . ivreg210 invest mvalue kstock, bw(1) kernel(tru)}
+
+{col 0}(Heteroskedastic and autocorrelation-consistent (HAC) inference)
+
+{p 8 12}{stata "ivreg210 invest mvalue kstock, robust bw(1) kernel(tru)": . 
ivreg210 invest mvalue kstock, robust bw(1) kernel(tru)}
+
+{col 0}(HAC inference, SEs also robust to disturbances correlated across panels)
+
+{p 8 12}{stata "ivreg210 invest mvalue kstock, robust cluster(year) bw(1) kernel(tru)": . ivreg210 invest mvalue kstock, robust cluster(year) bw(1) kernel(tru)}
+
+{col 0}(Equivalence of Driscoll-Kraay SEs as implemented by {cmd:ivreg210} and {cmd:xtscc})
+{col 0}(See Hoechle (2007) for discussion of {cmd:xtscc})
+
+{p 8 12}{stata "ivreg210 invest mvalue kstock, dkraay(2) small": . ivreg210 invest mvalue kstock, dkraay(2) small}
+
+{p 8 12}{stata "ivreg210 invest mvalue kstock, cluster(year) bw(2) small": . ivreg210 invest mvalue kstock, cluster(year) bw(2) small}
+
+{p 8 12}{stata "xtscc invest mvalue kstock, lag(1)": . xtscc invest mvalue kstock, lag(1)}
+
+{col 0}(Examples using Large N, Large T Panel Data. NB: T is again not very large - only
+{col 0}20 - so results should be interpreted with caution)
+
+{p 8 12}{stata "webuse nlswork" : . webuse nlswork }{p_end}
+{p 8 12}{stata "tsset" : . tsset }{p_end}
+
+{col 0}(One-way cluster-robust: SEs robust to arbitrary heteroskedasticity and within-panel
+{col 0}autocorrelation)
+
+{p 8 12}{stata "ivreg210 ln_w grade age ttl_exp tenure, cluster(idcode)": . ivreg210 ln_w grade age ttl_exp tenure, cluster(idcode) }{p_end}
+
+{col 0}(Two-way cluster-robust: SEs robust to arbitrary heteroskedasticity and within-panel
+{col 0}autocorrelation, and contemporaneous cross-panel correlation, i.e., the cross-panel
+{col 0}correlation is not autocorrelated)
+
+{p 8 12}{stata "ivreg210 ln_w grade age ttl_exp tenure, cluster(idcode year)": . 
ivreg210 ln_w grade age ttl_exp tenure, cluster(idcode year) }{p_end} + +{col 0}(Two-way cluster-robust: SEs robust to arbitrary heteroskedasticity and within-panel +{col 0}autocorrelation and cross-panel autocorrelated disturbances that disappear after 2 lags) + +{p 8 12}{stata "ivreg210 ln_w grade age ttl_exp tenure, cluster(idcode year) bw(2) kernel(tru) ": . ivreg210 ln_w grade age ttl_exp tenure, cluster(idcode year) bw(2) kernel(tru) }{p_end} + + + +{marker s_refs}{title:References} + +{p 0 4}Ahn, Seung C. 1997. Orthogonality tests in linear models. Oxford Bulletin +of Economics and Statistics, Vol. 59, pp. 183-186. + +{p 0 4}Anderson, T.W. 1951. Estimating linear restrictions on regression coefficients +for multivariate normal distributions. Annals of Mathematical Statistics, Vol. 22, pp. 327-51. + +{p 0 4}Anderson, T. W. and H. Rubin. 1949. Estimation of the parameters of a single equation +in a complete system of stochastic equations. Annals of Mathematical Statistics, Vol. 20, +pp. 46-63. + +{p 0 4}Anderson, T. W. and H. Rubin. 1950. The asymptotic properties of estimates of the parameters of a single +equation in a complete system of stochastic equations. Annals of Mathematical Statistics, +Vol. 21, pp. 570-82. + +{p 0 4}Angrist, J.D. and Pischke, J.-S. 2009. Mostly Harmless Econometrics: An Empiricist's Companion. +Princeton: Princeton University Press. + +{p 0 4}Baum, C.F., Schaffer, M.E., and Stillman, S. 2003. Instrumental Variables and GMM: +Estimation and Testing. The Stata Journal, Vol. 3, No. 1, pp. 1-31. +{browse "http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html":http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html}. +Working paper version: Boston College Department of Economics Working Paper No. 545. +{browse "http://ideas.repec.org/p/boc/bocoec/545.html":http://ideas.repec.org/p/boc/bocoec/545.html}. +Citations in {browse "http://scholar.google.com/scholar?oi=bibs&hl=en&cites=9432785573549481148":published work}. + +{p 0 4}Baum, C. 
F., Schaffer, M.E., and Stillman, S. 2007. Enhanced routines for instrumental variables/GMM estimation and testing. +The Stata Journal, Vol. 7, No. 4, pp. 465-506. +{browse "http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html":http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html}. +Working paper version: Boston College Department of Economics Working Paper No. 667. +{browse "http://ideas.repec.org/p/boc/bocoec/667.html":http://ideas.repec.org/p/boc/bocoec/667.html}. +Citations in {browse "http://scholar.google.com/scholar?oi=bibs&hl=en&cites=1691909976816211536":published work}. + +{p 0 4}Breusch, T., Qian, H., Schmidt, P. and Wyhowski, D. 1999. +Redundancy of moment conditions. +Journal of Econometrics, Vol. 91, pp. 89-111. + +{p 0 4}Cameron, A.C., Gelbach, J.B. and Miller, D.L. 2006. +Robust Inference with Multi-Way Clustering. +NBER Technical Working paper 327. +{browse "http://www.nber.org/papers/t0327":http://www.nber.org/papers/t0327}. +Forthcoming in the Journal of Business and Economic Statistics. +{cmd:cgmreg} is available at +{browse "http://www.econ.ucdavis.edu/faculty/dlmiller/statafiles":http://www.econ.ucdavis.edu/faculty/dlmiller/statafiles}. + +{p 0 4}Chernozhukov, V. and Hansen, C. 2005. The Reduced Form: +A Simple Approach to Inference with Weak Instruments. +Working paper, University of Chicago, Graduate School of Business. + +{p 0 4}Cragg, J.G. and Donald, S.G. 1993. Testing Identifiability and Specification in +Instrumental Variables Models. Econometric Theory, Vol. 9, pp. 222-240. + +{p 0 4}Cushing, M.J. and McGarvey, M.G. 1999. Covariance Matrix Estimation. +In L. Matyas (ed.), Generalized Methods of Moments Estimation. +Cambridge: Cambridge University Press. + +{p 0 4}Davidson, R. and MacKinnon, J. 1993. Estimation and Inference in Econometrics. +New York: Oxford University Press. + +{p 0 4}Driscoll, J.C. and Kraay, A. 1998. Consistent Covariance Matrix Estimation With Spatially Dependent Panel Data. 
+Review of Economics and Statistics. Vol. 80, No. 4, pp. 549-560. + +{p 0 4}Dufour, J.M. 2003. Identification, Weak Instruments and Statistical Inference +in Econometrics. Canadian Journal of Economics, Vol. 36, No. 4, pp. 767-808. +Working paper version: CIRANO Working Paper 2003s-49. +{browse "http://www.cirano.qc.ca/pdf/publication/2003s-49.pdf":http://www.cirano.qc.ca/pdf/publication/2003s-49.pdf}. + +{p 0 4}Finlay, K., and Magnusson, L.M. 2009. Implementing Weak-Instrument Robust Tests +for a General Class of Instrumental-Variables Models. +The Stata Journal, Vol. 9, No. 3, pp. 398-421. +{browse "http://www.stata-journal.com/article.html?article=st0171":http://www.stata-journal.com/article.html?article=st0171}. + +{p 0 4}Hall, A.R., Rudebusch, G.D. and Wilcox, D.W. 1996. Judging Instrument Relevance in +Instrumental Variables Estimation. International Economic Review, Vol. 37, No. 2, pp. 283-298. + +{p 0 4}Hayashi, F. Econometrics. 2000. Princeton: Princeton University Press. + +{p 0 4}Hansen, L.P., Heaton, J., and Yaron, A. 1996. Finite Sample Properties +of Some Alternative GMM Estimators. Journal of Business and Economic Statistics, Vol. 14, No. 3, pp. 262-280. + +{p 0 4}Hoechle, D. 2007. Robust Standard Errors for Panel Regressions with Cross-sectional Dependence. +Stata Journal, Vol. 7, No. 3, pp. 281-312. +{browse "http://www.stata-journal.com/article.html?article=st0128":http://www.stata-journal.com/article.html?article=st0128}. + +{p 0 4}Kiefer, N.M. 1980. Estimation of Fixed Effect Models for Time Series of Cross-Sections with +Arbitrary Intertemporal Covariance. Journal of Econometrics, Vol. 14, No. 2, pp. 195-202. + +{p 0 4}Kleibergen, F. 2007. Generalizing Weak Instrument Robust Statistics Towards Multiple Parameters, Unrestricted Covariance Matrices and Identification Statistics. Journal of Econometrics, forthcoming. + +{p 0 4}Kleibergen, F. and Paap, R. 2006. Generalized Reduced Rank Tests Using the Singular Value Decomposition. 
+Journal of Econometrics, Vol. 133, pp. 97-126. + +{p 0 4}Kleibergen, F. and Schaffer, M.E. 2007. ranktest: Stata module for testing the rank +of a matrix using the Kleibergen-Paap rk statistic. +{browse "http://ideas.repec.org/c/boc/bocode/s456865.html":http://ideas.repec.org/c/boc/bocode/s456865.html}. + +{p 0 4}Mikusheva, A. and Poi, B.P. 2006. +Tests and Confidence Sets with Correct Size When Instruments are Potentially Weak. The Stata Journal, Vol. 6, No. 3, pp. 335-347. + +{p 0 4}Moreira, M.J. and Poi, B.P. 2003. Implementing Tests with the Correct Size in the Simultaneous Equations Model. The Stata Journal, Vol. 3, No. 1, pp. 57-70. + +{p 0 4}Newey, W.K. and K.D. West, 1994. Automatic Lag Selection in Covariance Matrix Estimation. Review of Economic Studies, Vol. 61, No. 4, pp. 631-653. + +{p 0 4}Shea, J. 1997. Instrument Relevance in Multivariate Linear Models: +A Simple Measure. +Review of Economics and Statistics, Vol. 79, No. 2, pp. 348-352. + +{p 0 4}Stock, J.H. and Wright, J.H. 2000. GMM with Weak Identification. +Econometrica, Vol. 68, No. 5, September, pp. 1055-1096. + +{p 0 4}Stock, J.H. and Yogo, M. 2005. Testing for Weak Instruments in Linear IV Regression. In D.W.K. Andrews and J.H. Stock, eds. Identification and Inference for Econometric Models: Essays in Honor of Thomas Rothenberg. Cambridge: Cambridge University Press, 2005, pp. 80-108. +Working paper version: NBER Technical Working Paper 284. +{browse "http://www.nber.org/papers/T0284":http://www.nber.org/papers/T0284}. + +{p 0 4}Thompson, S.B. 2009. Simple Formulas for Standard Errors that Cluster by Both Firm and Time. +{browse "http://ssrn.com/abstract=914002":http://ssrn.com/abstract=914002}. + +{p 0 4}Wooldridge, J.M. 2002. Econometric Analysis of Cross Section and Panel Data. Cambridge, MA: MIT Press. 
+ + +{marker s_acknow}{title:Acknowledgements} + +{p}We would like to thank various colleagues who helped us along the way, including +David Drukker, +Frank Kleibergen, +Austin Nichols, +Brian Poi, +Vince Wiggins, +and, not least, the users of {cmd:ivreg2} +who have provided suggestions, +spotted bugs, +and helped test the package. +We are also grateful to Jim Stock and Moto Yogo for permission to reproduce +their critical values for the Cragg-Donald statistic. + +{marker s_citation}{title:Citation of ivreg210} + +{p}{cmd:ivreg210} is not an official Stata command. It is a free contribution +to the research community, like a paper. Please cite it as such: {p_end} + +{phang}Baum, C.F., Schaffer, M.E., Stillman, S. 2015. +ivreg210: Stata module for extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression. +{browse "http://ideas.repec.org/c/boc/bocode/s457955.html":http://ideas.repec.org/c/boc/bocode/s457955.html}{p_end} + +{title:Authors} + + Christopher F Baum, Boston College, USA + baum@bc.edu + + Mark E Schaffer, Heriot-Watt University, UK + m.e.schaffer@hw.ac.uk + + Steven Stillman, Motu Economic and Public Policy Research + stillman@motu.org.nz + + +{title:Also see} + +{p 1 14}Articles:{it:Stata Journal}, volume 3, number 1: {browse "http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html":st0030}{p_end} +{p 10 14}{it:Stata Journal}, volume 7, number 4: {browse "http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html":st0030_3}{p_end} + +{p 1 14}Manual: {hi:[U] 23 Estimation and post-estimation commands}{p_end} +{p 10 14}{hi:[U] 29 Overview of model estimation in Stata}{p_end} +{p 10 14}{hi:[R] ivreg}{p_end} + +{p 1 10}On-line: help for {help ivregress}, {help ivreg}, {help newey}; +{help overid}, {help ivendog}, {help ivhettest}, {help ivreset}, +{help xtivreg2}, {help xtoverid}, {help ranktest}, +{help condivreg} (if installed); +{help rivtest} (if installed); +{help cgmreg} (if installed); +{help xtscc} (if installed); +{help 
est}, {help postest}; +{help regress}{p_end} diff --git a/110/replication_package/replication/ado/plus/i/ivreg210_p.ado b/110/replication_package/replication/ado/plus/i/ivreg210_p.ado new file mode 100644 index 0000000000000000000000000000000000000000..323edc718bcbc5b2c6e600ddc7f46a915b544ca5 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg210_p.ado @@ -0,0 +1,112 @@ +*! ivreg210_p 1.0.8 19Jan2015 +*! based on ivreg2_p 1.0.8 30Jan2011 +*! author mes +* 1.0.1: 25apr2002 original version +* 1.0.2: 28jun2005 version 8.2 +* 1.0.3: 1Aug2006 complete rewrite plus fwl option +* 1.0.4: 26Jan2007 eliminated double reporting of #MVs +* 1.0.5: 2Feb2007 small fix to allow fwl of just _cons +* 1.0.6: 19Aug2007 replacement of "fwl" with "partial" in conjuction with new ivreg2 syntax +* 1.0.7: 4Feb2010 version check update +* 1.0.8: 30Jan2011 re-introduced stdp option (hadn't been supported after fwl/partial) +* and added labelling of created residual variable + +program define ivreg210_p + version 8.2 + syntax newvarname [if] [in] , [XB Residuals stdp] + marksample touse, novarlist + +* Check ivreg2 version is compatible. +* fwl becomes partial starting in ivreg2 02.2.07 + local vernum "`e(version)'" + if ("`vernum'" < "03.0.00") | ("`vernum'" > "09.9.99") { +di as err "Error: incompatible versions of ivreg2 and ivreg2_p." 
+di as err "Currently installed version of ivreg2 is `vernum'" +di as err "To update, from within Stata type " _c +di in smcl "{stata ssc install ivreg2, replace :ssc install ivreg2, replace}" + exit 601 + } + + local type "`xb'`residuals'`stdp'" + + if "`type'"=="" { + local type "xb" +di in gr "(option xb assumed; fitted values)" + } + +* e(partialcons) now always exists and is 1 or 0 + if e(partial_ct) { +* partial partial-out block + if "`type'" == "residuals" { + + tempvar esample + tempname ivres + gen byte `esample' = e(sample) + +* Need to strip out time series operators + local lhs "`e(depvar)'" + tsrevar `lhs', substitute + local lhs_t "`r(varlist)'" + + local rhs : colnames(e(b)) + tsrevar `rhs', substitute + local rhs_t "`r(varlist)'" + + if "`e(partial1)'" != "" { + local partial "`e(partial1)'" + } + else { + local partial "`e(partial)'" + } + tsrevar `partial', substitute + local partial_t "`r(varlist)'" + + if ~e(partialcons) { + local noconstant "noconstant" + } + + local allvars "`lhs_t' `rhs_t'" +* Partial-out block. Uses estimatation sample to get coeffs, markout sample for predict + _estimates hold `ivres', restore + foreach var of local allvars { + tempname `var'_partial + qui regress `var' `partial' if `esample', `noconstant' + qui predict double ``var'_partial' if `touse', resid + local allvars_partial "`allvars_partial' ``var'_partial'" + } + _estimates unhold `ivres' + + tokenize `allvars_partial' + local lhs_partial "`1'" + mac shift + local rhs_partial "`*'" + + tempname b + mat `b'=e(b) + mat colnames `b' = `rhs_partial' +* Use forcezero? 
+ tempvar xb + mat score double `xb' = `b' if `touse' + gen `typlist' `varlist' = `lhs_partial' - `xb' + label var `varlist' "Residuals" + } + else { +di in red "Option `type' not supported with -partial- option" + error 198 + } + } + else if "`type'" == "residuals" { + tempname lhs lhs_t xb + local lhs "`e(depvar)'" + tsrevar `lhs', substitute + local lhs_t "`r(varlist)'" + qui _predict `typlist' `xb' if `touse' + gen `typlist' `varlist'=`lhs_t'-`xb' + label var `varlist' "Residuals" + } +* Must be either xb or stdp + else { + _predict `typlist' `varlist' if `touse', `type' + } + +end diff --git a/110/replication_package/replication/ado/plus/i/ivreg28.ado b/110/replication_package/replication/ado/plus/i/ivreg28.ado new file mode 100644 index 0000000000000000000000000000000000000000..3380cb1043ab94a99f186472fd5cfc1d5b7b1bca --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg28.ado @@ -0,0 +1,5652 @@ +*! ivreg28 2.1.22 6July2007 +*! authors cfb & mes +*! cloned from official ivreg version 5.0.9 19Dec2001 +*! 
see end of file for version comments + +* Variable naming: +* lhs = LHS endogenous +* endo = RHS endogenous (instrumented) +* inexog = included exogenous (instruments) +* exexog = excluded exogenous (instruments) +* iv = {inexog exexog} = all instruments +* rhs = {endo inexog} = RHS regressors +* 1 at the end of the name means the varlist after duplicates and collinearities removed +* ..1_ct at the end means a straight count of the list +* .._ct at the end means ..1_ct with any additional detected cnts removed + +program define ivreg28, eclass byable(recall) sortpreserve + version 8.2 + local lversion 02.1.22 + local ivreg2_cmd "ivreg28" + + if replay() { + syntax [, FIRST FFIRST RF Level(integer $S_level) NOHEader NOFOoter dropfirst droprf /* + */ EForm(string) PLUS VERsion] + if "`version'" != "" & "`first'`ffirst'`rf'`noheader'`nofooter'`dropfirst'`droprf'`eform'`plus'" != "" { + di as err "option version not allowed" + error 198 + } + if "`version'" != "" { + di in gr "`lversion'" + ereturn clear + ereturn local version `lversion' + exit + } + if `"`e(cmd)'"' != "`ivreg2_cmd'" { + error 301 + } + if "`e(firsteqs)'" != "" & "`dropfirst'" == "" { +* On replay, set flag so saved eqns aren't dropped + local savefirst "savefirst" + } + if "`e(rfeq)'" != "" & "`droprf'" == "" { +* On replay, set flag so saved eqns aren't dropped + local saverf "saverf" + } + } + else { + + syntax [anything(name=0)] [if] [in] [aw fw pw iw/] [, /* + */ FIRST FFIRST NOID NOCOLLIN SAVEFIRST SAVEFPrefix(name) SMall Robust CLuster(varname) /* + */ GMM CUE CUEINIT(string) CUEOPTions(string) ORTHOG(string) ENDOGtest(string) FWL(string) /* + */ NOConstant Level(integer $S_level) Beta hc2 hc3 /* + */ NOHEader NOFOoter NOOUTput title(string) subtitle(string) /* + */ DEPname(string) EForm(string) PLUS /* + */ BW(string) kernel(string) Tvar(varname) Ivar(varname)/* + */ LIML COVIV FULLER(real 0) Kclass(string) /* + */ REDundant(string) RF SAVERF SAVERFPrefix(name) /* + */ B0(string) 
SMATRIX(string) WMATRIX(string) EWMATRIX(string) sw swpsd dofminus(integer 0) ] + + local n 0 + + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + if `s(stop)' { + error 198 + } + while `s(stop)'==0 { + if "`paren'"=="(" { + local n = `n' + 1 + if `n'>1 { +capture noi error 198 +di in red `"syntax is "(all instrumented variables = instrument variables)""' +exit 198 + } + gettoken p lhs : lhs, parse(" =") + while "`p'"!="=" { + if "`p'"=="" { +capture noi error 198 +di in red `"syntax is "(all instrumented variables = instrument variables)""' +di in red `"the equal sign "=" is required"' +exit 198 + } + local endo `endo' `p' + gettoken p lhs : lhs, parse(" =") + } +* To enable Cragg HOLS estimator, allow for empty endo list + local temp_ct : word count `endo' + if `temp_ct' > 0 { + tsunab endo : `endo' + } +* To enable OLS estimator with (=) syntax, allow for empty exexog list + local temp_ct : word count `lhs' + if `temp_ct' > 0 { + tsunab exexog : `lhs' + } + } + else { + local inexog `inexog' `lhs' + } + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + } + local 0 `"`lhs' `0'"' + + tsunab inexog : `inexog' + tokenize `inexog' + local lhs "`1'" + local 1 " " + local inexog `*' + + if "`gmm'`cue'" != "" & "`exexog'" == "" { + di in red "option `gmm'`cue' invalid: no excluded instruments specified" + exit 102 + } + +* Process options + +* Fuller implies LIML + if "`liml'" == "" & `fuller' != 0 { + local liml "liml" + } + +* b0 implies nooutput and noid + if "`b0'" ~= "" { + local nooutput "nooutput" + local noid "noid" + } + + if "`gmm'" != "" & "`cue'" != "" { +di as err "incompatible options: 2-step efficient gmm and cue gmm" + exit 198 + } + + if "`b0'" != "" & "`wmatrix'" != "" { +di as err "incompatible options: -b0- and -wmatrix-" + exit 198 + } + + if "`ewmatrix'" != "" { + if "`gmm'" != "" { +di as err "incompatible options: -ewmatrix- and 2-step efficient gmm" + exit 198 + } + local wmatrix "`ewmatrix'" + } + +* savefprefix 
implies savefirst + if "`savefprefix'" != "" & "`savefirst'" == "" { + local savefirst "savefirst" + } + +* default savefprefix is _ivreg2_ + if "`savefprefix'" == "" { + local savefprefix "_`ivreg2_cmd'_" + } + +* saverfprefix implies saverf + if "`saverfprefix'" != "" & "`saverf'" == "" { + local saverf "saverf" + } + +* default saverfprefix is _ivreg2_ + if "`saverfprefix'" == "" { + local saverfprefix "_`ivreg2_cmd'_" + } + +* LIML/kclass incompatibilities + if "`liml'`kclass'" != "" { + if "`gmm'`cue'" != "" { +di as err "GMM estimation not available with LIML or k-class estimators" + exit 198 + } + if `fuller' < 0 { +di as err "invalid Fuller option" + exit 198 + } + if "`liml'" != "" & "`kclass'" != "" { +di as err "cannot use liml and kclass options together" + exit 198 + } +* Process kclass string + tempname kclass2 + scalar `kclass2'=real("`kclass'") + if "`kclass'" != "" & (`kclass2' == . | `kclass2' < 0 ) { +di as err "invalid k-class option" + exit 198 + } + } + +* HAC estimation. +* If bw is omitted, default `bw' is empty string. +* If bw or kernel supplied, check/set `kernel'. +* Macro `kernel' is also used for indicating HAC in use. 
+ if "`bw'" != "" | "`kernel'" != "" { +* Need tvar only for markout with time-series stuff +* but data must be tsset for time-series operators in code to work + if "`tvar'" == "" { + local tvar "`_dta[_TStvar]'" + } + else if "`tvar'"!="`_dta[_TStvar]'" { +di as err "invalid tvar() option - data already tsset" + exit 5 + } + if "`ivar'" == "" { + local ivar "`_dta[_TSpanel]'" + } + else if "`ivar'"!="`_dta[_TSpanel]'" { +di as err "invalid ivar() option - data already tsset" + exit 5 + } + if "`tvar'" == "" & "`ivar'" != "" { +di as err "missing tvar() option with ivar() option" + exit 5 + } + if "`ivar'`tvar'"=="" { + capture tsset + } + else { + capture tsset `ivar' `tvar' + } + capture local tvar "`r(timevar)'" + capture local ivar "`r(panelvar)'" + + if "`tvar'" == "" { +di as err "must tsset data and specify timevar" + exit 5 + } + tsreport if `tvar' != . + if `r(N_gaps)' != 0 & "`ivar'"=="" { +di in gr "Warning: time variable " in ye "`tvar'" in gr " has " /* + */ in ye "`r(N_gaps)'" in gr " gap(s) in relevant range" + } + + if "`bw'" == "" { +di as err "bandwidth option bw() required for HAC-robust estimation" + exit 102 + } + local bw real("`bw'") +* Check it's a valid bandwidth + if `bw' != int(`bw') | /* + */ `bw' == . | /* + */ `bw' <= 0 { +di as err "invalid bandwidth in option bw() - must be integer > 0" + exit 198 + } +* Convert bw macro to simple integer + local bw=`bw' + +* Check it's a valid kernel + local validkernel 0 + if lower(substr("`kernel'", 1, 3)) == "bar" | "`kernel'" == "" { +* Default kernel + local kernel "Bartlett" + local window "lag" + local validkernel 1 + if `bw'==1 { +di in ye "Note: kernel=Bartlett and bw=1 implies zero lags used. Standard errors and" +di in ye " test statistics are not autocorrelation-consistent." + } + } + if lower(substr("`kernel'", 1, 3)) == "par" { + local kernel "Parzen" + local window "lag" + local validkernel 1 + if `bw'==1 { +di in ye "Note: kernel=Parzen and bw=1 implies zero lags used. 
Standard errors and" +di in ye " test statistics are not autocorrelation-consistent." + } + } + if lower(substr("`kernel'", 1, 3)) == "tru" { + local kernel "Truncated" + local window "lag" + local validkernel 1 + } + if lower(substr("`kernel'", 1, 9)) == "tukey-han" | lower("`kernel'") == "thann" { + local kernel "Tukey-Hanning" + local window "lag" + local validkernel 1 + if `bw'==1 { +di in ye "Note: kernel=Tukey-Hanning and bw=1 implies zero lags. Standard errors and" +di in ye " test statistics are not autocorrelation-consistent." + } + } + if lower(substr("`kernel'", 1, 9)) == "tukey-ham" | lower("`kernel'") == "thamm" { + local kernel "Tukey-Hamming" + local window "lag" + local validkernel 1 + if `bw'==1 { +di in ye "Note: kernel=Tukey-Hamming and bw=1 implies zero lags. Standard errors and" +di in ye " test statistics are not autocorrelation-consistent." + } + } + if lower(substr("`kernel'", 1, 3)) == "qua" | lower("`kernel'") == "qs" { + local kernel "Quadratic spectral" + local window "spectral" + local validkernel 1 + } + if lower(substr("`kernel'", 1, 3)) == "dan" { + local kernel "Daniell" + local window "spectral" + local validkernel 1 + } + if lower(substr("`kernel'", 1, 3)) == "ten" { + local kernel "Tent" + local window "spectral" + local validkernel 1 + } + if ~`validkernel' { + di in red "invalid kernel" + exit 198 + } + } + + if "`kernel'" != "" & "`cluster'" != "" { +di as err "cannot use HAC kernel estimator with -cluster- option" + exit 198 + } + +* changed below from `endog' to `endogtest' 2Aug06 MES + if "`orthog'`endogtest'`redundant'`fwl'" != "" { + capture tsunab orthog : `orthog' + capture tsunab endogtest : `endogtest' + capture tsunab redundant : `redundant' + capture tsunab fwl : `fwl' + } + + if "`hc2'`hc3'" != "" { + if "`hc2'"!="" { + di in red "option `hc2' invalid" + } + else di in red "option `hc3' invalid" + exit 198 + } + + if "`beta'" != "" { + di in red "option `beta' invalid" + exit 198 + } + +* Weights +* fweight and 
aweight accepted as is +* iweight not allowed with robust or gmm and requires a trap below when used with summarize +* pweight is equivalent to aweight + robust +* but in HAC case, robust implied by `kernel' rather than `robust' + + tempvar wvar + if "`weight'" == "fweight" | "`weight'"=="aweight" { + local wtexp `"[`weight'=`exp']"' + gen double `wvar'=`exp' + } + if "`weight'" == "fweight" & "`kernel'" !="" { + di in red "fweights not allowed (data are -tsset-)" + exit 101 + } + if "`weight'" == "iweight" { + if "`robust'`cluster'`gmm'`kernel'" !="" { + di in red "iweights not allowed with robust or gmm" + exit 101 + } + else { + local wtexp `"[`weight'=`exp']"' + gen double `wvar'=`exp' + } + } + if "`weight'" == "pweight" { + local wtexp `"[aweight=`exp']"' + gen double `wvar'=`exp' + local robust "robust" + } + if "`weight'" == "" { +* If no weights, define neutral weight variable + qui gen byte `wvar'=1 + } + +* If no kernel (=no HAC) then gmm implies (heteroskedastic-) robust + if "`kernel'" == "" & "`gmm'" != "" { + local robust "robust" + } + if `dofminus' > 0 { + local dofmopt "dofminus(`dofminus')" + } +* Stock-Watson robust SEs. 
+ if "`sw'`swpsd'" ~= "" { + if "`kernel'" ~= "" { +di as err "Stock-Watson robust SEs not supported with -kernel- option" + exit 198 + } + if "`cue'" ~= "" { +di as err "Stock-Watson robust SEs not supported with -cue- option" + exit 198 + } + if "`ivar'"=="" { +di as err "Must specify -ivar- with -sw- option" + exit 198 + } + } + + marksample touse + markout `touse' `lhs' `inexog' `exexog' `endo' `cluster' `tvar', strok + +* Weight statement + if "`weight'" ~= "" { + sum `wvar' if `touse' `wtexp', meanonly +di in gr "(sum of wgt is " %14.4e `r(sum_w)' ")" + } + +* Set local macro T and check that bw < T +* Also make sure only used sample is checked + if "`bw'" != "" { + sum `tvar' if `touse', meanonly + local T = r(max)-r(min)+1 + if `bw' > `T' { +di as err "invalid bandwidth in option bw() - cannot exceed timespan of data" + exit 198 + } + } + +************* Collinearities and duplicates block ***************** + + if "`noconstant'" != "" { + local rmcnocons "nocons" + } + +* Check for duplicates of variables +* To mimic official ivreg, in the case of duplicates, +* (1) inexog > endo +* (2) inexog > exexog +* (3) endo + exexog = inexog, as if it were "perfectly predicted" + local dupsen1 : list dups endo + local endo1 : list uniq endo + local dupsex1 : list dups exexog + local exexog1 : list uniq exexog + local dupsin1 : list dups inexog + local inexog1 : list uniq inexog +* Remove inexog from endo + local dupsen2 : list endo1 & inexog1 + local endo1 : list endo1 - inexog1 +* Remove inexog from exexog + local dupsex2 : list exexog1 & inexog1 + local exexog1 : list exexog1 - inexog1 +* Remove endo from exexog + local dupsex3 : list exexog1 & endo1 + local exexog1 : list exexog1 - endo1 + local dups "`dupsen1' `dupsex1' `dupsin1' `dupsen2' `dupsex2' `dupsex3'" + local dups : list uniq dups + + if "`nocollin'" == "" { +* First, collinearities check using canonical correlations approach +* Eigenvalue=1 => included endog is really included exogenous +* Eigenvalue=0 
=> included endog collinear with another included endog +* Corresponding column names give name of variable +* Code block stolen from below, so some repetition + local insts1 `inexog1' `exexog1' + local rhs1 `endo1' `inexog1' + local iv1_ct : word count `insts1' + local rhs1_ct : word count `rhs1' + local endo1_ct : word count `endo1' + local exex1_ct : word count `exexog1' + local endoexex1_ct : word count `endo1' `exexog1' + local inexog1_ct : word count `inexog1' + if `endo1_ct' > 0 { + tempname ccmat ccrealev ccimagev cc A XX XXinv ZZ ZZinv XZ XPZX + qui mat accum `A' = `endo1' `insts1' if `touse' `wtexp', `rmcnocons' + mat `XX' = `A'[1..`endo1_ct',1..`endo1_ct'] + mat `XXinv'=syminv(`XX') + mat `ZZ' = `A'[`endo1_ct'+1...,`endo1_ct'+1...] + mat `ZZinv'=syminv(`ZZ') + mat `XZ' = `A'[1..`endo1_ct',`endo1_ct'+1...] + mat `XPZX'=`XZ'*`ZZinv'*`XZ'' + mat `ccmat' = `XXinv'*`XPZX' + mat eigenvalues `ccrealev' `ccimagev' = `ccmat' + foreach vn of varlist `endo1' { + local i=colnumb(`ccmat',"`vn'") + if round(`ccmat'[`i',`i'],10e-7)==0 { +* Collinear with another endog, so remove from endog list + local endo1 : list endo1-vn + } + if round(`ccmat'[`i',`i'],10e-7)==1 { +* Collinear with exogenous, so remove from endog and add to inexog + local endo1 : list endo1-vn + local inexog1 "`inexog1' `vn'" + local ecollin "`ecollin' `vn'" + } + } +* Loop through endo1 to find Eigenvalues=0 or 1 + } + +* Remove collinearities. Use _rmcollright to enforce same priority as above. 
+ capture version 9.2 + if _rc==0 { +* _rmcollright crashes if no arguments supplied + capture _rmcollright `inexog1' `exexog1' if `touse' `wtexp', `rmcnocons' + } + else { + qui _rmcoll `inexog1' `exexog1' if `touse' `wtexp', `rmcnocons' + } + version 8.2 + +* endo1 has had within-endo collinear removed, so non-colllinear list is _rmcoll result + endo1 + local ncvars `r(varlist)' `endo1' + local allvars1 `endo1' `inexog1' `exexog1' +* collin gets collinear variables to be removed + local collin : list allvars1-ncvars +* Remove collin from exexog1 + local exexog1 : list exexog1-collin +* Remove collin from inexog1 + local inexog1 : list inexog1-collin + +* Collinearity and duplicates warning messages, if necessary + if "`dups'" != "" { +di in gr "Warning - duplicate variables detected" +di in gr "Duplicates:" _c + Disp `dups', _col(21) + } + if "`ecollin'" != "" { +di in gr "Warning - endogenous variable(s) collinear with instruments" +di in gr "Vars now exogenous:" _c + Disp `ecollin', _col(21) + } + if "`collin'" != "" { +di in gr "Warning - collinearities detected" +di in gr "Vars dropped:" _c + Disp `collin', _col(21) + } + } + +**** End of collinearities block ************ + +**** Partial-out FWL block ****************** + + if "`fwl'" != "" { + preserve + local fwl : subinstr local fwl "_cons" "", all count(local fwlcons) + if `fwlcons' > 0 & "`noconstant'"~="" { +di in r "Error: _cons listed in fwl() but equation specifies -noconstant-." + error 198 + } + else if "`noconstant'"~="" { + local fwlcons 0 + } + else { +* Just in case of multiple _cons + local fwlcons 1 + } + local fwldrop : list inexog - inexog1 + local fwl1 : list fwl - fwldrop + local fwlcheck : list fwl1 - inexog1 + if ("`fwlcheck'"~="") { +di in r "Error: `fwlcheck' listed in fwl() but not in list of regressors." 
+ error 198 + } + local inexog1 : list inexog1 - fwl1 + if "`cluster'"~="" { +* Check that cluster var won't be transformed + local allvars "`lhs' `inexog' `endo' `exexog'" + local clustvarcheck : list cluster in allvars + if `clustvarcheck' { +di in r "Error: cannot use cluster variable `cluster' as dependent variable, regressor or IV" +di in r " in combination with -fwl- option." + error 198 + } + } +* Constant is partialled out, unless nocons already specified in the first place + tempname fwl_resid + foreach var of varlist `lhs' `inexog1' `endo1' `exexog1' { + qui regress `var' `fwl1' if `touse' `wtexp', `noconstant' + qui predict double `fwl_resid' if `touse', resid + qui replace `var' = `fwl_resid' + drop `fwl_resid' + } + local fwl_ct : word count `fwl1' + if "`noconstant'" == "" { +* fwl_ct used for small-sample adjustment to regression F-stat + local fwl_ct = `fwl_ct' + 1 + local noconstant "noconstant" + } + } + else { +* Set count of fwl vars to zero if option not used + local fwl_ct 0 + } + +********************************************* + + local insts1 `inexog1' `exexog1' + local rhs1 `endo1' `inexog1' + local iv1_ct : word count `insts1' + local rhs1_ct : word count `rhs1' + local endo1_ct : word count `endo1' + local exex1_ct : word count `exexog1' + local endoexex1_ct : word count `endo1' `exexog1' + local inexog1_ct : word count `inexog1' + + if "`noconstant'" == "" { + local cons_ct 1 + } + else { + local cons_ct 0 + } + + if `rhs1_ct' > `iv1_ct' { + di in red "equation not identified; must have at " /* + */ "least as many instruments not in" + di in red "the regression as there are " /* + */ "instrumented variables" + exit 481 + } + + if `rhs1_ct' + `cons_ct' == 0 { + di in red "error: no regressors specified" + exit 102 + } + + if "`cluster'"!="" { + local clopt "cluster(`cluster')" + if "`robust'"=="" { + local robust "robust" + } + } + if "`bw'"!="" { + local bwopt "bw(`bw')" + } + if "`kernel'"!="" { + local kernopt "kernel(`kernel')" + } +* 
If depname not provided (default) name is lhs variable + if "`depname'"=="" { + local depname `lhs' + } + +************************************************************************************************ +* Cross-products and basic IV coeffs, residuals and moment conditions + tempvar iota y2 yhat ivresid ivresid2 gresid gresid2 lresid lresid2 b0resid b0resid2 s1resid + tempname Nprec ysum yy yyc r2u r2c B V ivB gmmB wB lB gmmV ivest + tempname r2 r2_a ivrss lrss wbrss b0rss rss mss rmse sigmasq iv_s2 l_s2 wb_s2 b0_s2 F Fp Fdf2 + tempname S Sinv W s1Zu s2Zu b0Zu wbZu wbresid wbresid2 s1sigmasq + tempname A XZ XZa XZb Zy ZZ ZZinv XPZX XPZXinv XPZy + tempname YY Z2Z2 ZY Z2Y XXa XXb XX Xy Z2Z2inv XXinv + tempname XZWZX XZWZXinv XZWZy XZW + tempname B V B1 uZSinvZu j jp arubin arubinp tempmat + +* Generate cross-products of y, X, Z + qui matrix accum `A' = `lhs' `endo1' `exexog1' `inexog1' /* + */ if `touse' `wtexp', `noconstant' + if "`noconstant'"=="" { + matrix rownames `A' = `lhs' `endo1' `exexog1' /* + */ `inexog1' _cons + matrix colnames `A' = `lhs' `endo1' `exexog1' /* + */ `inexog1' _cons + } + else { + matrix rownames `A' = `lhs' `endo1' `exexog1' `inexog1' + matrix colnames `A' = `lhs' `endo1' `exexog1' `inexog1' + } + if `endo1_ct' > 0 { +* X'Z is [endo1 inexog1]'[exexog1 inexog1] + mat `XZ'=`A'[2..`endo1_ct'+1,`endo1_ct'+2...] +* Append portion corresponding to included exog if they (incl constant) exist + if 2+`endo1_ct'+`iv1_ct'-(`rhs1_ct'-`endo1_ct') /* + */ <= rowsof(`A') { + mat `XZ'=`XZ' \ /* + */ `A'[2+`endo1_ct'+`iv1_ct'- /* + */ (`rhs1_ct'-`endo1_ct')..., /* + */ `endo1_ct'+2...] + } +* If included exog (incl const) exist, create XX matrix in 3 steps + if `inexog1_ct' + `cons_ct' > 0 { + mat `XXa' = `A'[2..`endo1_ct'+1, 2..`endo1_ct'+1], /* + */ `A'[2..`endo1_ct'+1, `endoexex1_ct'+2...] + mat `XXb' = `A'[`endoexex1_ct'+2..., 2..`endo1_ct'+1], /* + */ `A'[`endoexex1_ct'+2..., `endoexex1_ct'+2...] 
+ mat `XX' = `XXa' \ `XXb' + mat `Xy' = `A'[2..`endo1_ct'+1, 1] \ `A'[`endoexex1_ct'+2..., 1] + } + else { + mat `XX' = `A'[2..`endo1_ct'+1, 2..`endo1_ct'+1] + mat `Xy' = `A'[2..`endo1_ct'+1, 1] + } + } + else { +* Cragg HOLS estimator with no endogenous variables + mat `XZ'= `A'[2+`iv1_ct'-(`rhs1_ct'-`endo1_ct')..., /* + */ 2...] + mat `XX' = `A'[`endoexex1_ct'+2..., `endoexex1_ct'+2...] + mat `Xy' = `A'[`endoexex1_ct'+2..., 1] + } + + mat `XX'=(`XX'+`XX'')/2 + mat `XXinv'=syminv(`XX') + mat `Zy'=`A'[`endo1_ct'+2...,1] + mat `ZZ'=`A'[`endo1_ct'+2...,`endo1_ct'+2...] + mat `ZZ'=(`ZZ'+`ZZ'')/2 + mat `ZZinv'=syminv(`ZZ') +* diag0cnt probably superfluous since collinearity checks will catch this unless disabled + local iv_ct = rowsof(`ZZ') - diag0cnt(`ZZinv') + mat `YY'=`A'[1..`endo1_ct'+1, 1..`endo1_ct'+1] + mat `ZY' = `A'[`endo1_ct'+2..., 1..`endo1_ct'+1] + mat `XPZX'=`XZ'*`ZZinv'*`XZ'' + mat `XPZX'=(`XPZX'+`XPZX'')/2 + mat `XPZXinv'=syminv(`XPZX') + mat `XPZy'=`XZ'*`ZZinv'*`Zy' +****************************** + qui gen byte `iota'=1 + qui gen double `y2'=`lhs'^2 +* Stata summarize won't work with iweights, so must use matrix cross-product + qui matrix vecaccum `ysum' = `iota' `y2' `lhs' `wtexp' if `touse' +* Nprec is ob count from mat accum. Use this rather than `N' in calculations +* here and below because in official -regress- `N' is rounded if iweights are used. + scalar `Nprec'=`ysum'[1,3] + if "`weight'" == "iweight" { + scalar `Nprec'=round(`Nprec') + } + local N=round(`Nprec') + scalar `yy'=`ysum'[1,1] + scalar `yyc'=`yy'-`ysum'[1,2]^2/`Nprec' + +******************************************************************************************* +* First-step estimators: b0, wmatrix, LIML-kclass, IV. +* Generate residuals s1resid for used in 2SFEGMM and robust. +* User-supplied b0 provides value of CUE obj fn. 
+ if "`b0'" != "" { + capture drop `yhat' + qui mat score double `yhat' = `b0' if `touse' + qui gen double `b0resid'=`lhs'-`yhat' + qui gen double `b0resid2'=`b0resid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `b0resid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `b0rss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `b0_s2'=`b0rss'/(`Nprec'-`dofminus') + scalar `s1sigmasq'=`b0_s2' + qui gen double `s1resid'=`b0resid' + } + else if "`wmatrix'" != "" { +* GMM with arbitrary weighting matrix provides first-step estimates + local cn : colnames(`ZZ') + matrix `W'=`wmatrix' +* Rearrange/select columns to mat IV matrix + capture matsort `W' "`cn'" + local wrows = rowsof(`W') + local wcols = colsof(`W') + local zcols = colsof(`ZZ') + if _rc ~= 0 | (`wrows'~=`zcols') | (`wcols'~=`zcols') { +di as err "-wmatrix- option error: supplied matrix columns/rows do not match IV list" +exit 198 + } + mat `XZWZX'=`XZ'*`W'*`XZ'' + mat `XZWZy'=`XZ'*`W'*`Zy' + mat `XZWZX'=(`XZWZX'+`XZWZX'')/2 + mat `XZWZXinv'=syminv(`XZWZX') + mat `XZW'=`XZ'*`W' + mat `wB'=`XZWZy''*`XZWZXinv'' + + capture drop `yhat' + qui mat score double `yhat' = `wB' if `touse' + qui gen double `wbresid'=`lhs'-`yhat' + qui gen double `wbresid2'=`wbresid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `wbresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `wbrss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `wb_s2'=`wbrss'/(`Nprec'-`dofminus') + scalar `s1sigmasq'=`wb_s2' + qui gen double `s1resid'=`wbresid' + } + else if "`liml'`kclass'" != "" { +* LIML and kclass code + tempname WW WW1 Evec Eval Evaldiag target lambda lambda2 khs XhXh XhXhinv ll + if "`kclass'" == "" { +* LIML block + matrix `WW' = `YY' - `ZY''*`ZZinv'*`ZY' + if `inexog1_ct' + `cons_ct' > 0 { + mat `Z2Y' = `A'[`endoexex1_ct'+2..., 1..`endo1_ct'+1] + mat `Z2Z2' = `A'[`endoexex1_ct'+2..., `endoexex1_ct'+2...] 
+ mat `Z2Z2'=(`Z2Z2'+`Z2Z2'')/2 + mat `Z2Z2inv' = syminv(`Z2Z2') + matrix `WW1' = `YY' - `Z2Y''*`Z2Z2inv'*`Z2Y' + } + else { +* Special case of no included exogenous (incl constant) + matrix `WW1' = `YY' + } + matrix `WW'=(`WW'+`WW'')/2 + matrix symeigen `Evec' `Eval' = `WW' + matrix `Evaldiag' = diag(`Eval') +* Replace diagonal elements of Evaldiag with the element raised to the power (-1/2) + local i 1 + while `i' <= rowsof(`Evaldiag') { +* Need to use capture because with collinearities, diag may be virtually zero +* ... but actually negative + capture matrix `Evaldiag'[`i',`i'] = /* + */ `Evaldiag'[`i',`i']^(-0.5) + local i = `i'+1 + } + matrix `target' = (`Evec'*`Evaldiag'*`Evec'') * `WW1' /* + */ * (`Evec'*`Evaldiag'*`Evec'') +* Re-use macro names + matrix `target'=(`target'+`target'')/2 + matrix symeigen `Evec' `Eval' = `target' +* Get smallest eigenvalue +* Note that collinearities can yield a nonsense eigenvalue appx = 0 +* and just-identified will yield an eigenvalue that is ALMOST exactly = 1 +* so require it to be >= 0.9999999999. + local i 1 + scalar `lambda'=. + scalar `lambda2'=. + while `i' <= colsof(`Eval') { + if (`lambda' > `Eval'[1,`i']) & (`Eval'[1,`i'] >=0.9999999999) { + scalar `lambda2' = `lambda' + scalar `lambda' = `Eval'[1,`i'] + } + local i = `i'+1 + } + if `fuller'==0 { +* Basic LIML. Macro kclass2 is the scalar. 
+ scalar `kclass2'=`lambda' + } + else { +* Fuller LIML + if `fuller' > (`N'-`iv_ct') { +di as err "error: invalid choice of Fuller LIML parameter" + exit 198 + } + scalar `kclass2' = `lambda' - `fuller'/(`N'-`iv_ct') + } +* End of LIML block + } + mat `XhXh'=(1-`kclass2')*`XX'+`kclass2'*`XPZX' + mat `XhXh'=(`XhXh'+`XhXh'')/2 + mat `XhXhinv'=syminv(`XhXh') + mat `lB'=`Xy''*`XhXhinv'*(1-`kclass2') + `kclass2'*`Zy''*`ZZinv'*`XZ''*`XhXhinv' + capture drop `yhat' + qui mat score double `yhat'=`lB' if `touse' + qui gen double `lresid'=`lhs' - `yhat' + qui gen double `lresid2'=`lresid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `lresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `lrss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `l_s2'=`lrss'/(`Nprec'-`dofminus') + scalar `s1sigmasq'=`l_s2' + qui gen double `s1resid'=`lresid' + } + else { +* IV resids are 1st-step GMM resids +* In these expressions, ignore scaling of W + mat `ivB' = `XPZy''*`XPZXinv'' + mat `XZWZX'=`XPZX' + mat `XZWZXinv'=`XPZXinv' + mat `XZW'=`XZ'*`ZZinv' + capture drop `yhat' + qui mat score double `yhat' = `ivB' if `touse' + qui gen double `ivresid'=`lhs'-`yhat' + qui gen double `ivresid2'=`ivresid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `ivresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `ivrss'=`ysum'[1,1] + scalar `iv_s2'=`ivrss'/(`Nprec'-`dofminus') + scalar `s1sigmasq'=`iv_s2' + qui gen double `s1resid'=`ivresid' + } +* Orthogonality conditions using step 1 residuals + qui mat vecaccum `s1Zu'=`s1resid' `exexog1' `inexog1' /* + */ `wtexp' if `touse', `noconstant' + +******************************************************************************************* +* S covariance matrix of orthogonality conditions +******************************************************************************************* +* If user-supplied S matrix is used, use it + if "`smatrix'" != "" { + local cn : colnames(`ZZ') + matrix `S'=`smatrix' +* 
Rearrange/select columns to mat IV matrix + capture matsort `S' "`cn'" + local srows = rowsof(`S') + local scols = colsof(`S') + local zcols = colsof(`ZZ') + if _rc ~= 0 | (`srows'~=`zcols') | (`scols'~=`zcols') { +di as err "-smatrix- option error: supplied matrix columns/rows do not match IV list" +exit 198 + } + mat `S' = (`S' + `S'') / 2 + mat `Sinv'=syminv(`S') + local rankS = rowsof(`Sinv') - diag0cnt(`Sinv') + } + +******************************************************************************************* +* Start robust block for robust-HAC S and Sinv +* Do not enter if user supplies smatrix or if CUE + if "`robust'`cluster'" != "" & "`cue'"=="" & "`smatrix'"=="" { +* Optimal weighting matrix +* Block calculates S_0 robust matrix +* _robust has same results as +* mat accum `S'=`exexog1' `inexog1' [iweight=`ivresid'^2] if `touse' +* mat `S' = `S'*1/`Nprec' +* _robust doesn't work properly with TS variables, so must first tsrevar + tsrevar `exexog1' `inexog1' + local TSinsts1 `r(varlist)' +* Create identity matrix with matching col/row names + mat `S'=I(colsof(`s1Zu')) + if "`noconstant'"=="" { + mat colnames `S' = `TSinsts1' "_cons" + mat rownames `S' = `TSinsts1' "_cons" + } + else { + mat colnames `S' = `TSinsts1' + mat rownames `S' = `TSinsts1' + } + _robust `s1resid' `wtexp' if `touse', variance(`S') `clopt' minus(0) + if "`cluster'"!="" { + local N_clust=r(N_clust) + } + mat `S' = `S'*1/`Nprec' +* Above doesn't work properly with iweights (i.e. yield same matrix as fw), +* hence iweight trap at start + if "`kernel'" != "" { +* HAC block for S_1 onwards matrices + tempvar vt1 + qui gen double `vt1' = . 
+ tempname tt tx kw karg ow +* Use insts with TS ops removed and with iota (constant) column + if "`noconstant'"=="" { + local insts1c "`TSinsts1' `iota'" + } + else { + local insts1c "`TSinsts1'" + } + local iv1c_ct : word count `insts1c' +* "tau=0 loop" is S_0 block above for all robust code + local tau 1 +* Spectral windows require looping through all T-1 autocovariances + if "`window'" == "spectral" { + local TAU `T'-1 +di in ye "Computing kernel ..." + } + else { + local TAU `bw' + } + if "`weight'" == "" { +* If no weights specified, define neutral ow variable and weight expression for code below + qui gen byte `ow'=1 + local wtexp `"[fweight=`wvar']"' + } + else { +* pweights and aweights + summ `wvar' if `touse', meanonly + qui gen double `ow' = `wvar'/r(mean) + } + while `tau' <= `TAU' { + capture mat drop `tt' + local i 1 + while `i' <= `iv1c_ct' { + local x : word `i' of `insts1c' +* Add lags defined with TS operators + local Lx "L`tau'.`x'" + local Ls1resid "L`tau'.`s1resid'" + local Low "L`tau'.`ow'" + qui replace `vt1' = `Lx'*`s1resid'* /* + */ `Ls1resid'*`Low'*`ow' if `touse' +* Use capture here because there may be insufficient observations, e.g., if +* the IVs include lags and tau=N-1. _rc will be 2000 in this case. 
+ capture mat vecaccum `tx' = `vt1' `insts1c' /* + */ if `touse', nocons + if _rc == 0 { + mat `tt' = nullmat(`tt') \ `tx' + } + local i = `i'+1 + } +* bw = bandwidth, karg is argument to kernel function, kw is kernel function (weight) + scalar `karg' = `tau'/(`bw') + if "`kernel'" == "Truncated" { + scalar `kw'=1 + } + if "`kernel'" == "Bartlett" { + scalar `kw'=(1-`karg') + } + if "`kernel'" == "Parzen" { + if `karg' <= 0.5 { + scalar `kw' = 1-6*`karg'^2+6*`karg'^3 + } + else { + scalar `kw' = 2*(1-`karg')^3 + } + } + if "`kernel'" == "Tukey-Hanning" { + scalar `kw'=0.5+0.5*cos(_pi*`karg') + } + if "`kernel'" == "Tukey-Hamming" { + scalar `kw'=0.54+0.46*cos(_pi*`karg') + } + if "`kernel'" == "Tent" { + scalar `kw'=2*(1-cos(`tau'*`karg')) / (`karg'^2) + } + if "`kernel'" == "Daniell" { + scalar `kw'=sin(_pi*`karg') / (_pi*`karg') + } + if "`kernel'" == "Quadratic spectral" { + scalar `kw'=25/(12*_pi^2*`karg'^2) /* + */ * ( sin(6*_pi*`karg'/5)/(6*_pi*`karg'/5) /* + */ - cos(6*_pi*`karg'/5) ) + } +* Need -capture-s here because tt may not exist (because of insufficient observations/lags) + capture mat `tt' = (`tt'+`tt'')*`kw'*1/`Nprec' + if _rc == 0 { + mat `S' = `S' + `tt' + } + local tau = `tau'+1 + } + if "`weight'" == "" { +* If no weights specified, remove neutral weight variables + local wtexp "" + } + } +* To give S the right col/row names + mat `S'=`S'+0*diag(`s1Zu') +* Right approach is to adjust S by N/(N-dofminus) if NOT cluster +* because clustered S is already "adjusted" + if "`cluster'"=="" { + mat `S'=`S'*`Nprec'/(`Nprec'-`dofminus') + } +* Stock-Watson robust SEs. Requires `wvar' to be defined above. +* Correspondence between S-W (2006) and code below assumes ivreg2 is called on demeaned data. +* Variable ivar identifies the observational unit. +* wvar will simply be 1 for all observations unless weights are used. +* T_i is number of observations for an observational unit, extended to unbalanced data. 
+* SW consider only balanced data and denote this as T (constant acros units). +* s1resid is (fixed effects) residuals. SW denote this as u_tilda_hat (p. 2, eqn 4). +* s2 is, for an observational unit i, 1/(T-1) * sum of squared (fixed effects) residuals. +* This is the second term in () in the expression for B_hat in SW eqn 6, p. 3. +* mat opaccum calculates a cross-prod of the form A = X1'e1e1'X1 + X2'e2e2'X2 + ... + Xk'ekek'Xk +* ei is s from above. eiei' is a T_i x T_i matrix filled with s_i2s. Thus the cross-prod becomes +* A = s_1^2*X1'X1 + s_2^2*X2'e2e2'X2 + ... + s_k^2*Xk'Xk +* which is the form of B_hat in SW eqn 6 p. 3, except for the missing 1/N and 1/T +* In unbalanced case, 1/T isn't constant, so must incorporate the 1/T that weights the Xi'Xi into the s, +* hence the second division of s2 by T_i. +* S is SW's Sigma_hat_HR-FE, which is the fixed effects S (=Sigma_hat_HR-XS) minus 1/(T-1)*B)hat +* and then multiplied by (T-1)/(T-2). In SW, T is constant because they cover only the balanced case. +* Here, T varies across units, so we use the harmonic mean of T for T_bar. +* PSD code by CFB based on SW point 10 on p. 6. Guarantees S will be PSD. 
+ if "`sw'`swpsd'" ~= "" { + tempname B s s2 T_i T_inv T_bar s1resid2 + qui gen double `s1resid2'=`s1resid'^2 + sort `ivar' `touse' + qui by `ivar' `touse': gen long `T_i' = sum(`wvar') if `touse' + qui by `ivar' `touse': replace `T_i' = `T_i'[_N] if `touse' & _n<_N + qui gen `T_inv' = 1/`T_i' + sum `T_inv' if `touse', meanonly + scalar `T_bar' = 1/r(mean) + qui by `ivar' `touse': gen double `s2'=sum(`s1resid2'*`wvar') if `touse' + qui by `ivar' `touse': replace `s2'=`s2'[_N] if `touse' & _n<_N + qui replace `s2' = `s2'/(`T_i'-1) + qui replace `s2' = `s2'/`T_i' + qui gen double `s' = sqrt(`s2') + qui mat opaccum `B'=`exexog1' `inexog1' `wtexp' if `touse', /* + */ group(`ivar') opvar(`s') `noconstant' + mat `B' = `B' * 1/`Nprec' + mat `S' = (`T_bar'-1)/(`T_bar'-2)*(`S' - `B'*1/(`T_bar'-1)) + if "`swpsd'" ~= "" { + mat `S'=(`S'+`S'')/2 + tempname X v + mat symeigen `X' `v' = `S' + local ncol = colsof(`S') + forv i=1/`ncol' { + mat `v'[1,`i']= abs(`v'[1,`i']) + } + mat `S' = `X' * diag(`v') * `X'' + } + } + mat `S'=(`S'+`S'')/2 + mat `Sinv'=syminv(`S') + local rankS = rowsof(`Sinv') - diag0cnt(`Sinv') + } + +* End robust-HAC S and Sinv block +************************************************************************************ +* Block for non-robust S and Sinv, including autocorrelation-consistent (AC). +* Do not enter if user supplies smatrix or if cue + + if "`robust'`cluster'`cue'`smatrix'"=="" { +* First do with S_0 (=S for simple IV) +* Step 1 sigma^2 is IV sigma^2 unless b0 or wmatrix provided + mat `S' = `s1sigmasq'*`ZZ'*(1/`Nprec') + + if "`kernel'" != "" { +* AC code for S_1 onwards matrices + tempvar vt1 + qui gen double `vt1' = . 
+ tempname tt tx kw karg ow sigttj +* Use insts with TS ops removed and with iota (constant) column + tsrevar `exexog1' `inexog1' + local TSinsts1 `r(varlist)' + if "`noconstant'"=="" { + local insts1c "`TSinsts1' `iota'" + } + else { + local insts1c "`TSinsts1'" + } + local iv1c_ct : word count `insts1c' +* "tau=0 loop" is S_0 block above + local tau 1 +* Spectral windows require looping through all T-1 autocovariances + if "`window'" == "spectral" { + local TAU `T'-1 +di in ye "Computing kernel ..." + } + else { + local TAU `bw' + } + if "`weight'" == "" { +* If no weights specified, define neutral ow variable and wtexp for code below + qui gen byte `ow'=1 + local wtexp `"[fweight=`wvar']"' + } + else { +* pweights and aweights + summ `wvar' if `touse', meanonly + qui gen double `ow' = `wvar'/r(mean) + } + while `tau' <= `TAU' { + capture mat drop `tt' + local i 1 +* errflag signals problems that make this loop's tt invalid + local errflag 0 +* Additional marksample/markout required so that treatment of MVs is consistent across all IVs + marksample touse2 + markout `touse2' `insts1c' L`tau'.(`insts1c') + local Low "L`tau'.`ow'" + while `i' <= `iv1c_ct' { + local x : word `i' of `insts1c' +* Add lags defined with TS operators + local Lx "L`tau'.`x'" + qui replace `vt1'=. + qui replace `vt1' = `Lx'*`Low'*`ow' if `touse' & `touse2' +* Use capture here because there may be insufficient observations, e.g., if +* the IVs include lags and tau=N-1. _rc will be 2000 in this case. 
+ capture mat vecaccum `tx' = `vt1' `insts1c' /* + */ if `touse', nocons + if _rc == 0 { + mat `tt' = nullmat(`tt') \ `tx' + } + local i = `i'+1 + } + capture mat `tt' = 1/`Nprec' * `tt' + if _rc != 0 { + local errflag = 1 + } + local Ls1resid "L`tau'.`s1resid'" +* Weights belong here as well + tempvar ivLiv + qui gen double `ivLiv' = `s1resid'*`Ls1resid'*`ow'*`Low' if `touse' + qui sum `ivLiv' if `touse', meanonly + scalar `sigttj' = r(sum)/`Nprec' + + capture mat `tt' = `sigttj' * `tt' +* bw = bandwidth, karg is argument to kernel function, kw is kernel function (weight) + scalar `karg' = `tau'/(`bw') + if "`kernel'" == "Truncated" { + scalar `kw'=1 + } + if "`kernel'" == "Bartlett" { + scalar `kw'=(1-`karg') + } + if "`kernel'" == "Parzen" { + if `karg' <= 0.5 { + scalar `kw' = 1-6*`karg'^2+6*`karg'^3 + } + else { + scalar `kw' = 2*(1-`karg')^3 + } + } + if "`kernel'" == "Tukey-Hanning" { + scalar `kw'=0.5+0.5*cos(_pi*`karg') + } + if "`kernel'" == "Tukey-Hamming" { + scalar `kw'=0.54+0.46*cos(_pi*`karg') + } + if "`kernel'" == "Tent" { + scalar `kw'=2*(1-cos(`tau'*`karg')) / (`karg'^2) + } + if "`kernel'" == "Daniell" { + scalar `kw'=sin(_pi*`karg') / (_pi*`karg') + } + if "`kernel'" == "Quadratic spectral" { + scalar `kw'=25/(12*_pi^2*`karg'^2) /* + */ * ( sin(6*_pi*`karg'/5)/(6*_pi*`karg'/5) /* + */ - cos(6*_pi*`karg'/5) ) + } + +* Need -capture-s here because tt may not exist (because of insufficient observations/lags) + capture mat `tt' = (`tt'+`tt'')*`kw' + if _rc != 0 { + local errflag = 1 + } +* Accumulate if tt is valid + if `errflag' == 0 { + capture mat `S' = `S' + `tt' + } + local tau = `tau'+1 + } + if "`weight'" == "" { +* If no weights specified, remove neutral weight variables + local wtexp "" + } + } +* End of AC code +* To give S the right col/row names + mat `S'=`S'+0*diag(`s1Zu') + mat `S'=(`S'+`S'')/2 + mat `Sinv'=syminv(`S') + local rankS = rowsof(`Sinv') - diag0cnt(`Sinv') + } + +* End of non-robust S and Sinv code (including AC) 
+******************************************************************************************* +* 2nd step and final coefficients +******************************************************************************************* +* User-supplied b0. CUE objective function. + if "`b0'" ~= "" { + mat `B' = `b0' + scalar `rss'=`b0rss' + scalar `sigmasq'=`b0_s2' + mat `W' = `Sinv' + } +******************************************************************************************* +* Block for gmm 2nd step to get coefficients and 2nd step residuals + +* Non-robust IV, LIML, k-class, CUE do not enter + if "`gmm'`robust'`cluster'`kernel'`wmatrix'" != "" & "`cue'"=="" & "`ewmatrix'"=="" { + mat `tempmat'=`XZ'*`Sinv'*`XZ'' + mat `tempmat'=(`tempmat'+`tempmat'')/2 + mat `B1'=syminv(`tempmat') + mat `B1'=(`B1'+`B1'')/2 + mat `gmmB'=(`B1'*`XZ'*`Sinv'*`Zy')' + + capture drop `yhat' + qui mat score double `yhat'=`gmmB' if `touse' + qui gen double `gresid'=`lhs'-`yhat' + qui gen double `gresid2'=`gresid'^2 + qui mat vecaccum `s2Zu'=`gresid' `exexog1' `inexog1' /* + */ `wtexp' if `touse', `noconstant' + } +******************************************************************************************* +* GMM with arbitrary weighting matrix + if ("`wmatrix'"~="") & ("`gmm'"=="") & ("`liml'`kclass'`cue'"=="") & "`b0'"=="" { + mat `B'=`wB' + scalar `rss'=`wbrss' + scalar `sigmasq'=`wb_s2' +* Weighting matrix wmatrix already checked and assigned to macro W + } +******************************************************************************************* +* IV coefficients + if ("`wmatrix'"=="") & ("`gmm'"=="") & ("`liml'`kclass'`cue'"=="") & "`b0'"=="" { + mat `B'=`ivB' + scalar `rss'=`ivrss' + scalar `sigmasq'=`iv_s2' +* IV weighting matrix. 
By convention, no small-sample adjustment (consistent with S) + mat `W' = `ZZinv'*(`Nprec'-`dofminus')/`iv_s2' + } +******************************************************************************************* +* LIML, k-class coefficients + if "`liml'`kclass'" ~= "" { + mat `B'=`lB' + scalar `rss'=`lrss' + scalar `sigmasq'=`l_s2' +* No weighting matrix. + } +******************************************************************************************* +* Efficient GMM coefficients + if "`gmm'"!="" & ("`liml'`kclass'`cue'"=="") & "`b0'"=="" { + mat `B'=`gmmB' + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `gresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `rss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `sigmasq'=`rss'/(`Nprec'-`dofminus') + mat `W'=`Sinv' + } +******************************************************************************************* +* Var-cov matrix +******************************************************************************************* +* Expressions below multipy by N because we are working with cross-products (XZ) not vcvs (Qxz) +* Efficient GMM: homoskedastic IV, 2-step FEGMM. LIML, k-class, CUE handled separately. +* No robust, cluster, kernel => must be efficient GMM +* GMM option => must be efficient GMM +* b0 => must be efficient GMM +* ewmatrix => must be efficient GMM + tempname rankV + if ("`robust'`cluster'`kernel'`liml'`kclass'`cue'`wmatrix'"=="") /* + */ | ("`gmm'"~="") /* + */ | ("`b0'"~="") /* + */ | ("`ewmatrix'"~="") { + mat `tempmat'=`XZ'*`Sinv'*`XZ'' + mat `tempmat'=(`tempmat'+`tempmat'')/2 + mat `V' = syminv(`tempmat')*`Nprec' + mat `V'=(`V'+`V'')/2 + scalar `rankV'=rowsof(`tempmat') - diag0cnt(`tempmat') + } +* Possibly inefficient GMM: robust of all sorts with no 2nd step. LIML, k-class, CUE handled separately. 
+ else if ("`liml'`kclass'`cue'"=="") { + mat `V'=`XZWZXinv'*`XZW'*`S'* /* + */ `XZW''*`XZWZXinv'*`Nprec' + mat `V'=(`V'+`V'')/2 + mat `tempmat'=syminv(`V') + scalar `rankV'=rowsof(`tempmat') - diag0cnt(`tempmat') + } +* LIML and k-class non-robust + else if ("`liml'`kclass'" ~= "") & ("`robust'`cluster'" == "") { + if "`coviv'"== "" { +* LIML or k-class cov matrix + mat `V'=`sigmasq'*`XhXhinv' + scalar `rankV'=rowsof(`XhXh') - diag0cnt(`XhXh') + } + else { +* IV cov matrix + mat `V'=`sigmasq'*`XPZXinv' + scalar `rankV'=rowsof(`XPZXinv') - diag0cnt(`XPZXinv') + } + mat `V'=(`V'+`V'')/2 + } +* LIML and k-class robust + else if ("`liml'`kclass'" ~= "") & ("`robust'`cluster'" ~= "") { + if "`coviv'"== "" { +* Use LIML or k-class cov matrix + mat `V'=`XhXhinv'*`XZ'*`ZZinv'*`S'*`Nprec'* /* + */ `ZZinv'*`XZ''*`XhXhinv' + } + else { +* Use IV cov matrix + mat `V'=`XPZXinv'*`XZ'*`ZZinv'*`S'*`Nprec'* /* + */ `ZZinv'*`XZ''*`XPZXinv' + } + mat `V'=(`V'+`V'')/2 + mat `tempmat'=syminv(`V') + scalar `rankV'=rowsof(`tempmat') - diag0cnt(`tempmat') + } +* Model df handled here since it depends on rank of V +* CUE handled separately + if "`cue'"=="" { + if "`noconstant'"=="" { + local df_m = `rankV' - 1 + } + else { + local df_m = `rankV' + } + } +* End of VCV block +******************************************************************************** +* Sargan-Hansen-Anderson-Rubin statistics +******************************************************************************************* +* Robust requires using gmm residuals; otherwise use iv residuals. CUE handled separately. +* b0 => return value of CUE objective function. b0 is efficient GMM. 
+ if ("`robust'`cluster'`kernel'" == "") & ("`cue'"=="") & ("`b0'`ewmatrix'"=="") { + mat `uZSinvZu'= (`s1Zu'/`Nprec')*`Sinv'*(`s1Zu''/`Nprec') + scalar `j' = `Nprec'*`uZSinvZu'[1,1] + } + if ("`robust'`cluster'`kernel'" ~= "") & ("`cue'"=="") & ("`b0'`ewmatrix'"=="") { + mat `uZSinvZu'= (`s2Zu'/`Nprec')*`Sinv'*(`s2Zu''/`Nprec') + scalar `j' = `Nprec'*`uZSinvZu'[1,1] + } + if "`b0'`ewmatrix'"~="" { + mat `uZSinvZu'= (`s1Zu'/`Nprec')*`Sinv'*(`s1Zu''/`Nprec') + scalar `j' = `Nprec'*`uZSinvZu'[1,1] + } + if "`liml'" != "" { +* Also save Anderson-Rubin overid stat if LIML +* Note dofminus is required because unlike Sargan and 2-step GMM J, doesn't derive from S + scalar `arubin'=(`Nprec'-`dofminus')*ln(`lambda') + } + +*************************************************************************************** +* Block for cue gmm +******************************************************************************************* + if "`cue'" != "" { +* Set up variables and options as globals + global IV_lhs "`lhs'" + global IV_inexog "`inexog1'" + global IV_endog "`endo1'" + global IV_exexog "`exexog1'" + global IV_wt "`wtexp'" + global IV_opt "`noconstant' `robust' `clopt' `bwopt' `kernopt' `dofmopt'" +* `gmm' not in IV_opt because cue+gmm not allowed +* Initial values use 2-step GMM if robust + if "`robust'`cluster'"~="" { + local init_opt "gmm" + } + tempname b_init temphold + capture _estimates hold `temphold', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + qui `ivreg2_cmd' $IV_lhs $IV_inexog ($IV_endog=$IV_exexog) $IV_wt /* + */ if `touse', $IV_opt `init_opt' noid +* Trap here if just-identified + if e(rankzz)>e(rankxx) { + if "`cueinit'"== "" { + mat `b_init'=e(b) + } + else { + mat `b_init'=`cueinit' + } +* Use ML for numerical optimization + ml model d0 `ivreg2_cmd'_cue ($IV_lhs = $IV_endog $IV_inexog, `noconstant') 
$IV_wt /* + */ if `touse', maximize init(`b_init') `cueoptions' /* + */ crittype(neg GMM obj function -J) /* + */ collinear nooutput nopreserve missing noscvars + } + else { +di in ye "Equation exactly-identified: CUE and 2-step GMM coincide" + } +* Remove equation number from column names + mat `B'=e(b) + mat colnames `B' = _: +* Last call to get vcv, j, Sinv etc. + qui `ivreg2_cmd' $IV_lhs $IV_inexog ($IV_endog=$IV_exexog) $IV_wt /* + */ if `touse', $IV_opt b0(`B') noid +* Save all results + mat `V'=e(V) + mat `S'=e(S) + mat `Sinv'=syminv(`S') + mat `W'=`Sinv' + + local rankS = e(rankS) + scalar `j'=e(j) + local df_m = e(df_m) + scalar `rankV'=e(rankV) + + if "`cluster'" != "" { + local N_clust=e(N_clust) + } + capture drop `yhat' + qui mat score double `yhat'=`B' if `touse' + qui gen double `gresid'=`lhs'-`yhat' + qui gen double `gresid2'=`gresid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `gresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `rss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `sigmasq'=`rss'/(`Nprec'-`dofminus') + + macro drop IV_lhs IV_inexog IV_endog IV_exexog IV_wt IV_opt + capture _estimates unhold `temphold' + } + +******************************************************************************************* +* RSS, counts, dofs, F-stat, small-sample corrections +******************************************************************************************* + scalar `rmse'=sqrt(`sigmasq') + if "`noconstant'"=="" { + scalar `mss'=`yyc' - `rss' + } + else { + scalar `mss'=`yy' - `rss' + } + +* Counts modified to include constant if appropriate + if "`noconstant'"=="" { + local iv1_ct = `iv1_ct' + 1 + local rhs1_ct = `rhs1_ct' + 1 + } +* Correct count of rhs variables accounting for dropped collinear vars +* Count includes constant + + local rhs_ct = rowsof(`XX') - diag0cnt(`XXinv') + + if "`cluster'"=="" { +* Residual dof adjusted for dofminus + local df_r = `N' - `rhs_ct' - `dofminus' + } + else { +* To match 
Stata, subtract 1 (why 1 and not `rhs_ct' is a mystery) + local df_r = `N_clust' - 1 + } + +* Sargan-Hansen J dof and p-value +* df=0 doesn't guarantee j=0 since can be call to get value of CUE obj fn + local jdf = `iv_ct' - `rhs_ct' + if `jdf' == 0 & "`b0'"=="" { + scalar `j' = 0 + } + else { + scalar `jp' = chiprob(`jdf',`j') + } + if "`liml'"~="" { + scalar `arubinp' = chiprob(`jdf',`arubin') + } + +* Small sample corrections for var-cov matrix. +* If robust, the finite sample correction is N/(N-K), and with no small +* we change this to 1 (a la Davidson & MacKinnon 1993, p. 554, HC0). +* If cluster, the finite sample correction is (N-1)/(N-K)*M/(M-1), and with no small +* we change this to 1 (a la Wooldridge 2002, p. 193), where M=number of clusters. +* In the adj of the V matrix for non-small, we use Nprec instead of N because +* iweights rounds off N. Note that iweights are not allowed with robust +* but we use Nprec anyway to maintain consistency of code. + + if "`small'" != "" { + if "`cluster'"=="" { + matrix `V'=`V'*(`Nprec'-`dofminus')/(`Nprec'-`rhs_ct'-`dofminus') + } + else { + matrix `V'=`V'*(`Nprec'-1)/(`Nprec'-`rhs_ct') /* + */ * `N_clust'/(`N_clust'-1) + } + scalar `sigmasq'=`rss'/(`Nprec'-`rhs_ct'-`dofminus') + scalar `rmse'=sqrt(`sigmasq') + } + + scalar `r2u'=1-`rss'/`yy' + scalar `r2c'=1-`rss'/`yyc' + if "`noconstant'"=="" { + scalar `r2'=`r2c' + scalar `r2_a'=1-(1-`r2')*(`Nprec'-1)/(`Nprec'-`rhs_ct'-`dofminus') + } + else { + scalar `r2'=`r2u' + scalar `r2_a'=1-(1-`r2')*`Nprec'/(`Nprec'-`rhs_ct'-`dofminus') + } + +* Fstat +* To get it to match Stata's, must post separately with dofs and then do F stat by hand +* in case weights generate non-integer obs and dofs +* Create copies so they can be posted + tempname FB FV + mat `FB'=`B' + mat `FV'=`V' + capture ereturn post `FB' `FV' +* If the cov matrix wasn't positive definite, the post fails with error code 506 + local rc = _rc + if `rc' != 506 { + local Frhs1 `rhs1' + capture test `Frhs1' + if 
"`small'" == "" { + if "`cluster'"=="" { + capture scalar `F' = r(chi2)/`df_m' * `df_r'/(`Nprec'-`dofminus') + } + else { + capture scalar `F' = r(chi2)/`df_m' * /* +* fwl_ct used here so that F-stat matches test stat from regression with no FWL and small + */ (`N_clust'-1)/`N_clust' * (`Nprec'-`rhs_ct'-`fwl_ct')/(`Nprec'-1) + } + } + else { + capture scalar `F' = r(chi2)/`df_m' + } + capture scalar `Fp'=Ftail(`df_m',`df_r',`F') + capture scalar `Fdf2'=`df_r' + } + +* If j==. or vcv wasn't full rank, then vcv problems and F is meaningless + if `j' == . | `rc'==506 { + scalar `F' = . + scalar `Fp' = . + } + +* End of counts, dofs, F-stat, small sample corrections +******************************************************************************************* +* orthog option: C statistic (difference of Sargan statistics) +******************************************************************************************* +* Requires j dof from above + if "`orthog'"!="" { + tempname cj cstat cstatp +* Initialize cstat + scalar `cstat' = 0 +* Each variable listed must be in instrument list. 
+* To avoid overwriting, use cendo, cinexog1, cexexog, cendo_ct, cex_ct + local cendo1 "`endo1'" + local cinexog1 "`inexog1'" + local cexexog1 "`exexog1'" + local cinsts1 "`insts1'" + local crhs1 "`rhs1'" + local clist1 "`orthog'" + local clist_ct : word count `clist1' + +* Check to see if c-stat vars are in original list of all ivs +* cinexog1 and cexexog1 are after c-stat exog list vars have been removed +* cendo1 is endo1 after included exog being tested has been added + foreach x of local clist1 { + local llex_ct : word count `cexexog1' + Subtract cexexog1 : "`cexexog1'" "`x'" + local cex1_ct : word count `cexexog1' + local ok = `llex_ct' - `cex1_ct' + if (`ok'==0) { +* Not in excluded, check included and add to endog list if it appears + local llin_ct : word count `cinexog1' + Subtract cinexog1 : "`cinexog1'" "`x'" + local cin1_ct : word count `cinexog1' + local ok = `llin_ct' - `cin1_ct' + if (`ok'==0) { +* Not in either list +di in r "Error: `x' listed in orthog() but does not appear as exogenous." + error 198 + } + else { + local cendo1 "`cendo1' `x'" + } + } + } + +* If robust, HAC/AC or GMM (but not LIML or IV), create optimal weighting matrix to pass to ivreg2 +* by extracting the submatrix from the full S and then inverting. +* This guarantees the C stat will be non-negative. See Hayashi (2000), p. 220. +* Calculate C statistic with recursive call to ivreg2 +* Collinearities may cause problems, hence -capture-. 
+* smatrix works generally, including homoskedastic case with Sargan stat + capture { + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + if "`kernel'" != "" { + local bwopt "bw(`bw')" + local kernopt "kernel(`kernel')" + } +* clopt is omitted because it requires calculation of numbers of clusters, which is done +* only when S matrix is calculated + capture `ivreg2_cmd' `lhs' `cinexog1' /* + */ (`cendo1'=`cexexog1') /* + */ if `touse' `wtexp', `noconstant' /* + */ `options' `small' `robust' /* + */ `gmm' `bwopt' `kernopt' `dofmopt' `sw' `swpsd' /* + */ smatrix("`S'") noid + local rc = _rc + if `rc' == 481 { + scalar `cstat' = 0 + local cstatdf = 0 + } + else { + scalar `cj'=e(j) + local cjdf=e(jdf) + } + scalar `cstat' = `j' - `cj' + local cstatdf = `jdf' - `cjdf' + _estimates unhold `ivest' + scalar `cstatp'= chiprob(`cstatdf',`cstat') +* Collinearities may cause C-stat dof to differ from the number of variables in orthog() +* If so, set cstat=0 + if `cstatdf' != `clist_ct' { + scalar `cstat' = 0 + } + } + } +* End of orthog block + +******************************************************************************************* +* Endog option +******************************************************************************************* +* Uses recursive call with orthog + if "`endogtest'"!="" { + tempname estat estatp +* Initialize estat + scalar `estat' = 0 +* Each variable to test must be in endo list. +* To avoid overwriting, use eendo, einexog1, etc. 
+ local eendo1 "`endo1'" + local einexog1 "`inexog1'" + local einsts1 "`insts1'" + local elist1 "`endogtest'" + local elist_ct : word count `elist1' +* Check to see if endog test vars are in original endo1 list of endogeneous variables +* eendo1 and einexog1 are after endog test vars have been removed from endo and added to inexog + foreach x of local elist1 { + local llendo_ct : word count `eendo1' + local eendo1 : list eendo1 - x + local eendo1_ct : word count `eendo1' + local ok = `llendo_ct' - `eendo1_ct' + if (`ok'==0) { +* Not in endogenous list +di in r "Error: `x' listed in endog() but does not appear as endogenous." + error 198 + } + else { + local einexog1 "`einexog1' `x'" + } + } +* Recursive call to ivreg2 using orthog option to obtain endogeneity test statistic +* Collinearities may cause problems, hence -capture-. + capture { + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + capture `ivreg2_cmd' `lhs' `einexog1' /* + */ (`eendo1'=`exexog1') if `touse' /* + */ `wtexp', `noconstant' `robust' `clopt' /* + */ `gmm' `liml' `bwopt' `kernopt' /* + */ `small' `dofmopt' `sw' `swpsd' `options' /* + */ orthog(`elist1') noid + local rc = _rc + if `rc' == 481 { + scalar `estat' = 0 + local estatdf = 0 + } + else { + scalar `estat'=e(cstat) + local estatdf=e(cstatdf) + scalar `estatp'=e(cstatp) + } + _estimates unhold `ivest' +* Collinearities may cause endog stat dof to differ from the number of variables in endog() +* If so, set estat=0 + if `estatdf' != `elist_ct' { + scalar `estat' = 0 + } + } +* End of endogeneity test block + } + +******************************************************************************************* +* Rank identification and redundancy block +******************************************************************************************* + if `endo1_ct' > 
0 & "`noid'"=="" { + tempname ccmat ccrealev ccimagev cc idstat iddf idp + tempname cdchi2 cdchi2p ccf cdf cdeval cd + mat `ccmat' = `XXinv'*`XPZX' +* Need only upper LHS block, which corresponds to included endogenous + mat `ccmat' = `ccmat'[1..`endo1_ct',1..`endo1_ct'] + mat eigenvalues `ccrealev' `ccimagev' = `ccmat' +* Real eigenvalues are the squared canonical correlations +* The first reported cc is NOT necessarily the smallest (with mat symeigen the smallest is last). +* Sort so smallest is first. + vecsort `ccrealev' + scalar `cc'=`ccrealev'[1,1] +* dof adjustment needed because it doesn't use the adjusted S + scalar `idstat' = -(`Nprec'-`dofminus')*ln(1-`cc') + local iddf = `iv_ct' - (`rhs_ct'-1) + scalar `idp' = chiprob(`iddf',`idstat') +* Cragg-Donald, Anderson etc. + scalar `cd'=`cc'/(1-`cc') +* dofminus used because it doesn't use adjusted S + local ddf = `Nprec'-`iv_ct'-`dofminus' + local ndf = `exex1_ct' + scalar `cdchi2'=`cd'*(`Nprec'-`dofminus') + scalar `cdchi2p' = chiprob(`iddf',`cdchi2') + scalar `cdf' =`cd'*`ddf'/`ndf' + scalar `ccf' =`cc'*`ddf'/`ndf' +* Save evs in CD style + local evcols = colsof(`ccrealev') + mat `cdeval' = J(1,`evcols',.) 
+ forval i=1/`evcols' { + mat `cdeval'[1,`i'] = `ccrealev'[1,`i'] / (1 - `ccrealev'[1,`i']) + } + } + +* LR redundancy test + if `endo1_ct' > 0 & "`redundant'" ~= "" & "`noid'"=="" { +* Obtain Anderson zero rank (totally unidentified) statistic for full set of instruments + tempname unidstat + scalar `unidstat'=0 + forvalues thiscol=1(1)`endo1_ct' { +* dof adjustment needed because it doesn't use the adjusted S + scalar `unidstat'=`unidstat'-(`Nprec'-`dofminus')*ln(1-`ccrealev'[1,`thiscol']) + } +* Diff between this and the stat using the irrelevant excl IVs is chi2 with dof=#endog*#tested + local redlist1 "`redundant'" +* XZcols are the Z columns, so can use for ZZ too + local rXZcols : colnames `XZ' + foreach x of local redlist1 { + local riv_ct_a : word count `rXZcols' + Subtract rXZcols : "`rXZcols'" "`x'" + local riv_ct_b : word count `rXZcols' + if `riv_ct_a' == `riv_ct_b' { +* Not in list +di in r "Error: `x' listed in redundant() but does not appear as excluded instrument." + error 198 + } + } + tempname rXZ rZZ rZZtemp rZZinv rXPZX rccmat rccrealev rccimagev runidmat runidstat + foreach cn of local rXZcols { + mat `rXZ' = nullmat(`rXZ') , `XZ'[1...,"`cn'"] + mat `rZZtemp' = nullmat(`rZZtemp') , `ZZ'[1...,"`cn'"] + } + foreach cn of local rXZcols { + mat `rZZ' = nullmat(`rZZ') \ `rZZtemp'["`cn'",1...] 
+ } + mat `rZZ'=(`rZZ'+`rZZ'')/2 + mat `rZZinv' = syminv(`rZZ') + mat `rXPZX' = `rXZ'*`rZZinv'*`rXZ'' + mat `rccmat' = `XXinv'*`rXPZX' + mat `rccmat' = `rccmat'[1..`endo1_ct',1..`endo1_ct'] + mat eigenvalues `rccrealev' `rccimagev' = `rccmat' + scalar `runidstat'=0 + forvalues thiscol=1(1)`endo1_ct' { +* dof adjustment needed because it doesn't use the adjusted S + scalar `runidstat'=`runidstat'-(`Nprec'-`dofminus')*ln(1-`rccrealev'[1,`thiscol']) + } + tempname redstat redp + local riv_ct = rowsof(`rZZ') - diag0cnt(`rZZinv') + if `riv_ct' < `rhs_ct' { +* Not in list +di in r "Error: specification with redundant() option is unidentified (fails rank condition)" + error 198 + } + local redlist_ct=`iv_ct'-`riv_ct' + scalar `redstat' = `unidstat' - `runidstat' + local reddf = `endo1_ct'*`redlist_ct' + scalar `redp' = chiprob(`reddf',`redstat') + } + +* End of identification stats block + +******************************************************************************************* +* Error-checking block +******************************************************************************************* + +* Check if adequate number of observations + if `N' <= `iv_ct' { +di in r "Error: number of observations must be greater than number of instruments" +di in r " including constant." + error 2001 + } + +* Check if robust VCV matrix is of full rank + if "`gmm'`robust'`cluster'`kernel'" != "" { +* Robust covariance matrix not of full rank means either a singleton dummy or too few +* clusters (in which case the indiv SEs are OK but no F stat or 2-step GMM is possible), +* or there are too many AC/HAC-lags, or the HAC covariance estimator +* isn't positive definite (possible with truncated and Tukey-Hanning kernels) + if `rankS' < `iv_ct' { +* If two-step GMM then exit with error ... + if "`gmm'" != "" { +di in r "Error: estimated covariance matrix of moment conditions not of full rank;" +di in r " cannot calculate optimal weighting matrix for GMM estimation." 
+di in r "Possible causes:" + if "`cluster'" != "" { +di in r " number of clusters insufficient to calculate optimal weighting matrix" + } + if "`kernel'" != "" { +di in r " covariance matrix of moment conditions not positive definite" +di in r " covariance matrix uses too many lags" + } +di in r " singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)" +di in r "-fwl- option may address problem. See help " _c +di in smcl "{help ivreg2}". + error 498 + } +* Estimation isn't two-step GMM so continue but J, F, and C stat (if present) all meaningless +* Must set Sargan-Hansen j = missing so that problem can be reported in output + else { + scalar `j' = . + if "`orthog'"!="" { + scalar `cstat' = . + } + if "`endogtest'"!="" { + scalar `estat' = . + } + } + } + } + +* End of error-checking block +******************************************************************************************** +* Reduced form and first stage regression options +******************************************************************************************* +* Relies on proper count of (non-collinear) IVs generated earlier. +* Note that nocons option + constant in instrument list means first-stage +* regressions are reported with nocons option. First-stage F-stat therefore +* correctly includes the constant as an explanatory variable. + + if "`rf'`saverf'`first'`ffirst'`savefirst'" != "" & (`endo1_ct' > 0) & "`noid'"=="" { +* Reduced form needed for AR first-stage test stat. Also estimated if requested. 
+ tempname archi2 archi2p arf arfp ardf ardf_r sstat sstatp sstatdf + doRF "`lhs'" "`inexog1'" "`exexog1'" /* + */ `touse' `"`wtexp'"' `"`noconstant'"' `"`robust'"' /* + */ `"`clopt'"' `"`bwopt'"' `"`kernopt'"' /* + */ `"`saverfprefix'"' /* + */ "`dofminus'" `"`sw'"' `"`swpsd'"' "`ivreg2_cmd'" + scalar `archi2'=r(archi2) + scalar `archi2p'=r(archi2p) + scalar `arf'=r(arf) + scalar `arfp'=r(arfp) + scalar `ardf'=r(ardf) + scalar `ardf_r'=r(ardf_r) + local rfeq "`r(rfeq)'" +* Drop saved rf results if needed only for first-stage estimations + if "`rf'`saverf'" == "" { + capture estimates drop `rfeq' + } +* Stock-Wright S statistic. Equiv to J LM test of exexog. +* First block handles all cases except no exog regressors; second block uses GMM obj function, +* which works without fwl because b0 is only endog regressors. + + if `inexog1_ct' + `cons_ct' > 0 { + qui `ivreg2_cmd' `lhs' `inexog' (=`exexog') `wtexp' if `touse', /* + */ `noconstant' dofminus(`dofminus') /* + */ `robust' `clopt' `bwopt' `kernopt' `sw' `swpsd' + } + else { + tempname b1 + mat `b1'=J(1,`endo1_ct',0) + matrix colnames `b1' = `endo1' + qui `ivreg2_cmd' `lhs' (`endo1'=`exexog') `wtexp' if `touse', /* + */ b0(`b1') noconstant dofminus(`dofminus') /* + */ `robust' `clopt' `bwopt' `kernopt' `sw' `swpsd' + } + scalar `sstat'=e(j) + scalar `sstatdf'=`ardf' + scalar `sstatp'=chiprob(`sstatdf',`sstat') + } + + if "`first'`ffirst'`savefirst'" != "" & (`endo1_ct' > 0) { + +* Godfrey method of Shea partial R2 uses IV and OLS estimates without robust vcvs: +* Partial R2 = OLS V[d,d] / IV V[d,d] * IV s2 / OLS s2 +* where d,d is the diagonal element corresponding to the endog regressor +* ... 
but this simplifies to matrices that have already been calculated: +* = XXinv[d,d] / XPZXinv[d,d] + tempname godfrey sols siv + tempname firstmat sheapr2 pr2 pr2F pr2p + mat `godfrey' = J(1,`endo1_ct',0) + mat colnames `godfrey' = `endo1' + mat rownames `godfrey' = "sheapr2" + local i 1 + foreach var of local endo1 { + mat `sols'=`XXinv'["`var'","`var'"] + mat `siv'=`XPZXinv'["`var'","`var'"] + mat `godfrey'[1,`i'] = `sols'[1,1]/`siv'[1,1] + local i = `i'+1 + } + + if `iv1_ct' > `iv_ct' { +di +di in gr "Warning: collinearities detected among instruments" +di in gr "1st stage tests of excluded exogenous variables may be incorrect" + } + + doFirst "`endo1'" "`inexog1'" "`exexog1'" /* + */ `touse' `"`wtexp'"' `"`noconstant'"' `"`robust'"' /* + */ `"`clopt'"' `"`bwopt'"' `"`kernopt'"' /* + */ `"`savefprefix'"' /* + */ `"`dofmopt'"' `"`sw'"' `"`swpsd'"' "`ivreg2_cmd'" + + local firsteqs "`r(firsteqs)'" + capture mat `firstmat'=`godfrey' \ r(firstmat) + if _rc != 0 { +di in ye "Warning: missing values encountered; first stage regression results not saved" + } + } +* End of first-stage regression code +********************************************************************************************** +* Post and display results. +******************************************************************************************* + +* restore data if preserved for fwl option + if "`fwl'" != "" { + restore + } + +* NB: Would like to use -Nprec- in obs() in case weights generate non-integer obs +* but Stata complains. 
Using -Nprec- with dof() makes no difference - seems to round it + if "`small'"!="" { + local NminusK = `N'-`rhs_ct' + capture ereturn post `B' `V', dep(`depname') obs(`N') esample(`touse') /* + */ dof(`NminusK') + } + else { + capture ereturn post `B' `V', dep(`depname') obs(`N') esample(`touse') + } + local rc = _rc + if `rc' == 504 { +di in red "Error: estimated variance-covariance matrix has missing values" + exit 504 + } + if `rc' == 506 { +di in red "Error: estimated variance-covariance matrix not positive-definite" + exit 506 + } + if `rc' > 0 { +di in red "Error: estimation failed - could not post estimation results" + exit `rc' + } + +* changed next from `endo1' to `endo' 2Aug06 MES + ereturn local instd `endo' + local insts : colnames `S' + local insts : subinstr local insts "_cons" "" + ereturn local insts `insts' + ereturn local inexog `inexog' + ereturn local exexog `exexog' + ereturn local fwl `fwl' + ereturn scalar inexog_ct=`inexog1_ct' + ereturn scalar exexog_ct=`exex1_ct' + ereturn scalar endog_ct =`endo1_ct' + if "`collin'`ecollin'`dups'`fwlcons'" != "" { + ereturn local collin `collin' + ereturn local ecollin `ecollin' + ereturn local dups `dups' + ereturn local instd1 `endo1' + ereturn local inexog1 `inexog1' + ereturn local exexog1 `exexog1' + ereturn local fwl1 `fwl1' + } + + if "`smatrix'" == "" { + ereturn matrix S `S' + } + else { +* Create a copy so posting doesn't zap the original + tempname Scopy + mat `Scopy'=`smatrix' + ereturn matrix S `Scopy' + } + +* No weighting matrix defined for LIML and kclass + if "`wmatrix'"=="" & "`liml'`kclass'"=="" { + ereturn matrix W `W' + } + else if "`liml'`kclass'"=="" { +* Create a copy so posting doesn't zap the original + tempname Wcopy + mat `Wcopy'=`wmatrix' + ereturn matrix W `Wcopy' + } + + if "`kernel'"!="" { + ereturn local kernel "`kernel'" + ereturn scalar bw=`bw' + ereturn local tvar "`tvar'" + if "`ivar'" ~= "" { + ereturn local ivar "`ivar'" + } + } + + if "`small'"!="" { + ereturn 
scalar df_r=`df_r' + ereturn local small "small" + } + + if "`cluster'"!="" { + ereturn scalar N_clust=`N_clust' + ereturn local clustvar `cluster' + } + + if "`robust'`cluster'" != "" { + ereturn local vcetype "Robust" + } + + ereturn scalar df_m=`df_m' + ereturn scalar r2=`r2' + ereturn scalar rmse=`rmse' + ereturn scalar rss=`rss' + ereturn scalar mss=`mss' + ereturn scalar r2_a=`r2_a' + ereturn scalar F=`F' + ereturn scalar Fp=`Fp' + ereturn scalar Fdf2=`Fdf2' + ereturn scalar yy=`yy' + ereturn scalar yyc=`yyc' + ereturn scalar r2u=`r2u' + ereturn scalar r2c=`r2c' + ereturn scalar rankzz=`iv_ct' + ereturn scalar rankxx=`rhs_ct' + if "`gmm'`robust'`cluster'`kernel'" != "" { + ereturn scalar rankS=`rankS' + } + ereturn scalar rankV=`rankV' + ereturn scalar ll = -0.5 * (`Nprec'*ln(2*_pi) + `Nprec'*ln(`rss'/`Nprec') + `Nprec') + +* Always save J. Also save as Sargan if homoskedastic; save A-R if LIML. + ereturn scalar j=`j' + ereturn scalar jdf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar jp=`jp' + } + if ("`robust'`cluster'"=="") { + ereturn scalar sargan=`j' + ereturn scalar sargandf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar sarganp=`jp' + } + } + if "`liml'"!="" { + ereturn scalar arubin=`arubin' + ereturn scalar arubindf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar arubinp=`arubinp' + } + } + + if "`orthog'"!="" { + ereturn scalar cstat=`cstat' + if `cstat'!=0 & `cstat' != . { + ereturn scalar cstatp=`cstatp' + ereturn scalar cstatdf=`cstatdf' + ereturn local clist `clist1' + } + } + + if "`endogtest'"!="" { + ereturn scalar estat=`estat' + if `estat'!=0 & `estat' != . 
{ + ereturn scalar estatp=`estatp' + ereturn scalar estatdf=`estatdf' + ereturn local elist `elist1' + } + } + + if `endo1_ct' > 0 & "`noid'"=="" { + ereturn scalar idstat=`idstat' + ereturn scalar iddf=`iddf' + ereturn scalar idp=`idp' + ereturn scalar cd=`cd' + ereturn scalar cdf=`cdf' + ereturn matrix ccev=`ccrealev' + capture ereturn matrix cdev `cdeval' + } + + if "`redundant'"!="" & "`noid'"=="" { + ereturn scalar redstat=`redstat' + ereturn scalar redp=`redp' + ereturn scalar reddf=`reddf' + ereturn local redlist `redlist1' + } + + if "`first'`ffirst'`savefirst'" != "" & `endo1_ct'>0 & "`noid'"=="" { +* Capture here because firstmat empty if mvs encountered in 1st stage regressions + capture ereturn matrix first `firstmat' + ereturn scalar cdchi2=`cdchi2' + ereturn scalar cdchi2p=`cdchi2p' + ereturn scalar arf=`arf' + ereturn scalar arfp=`arfp' + ereturn scalar archi2=`archi2' + ereturn scalar archi2p=`archi2p' + ereturn scalar ardf=`ardf' + ereturn scalar ardf_r=`ardf_r' + ereturn scalar sstat=`sstat' + ereturn scalar sstatp=`sstatp' + ereturn scalar sstatdf=`sstatdf' + ereturn local firsteqs `firsteqs' + } + if "`rf'`saverf'" != "" & `endo1_ct'>0 { + ereturn local rfeq `rfeq' + } + + ereturn local depvar `lhs' + + if "`liml'"!="" { + ereturn local model "liml" + ereturn scalar kclass=`kclass2' + ereturn scalar lambda=`lambda' + if `fuller' > 0 & `fuller' < . 
{ + ereturn scalar fuller=`fuller' + } + } + else if "`kclass'" != "" { + ereturn local model "kclass" + ereturn scalar kclass=`kclass2' + } + else if "`gmm'`cue'`wmatrix'"=="" { + if "`endo1'" == "" { + ereturn local model "ols" + } + else { + ereturn local model "iv" + } + } + else if "`cue'"~="" { + ereturn local model "cue" + } + else if "`wmatrix'"~="" { + ereturn local model "gmm" + } + else if "`gmm'"~="" { + ereturn local model "gmm2s" + } + else { +* Should never enter here + ereturn local model "unknown" + } + + if "`weight'" != "" { + ereturn local wexp "=`exp'" + ereturn local wtype `weight' + } + ereturn local cmd `ivreg2_cmd' + ereturn local version `lversion' + if "`noconstant'"!="" { + ereturn scalar cons=0 + } + else { + ereturn scalar cons=1 + } + if `fwl_ct'>0 { + ereturn scalar fwlcons=`fwlcons' + } + ereturn local predict "`ivreg2_cmd'_p" + + if "`e(model)'"=="gmm2s" { + local title2 "2-Step GMM estimation" + } + if "`e(model)'"=="gmm" { + local title2 "GMM estimation with user-supplied weighting matrix" + } + if "`e(model)'"=="cue" { + local title2 "CUE estimation" + } + if "`e(model)'"=="ols" { + local title2 "OLS estimation" + } + if "`e(model)'"=="iv" { + local title2 "IV (2SLS) estimation" + } + if "`e(model)'"=="liml" { + local title2 "LIML estimation" + } + if "`e(model)'"=="kclass" { + local title2 "k-class estimation" + } + if "`e(vcetype)'" == "Robust" { + local hacsubtitle1 "heteroskedasticity" + } + if "`e(kernel)'"!="" { + local hacsubtitle3 "autocorrelation" + } + if "`e(clustvar)'"!="" { + local hacsubtitle3 "clustering on `e(clustvar)'" + } + if "`hacsubtitle1'"~="" & "`hacsubtitle3'" ~= "" { + local hacsubtitle2 " and " + } + if "`title'"=="" { + ereturn local title "`title1'`title2'" + } + else { + ereturn local title "`title'" + } + if "`subtitle'"~="" { + ereturn local subtitle "`subtitle'" + } + local hacsubtitle "`hacsubtitle1'`hacsubtitle2'`hacsubtitle3'" + if "`hacsubtitle'"~="" { + ereturn local hacsubtitle "Statistics 
robust to `hacsubtitle'" + } + if "`sw'"~="" & "`swpsd'"=="" { + ereturn local hacsubtitle "Stock-Watson heteroskedastic-robust statistics (BETA VERSION)" + } + if "`swpsd'"~="" { + ereturn local hacsubtitle "Stock-Watson psd heteroskedastic-robust statistics (BETA VERSION)" + } + } + +******************************************************************************************* +* Display results unless ivreg2 called just to generate stats or nooutput option + + if "`nooutput'" == "" { + if "`savefirst'`saverf'" != "" { + DispStored `"`saverf'"' `"`savefirst'"' `"`ivreg2_cmd'"' + } + if "`rf'" != "" { + DispRF + } + if "`first'" != "" { + DispFirst `"`ivreg2_cmd'"' + } + if "`first'`ffirst'" != "" { + DispFFirst `"`ivreg2_cmd'"' + } + if "`eform'"!="" { + local efopt "eform(`eform')" + } + DispMain `"`noheader'"' `"`plus'"' `"`efopt'"' `"`level'"' `"`nofooter'"' `"`ivreg2_cmd'"' + } + +* Drop first stage estimations unless explicitly saved or if replay + if "`savefirst'" == "" { + local firsteqs "`e(firsteqs)'" + foreach eqname of local firsteqs { + capture estimates drop `eqname' + } + ereturn local firsteqs + } + +* Drop reduced form estimation unless explicitly saved or if replay + if "`saverf'" == "" { + local eqname "`e(rfeq)'" + capture estimates drop `eqname' + ereturn local rfeq + } + +end + +******************************************************************************************* +* SUBROUTINES +******************************************************************************************* + +program define DispMain, eclass + args noheader plus efopt level nofooter helpfile + version 8.2 +* Prepare for problem resulting from rank(S) being insufficient +* Results from insuff number of clusters, too many lags in HAC, +* to calculate robust S matrix, HAC matrix not PD, singleton dummy, +* and indicated by missing value for j stat +* Macro `rprob' is either 1 (problem) or 0 (no problem) + capture local rprob ("`e(j)'"==".") + + if "`noheader'"=="" { + if 
"`e(title)'" ~= "" { +di in gr _n "`e(title)'" + local tlen=length("`e(title)'") +di in gr "{hline `tlen'}" + } + if "`e(subtitle)'" ~= "" { +di in gr "`e(subtitle)'" + } + if "`e(model)'"=="liml" | "`e(model)'"=="kclass" { +di in gr "k =" %7.5f `e(kclass)' + } + if "`e(model)'"=="liml" { +di in gr "lambda =" %7.5f `e(lambda)' + } + if e(fuller) > 0 & e(fuller) < . { +di in gr "Fuller parameter=" %-5.0f `e(fuller)' + } + if "`e(hacsubtitle)'" ~= "" { +di in gr _n "`e(hacsubtitle)'" + } + if "`e(kernel)'"!="" { +di in gr " kernel=`e(kernel)'; bandwidth=`e(bw)'" +di in gr " time variable (t): " in ye e(tvar) + if "`e(ivar)'" != "" { +di in gr " group variable (i): " in ye e(ivar) + } + } + di + if "`e(clustvar)'"!="" { +di in gr "Number of clusters (" "`e(clustvar)'" ") = " in ye %-4.0f e(N_clust) _continue + } + else { +di in gr " " _continue + } +di in gr _col(55) "Number of obs = " in ye %8.0f e(N) + + if "`e(clustvar)'"=="" { + local Fdf2=e(N)-e(rankxx) + } + else { + local Fdf2=e(N_clust)-1 + } + +di in gr _c _col(55) "F(" %3.0f e(df_m) "," %6.0f e(Fdf2) ") = " + if e(F) < 99999 { +di in ye %8.2f e(F) + } + else { +di in ye %8.2e e(F) + } +di in gr _col(55) "Prob > F = " in ye %8.4f e(Fp) + +di in gr "Total (centered) SS = " in ye %12.0g e(yyc) _continue +di in gr _col(55) "Centered R2 = " in ye %8.4f e(r2c) +di in gr "Total (uncentered) SS = " in ye %12.0g e(yy) _continue +di in gr _col(55) "Uncentered R2 = " in ye %8.4f e(r2u) +di in gr "Residual SS = " in ye %12.0g e(rss) _continue +di in gr _col(55) "Root MSE = " in ye %8.4g e(rmse) +di + } + +* Display coefficients etc. 
+* Unfortunate but necessary hack here: to suppress message about cluster adjustment of +* standard error, clear e(clustvar) and then reset it after display + local cluster `e(clustvar)' + ereturn local clustvar + ereturn display, `plus' `efopt' level(`level') + ereturn local clustvar `cluster' + +* Display 1st footer with identification stats +* Footer not displayed if -nofooter- option or if pure OLS, i.e., model="ols" and Sargan-Hansen=0 + if ~("`nofooter'"~="" | (e(model)=="ols" & (e(sargan)==0 | e(j)==0))) { + +* Report Anderson rank ID test + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##cancortest:Anderson canon. corr. LR statistic}" +di in gr _c " (underidentification test):" +di in ye _col(71) %8.3f e(idstat) +di in gr _col(52) "Chi-sq(" in ye e(iddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(idp) +* LR IV redundancy statistic + if "`e(redlist)'"!="" { +di in gr "-redundant- option:" +di in smcl _c "{help `helpfile'##redtest:LR IV redundancy test}" +di in gr _c " (redundancy of specified instruments):" +di in ye _col(71) %8.3f e(redstat) +di in gr _col(52) "Chi-sq(" in ye e(reddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(redp) +di in gr "Instruments tested: " _c + Disp `e(redlist)', _col(23) + } + if "`e(vcetype)'"=="Robust" | "`e(kernel)'"~="" { +di in gr "Test statistic(s) not robust" + } +di in smcl in gr "{hline 78}" + } +* Report Cragg-Donald statistic + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##cdtest:Cragg-Donald F statistic}" +di in gr _c " (weak identification test):" +di in ye _col(71) %8.3f e(cdf) +di in gr _c "Stock-Yogo weak ID test critical values:" + local cdmissing=1 + if "`e(model)'"=="iv" | "`e(model)'"=="gmm2s" | "`e(model)'"=="gmm" { + cdsy, type(ivbias5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(43) "5% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "30% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "15% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "25% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)==.) | "`e(model)'"=="cue" { + cdsy, type(limlsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "15% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(42) "20% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "25% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)<.) { + cdsy, type(fullrel5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "30% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(42) "30% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + if `cdmissing' { + di in gr _col(64) "" + } + else { + if "`e(vcetype)'"=="Robust" | "`e(kernel)'"~="" { +di in gr "Test statistic(s) not robust" + } + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + } + di in smcl in gr "{hline 78}" + } + +* Report either (a) Sargan-Hansen-C stats, or (b) robust covariance matrix problem + if `rprob' == 0 { +* Display overid statistic + if "`e(vcetype)'" == "Robust" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } + else { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } +di in ye _col(71) %8.3f e(j) + if e(rankxx) < e(rankzz) { +di in gr _col(52) "Chi-sq(" in ye e(jdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(jp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + +* Display orthog option: C statistic (difference of Sargan statistics) + if e(cstat) != . { +* If C-stat = 0 then warn, otherwise output + if e(cstat) > 0 { +di in gr "-orthog- option:" + if "`e(vcetype)'" == "Robust" { +di in gr _c "Hansen J statistic (eqn. excluding suspect orthog. conditions): " + } + else { +di in gr _c "Sargan statistic (eqn. 
excluding suspect orthogonality conditions):" + } +di in ye _col(71) %8.3f e(j)-e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(jdf)-e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f chiprob(e(jdf)-e(cstatdf),e(j)-e(cstat)) +di in smcl _c "{help `helpfile'##ctest:C statistic}" +di in gr _c " (exogeneity/orthogonality of suspect instruments): " +di in ye _col(71) %8.3f e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f e(cstatp) +di in gr "Instruments tested: " _c + Disp `e(clist)', _col(23) + } + if e(cstat) == 0 { +di in gr _n "Collinearity/identification problems in eqn. excl. suspect orthog. conditions:" +di in gr " C statistic not calculated for -orthog- option" + } + } + } + else { +* Problem exists with robust VCV - notify and list possible causes +di in r "Error: estimated covariance matrix of moment conditions not of full rank;" +di in r " overidentification statistic not reported, and standard errors and" +di in r " model tests should be interpreted with caution." +di in r "Possible causes:" + if e(N_clust) < e(rankzz) { +di in r " number of clusters insufficient to calculate robust covariance matrix" + } + if "`e(kernel)'" != "" { +di in r " covariance matrix of moment conditions not positive definite" +di in r " covariance matrix uses too many lags" + } +di in r " singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)" +di in smcl _c "{help `helpfile'##fwl:fwl}" +di in r " option may address problem." + } + +* Display endog option: endogeneity test statistic + if e(estat) != . 
{ +* If stat = 0 then warn, otherwise output + if e(estat) > 0 { +di in gr "-endog- option:" +di in smcl _c "{help `helpfile'##endogtest:Endogeneity test}" +di in gr _c " of endogenous regressors: " +di in ye _col(71) %8.3f e(estat) +di in gr _col(52) "Chi-sq(" in ye e(estatdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(estatp) +di in gr "Regressors tested: " _c + Disp `e(elist)', _col(23) + } + if e(estat) == 0 { +di in gr _n "Collinearity/identification problems in restricted equation:" +di in gr " Endogeneity test statistic not calculated for -endog- option" + } + } + + di in smcl in gr "{hline 78}" +* Display AR overid statistic if LIML and not robust + if "`e(model)'" == "liml" & "`e(vcetype)'" ~= "Robust" & "`e(kernel)'" == "" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (LR test of excluded instruments):" + } +di in ye _col(72) %7.3f e(arubin) + if e(rankxx) < e(rankzz) { +di in gr _col(52) "Chi-sq(" in ye e(arubindf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(arubinp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + di in smcl in gr "{hline 78}" + } + } + +* Display 2nd footer with variable lists + if "`nofooter'"=="" { + +* Warn about dropped instruments if any +* (Re-)calculate number of user-supplied instruments + local iv1_ct : word count `e(insts)' + local iv1_ct = `iv1_ct' + `e(cons)' + + if `iv1_ct' > e(rankzz) { +di in gr "Collinearities detected among instruments: " _c +di in gr `iv1_ct'-e(rankzz) " instrument(s) dropped" + } + + if "`e(collin)'`e(dups)'`e(fwlcons)'" != "" { +* If collinearities, duplicates or fwl, abbreviated varlists saved with a 1 at the end + local one "1" + } + if "`e(instd)'" != "" { + di in gr "Instrumented:" _c + Disp `e(instd`one')', _col(23) + } + if "`e(inexog)'" != "" { + di in gr 
"Included instruments:" _c + Disp `e(inexog`one')', _col(23) + } + if "`e(exexog)'" != "" { + di in gr "Excluded instruments:" _c + Disp `e(exexog`one')', _col(23) + } + if "`e(fwlcons)'" != "" { + if e(fwlcons) { + local fwl "`e(fwl`one')' _cons" + } + else { + local fwl "`e(fwl`one')'" + } +di in smcl _c "{help `helpfile'##fwl:Partialled-out (FWL)}" + di in gr ":" _c + Disp `fwl', _col(23) +di in gr _col(23) "nb: variable counts and small-sample adjustments" +di in gr _col(23) "do not include partialled-out variables." + } + if "`e(dups)'" != "" { + di in gr "Duplicates:" _c + Disp `e(dups)', _col(23) + } + if "`e(collin)'" != "" { + di in gr "Dropped collinear:" _c + Disp `e(collin)', _col(23) + } + if "`e(ecollin)'" != "" { + di in gr "Reclassified as exog:" _c + Disp `e(ecollin)', _col(23) + } + di in smcl in gr "{hline 78}" + } +end + +************************************************************************************** + +program define DispRF + version 8.2 + local eqname "`e(rfeq)'" + local depvar "`e(depvar)'" + local strlen : length local depvar + local strlen = `strlen'+25 +di +di in gr "Reduced-form regression: `e(depvar)'" +di in smcl in gr "{hline `strlen'}" + capture estimates replay `eqname' + if "`eqname'"=="" | _rc != 0 { +di in ye "Unable to display reduced-form regression of `e(depvar)'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + else { + estimates replay `eqname', noheader +di + } +end + +program define DispFirst + version 8.2 + args helpfile + tempname firstmat ivest sheapr2 pr2 F df df_r pvalue + + mat `firstmat'=e(first) + if `firstmat'[1,1] == . 
{ +di +di in ye "Unable to display first-stage estimates; macro e(first) is missing" + exit + } +di in gr _newline "First-stage regressions" +di in smcl in gr "{hline 23}" +di + local endo1 : colnames(`firstmat') + local nrvars : word count `endo1' + local firsteqs "`e(firsteqs)'" + local nreqs : word count `firsteqs' + if `nreqs' < `nrvars' { +di in ye "Unable to display all first-stage regressions." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + foreach eqname of local firsteqs { + _estimates hold `ivest' + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to list stored estimation `eqname'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." 
+di + } + else { + local vn "`e(depvar)'" +di in gr "First-stage regression of `vn':" + estimates replay `eqname', noheader + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] +di in smcl _c "{help `helpfile'##partialr2:Partial R-squared}" +di in gr " of excluded instruments: " _c +di in ye %8.4f `pr2'[1,1] +di in gr "Test of excluded instruments:" +di in gr " F(" %3.0f `df'[1,1] "," %6.0f `df_r'[1,1] ") = " in ye %8.2f `F'[1,1] +di in gr " Prob > F = " in ye %8.4f `pvalue'[1,1] +di + } + _estimates unhold `ivest' + } +end + +program define DispStored + args saverf savefirst helpfile + version 8.2 + if "`saverf'" != "" { + local eqlist "`e(rfeq)'" + } + if "`savefirst'" != "" { + local eqlist "`eqlist' `e(firsteqs)'" + } + local eqlist : list retokenize eqlist +di in gr _newline "Stored estimation results" +di in smcl in gr "{hline 25}" _c + capture estimates dir `eqlist' + if "`eqlist'" != "" & _rc == 0 { +* Estimates exist and can be listed + estimates dir `eqlist' + } + else if "`eqlist'" != "" & _rc != 0 { +di +di in ye "Unable to list stored estimations." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } +end + +program define DispFFirst + version 8.2 + args helpfile + tempname firstmat sheapr2 pr2 pr2F pr2p + mat `firstmat'=e(first) + if `firstmat'[1,1] == . 
{ +di +di in ye "Unable to display summary of first-stage estimates; macro e(first) is missing" + exit + } + local endo : colnames(`firstmat') + local nrvars : word count `endo' + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + local efirsteqs "`e(firsteqs)'" +di +di in gr _newline "Summary results for first-stage regressions" +di in smcl in gr "{hline 43}" +di + +di in gr _c "Variable |" +di in smcl _c _col(15) "{help `helpfile'##partialr2:Shea Partial R2}" +di in gr _c _col(31) "|" +di in smcl _c _col(35) "{help `helpfile'##partialr2:Partial R2}" +di in gr _c _col(49) "|" +di in smcl _c _col(52) "{help `helpfile'##partialr2:F}" +di in gr _c _col(53) "(" +di in ye _col(54) %3.0f `firstmat'[4,1] in gr "," in ye %6.0f `firstmat'[5,1] in gr ") P-value" + local i = 1 + while `i' <= `nrvars' { + local vn : word `i' of `endo' + local vnlen : length local vn + if `vnlen' > 12 { + local vn : piece 1 12 of "`vn'" + } + scalar `sheapr2'=`firstmat'[1,`i'] + scalar `pr2'=`firstmat'[2,`i'] + scalar `pr2F'=`firstmat'[3,`i'] + scalar `pr2p'=`firstmat'[6,`i'] +di in y %-12s "`vn'" _col(13) in gr "|" _col(17) in y %8.4f `sheapr2' _col(31) in gr "|" /* + */ _col(35) in y %8.4f `pr2' _col(49) in gr "|" /* + */ _col(53) in y %8.2f `pr2F' _col(67) %8.4f `pr2p' + local i = `i' + 1 + } +di + if "`robust'`cluster'" != "" { + if "`cluster'" != "" { + local rtype "cluster-robust" + } + else if "`kernel'" != "" { + local rtype "heteroskedasticity and autocorrelation-robust" + } + else { + local rtype "heteroskedasticity-robust" + } + } + else if "`kernel'" != "" { + local rtype "autocorrelation-robust" + } + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: first-stage F-stat `rtype'" +di + } + + tempname iddf idstat idp cdchi2 cdchi2p cdf + scalar `iddf'=e(iddf) + scalar `idstat'=e(idstat) + scalar `idp'=e(idp) + scalar `cdchi2'=e(cdchi2) + scalar `cdchi2p'=e(cdchi2p) + scalar `cdf'=e(cdf) +di in smcl "{help 
`helpfile'##s_first:Underidentification tests}" +di in gr "Ho: matrix of reduced form coefficients has rank=K-1 (underidentified)" +di in gr "Ha: matrix has rank=K (identified)" +di in gr _col(50) "Chi-sq(" in ye `iddf' in gr ")" _col(65) "P-value" +di in ye "Anderson canon. corr. -N*ln(1-CCEV) LR stat." _col(49) %8.2f `idstat' _col(65) %7.4f `idp' +di in ye "Cragg-Donald N*CDEV statistic" _col(49) %8.2f `cdchi2' _col(65) %7.4f `cdchi2p' + if "`robust'`cluster'`kernel'" != "" & e(endog_ct)==1 { + tempname rchi2 rchi2p +* Robust chi2 recreated from robust F and dofs of non-robust C-D chi2 and F + scalar `rchi2'=`pr2F'*`cdchi2'/`cdf' + scalar `rchi2p'=chiprob(`iddf',`rchi2') +di in ye "Robust chi-square statistic" _col(49) %8.2f `rchi2' _col(65) %7.4f `rchi2p' + } +di +di in smcl "{help `helpfile'##s_first:Weak identification tests}" +di in gr "Ho: equation is weakly identified" +di in ye "Cragg-Donald (N-L)*CDEV/L1 F-statistic" _col(49) %8.2f `cdf' + if "`robust'`cluster'`kernel'" != "" & e(endog_ct)==1 { +di in ye "Robust F-statistic" _col(49) %8.2f `pr2F' + } +di in gr "See main output for Cragg-Donald weak id test critical values" +di + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: Anderson and Cragg-Donald under- and weak identification stats not robust" + if "`robust'`cluster'`kernel'" != "" & e(endog_ct)==1 { +di in gr " Robust identification stats `rtype'" + } + } +di + tempname arf arfp archi2 archi2p ardf ardf_r + tempname sstat sstatp sstatdf +di in smcl "{help `helpfile'##wirobust:Weak-instrument-robust inference}" +di in gr "Tests of joint significance of endogenous regressors B1 in main equation" +di in gr "Ho: B1=0 and overidentifying restrictions are valid" +* Needs to be small so that adjusted dof is reflected in F stat + scalar `arf'=e(arf) + scalar `arfp'=e(arfp) + scalar `archi2'=e(archi2) + scalar `archi2p'=e(archi2p) + scalar `ardf'=e(ardf) + scalar `ardf_r'=e(ardf_r) + scalar `sstat'=e(sstat) + scalar `sstatp'=e(sstatp) + scalar 
`sstatdf'=e(sstatdf) +di in ye _c "Anderson-Rubin test" +di in gr _col(30) "F(" in ye `ardf' in gr "," in ye `ardf_r' in gr ")=" /* + */ _col(40) in ye %-7.2f `arf' _col(50) in gr "P-val=" in ye %6.4f `arfp' +di in ye _c "Anderson-Rubin test" +di in gr _col(30) "Chi-sq(" in ye `ardf' in gr ")=" /* + */ _col(40) in ye %-7.2f `archi2' _col(50) in gr "P-val=" in ye %6.4f `archi2p' +di in ye _c "Stock-Wright S statistic" +di in gr _col(30) "Chi-sq(" in ye `sstatdf' in gr ")=" /* + */ _col(40) in ye %-7.2f `sstat' _col(50) in gr "P-val=" in ye %6.4f `sstatp' + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: Test statistics `rtype'" + } +di + if "`cluster'" != "" { +di in gr "Number of clusters N_clust = " in ye %10.0f e(N_clust) + } +di in gr "Number of observations N = " in ye %10.0f e(N) +di in gr "Number of regressors K = " in ye %10.0f e(rankxx) +di in gr "Number of instruments L = " in ye %10.0f e(rankzz) +di in gr "Number of excluded instruments L1 = " in ye %10.0f e(ardf) +di + +end + +* Performs first-stage regressions + +program define doFirst, rclass + version 8.2 + args endog /* variable list (including depvar) + */ inexog /* list of included exogenous + */ exexog /* list of excluded exogenous + */ touse /* touse sample + */ weight /* full weight expression w/ [] + */ nocons /* + */ robust /* + */ clopt /* + */ bwopt /* + */ kernopt /* + */ savefprefix /* + */ dofmopt /* + */ sw /* + */ swpsd /* + */ ivreg2_cmd + + tokenize `endog' + tempname statmat statmat1 + local i 1 + while "``i''" != "" { + capture `ivreg2_cmd' ``i'' `inexog' `exexog' `weight' /* + */ if `touse', `nocons' `robust' `clopt' `bwopt' `kernopt' `dofmopt' `sw' `swpsd' small + if _rc ~= 0 { +* First-stage regression failed +di in ye "Unable to estimate first-stage regression of ``i''" + if _rc == 506 { +di in ye " var-cov matrix of first-stage regression of ``i'' not positive-definite" + } + } + else { +* First-stage regression successful +* Check if there is enough room to save results; 
leave one free. Allow for overwriting. +* Max is 20-1=19 for Stata 9.0 and earlier, 300-1=299 for Stata 9.1+ + if c(stata_version) < 9.1 { + local maxest=19 + } + else { + local maxest=299 + } + local vn "``i''" + local plen : length local savefprefix + local vlen : length local vn + if `plen'+`vlen' > 27 { + local vlen=27-`plen' + local vn : permname `vn', length(`vlen') +* Must create a variable so that permname doesn't reuse it + gen `vn'=0 + local dropvn "`dropvn' `vn'" + } + local eqname "`savefprefix'`vn'" + local eqname : subinstr local eqname "." "_" + qui estimates dir + local est_list "`r(names)'" + Subtract est_list : "`est_list'" "`eqname'" + local est_ct : word count `est_list' + if `est_ct' < `maxest' { + capture est store `eqname', title("First-stage regression: ``i''") + if _rc == 0 { + local firsteqs "`firsteqs' `eqname'" + } + } + else { +di +di in ye "Unable to store first-stage regression of ``i''." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + tempname rssall rssinc pr2 F p + scalar `rssall'=e(rss) + quietly test `exexog' + scalar `F'=r(F) + scalar `p'=r(p) + local df=r(df) + local df_r=r(df_r) +* 1st stage regression without excluded exogenous +* Use regress since need only RSS and handles all cases, including perverse ones (e.g. 
no regressors) + qui regress ``i'' `inexog' `weight' /* + */ if `touse', `nocons' + */ if `touse', `nocons' `robust' `clopt' `bwopt' `kernopt' `dofmopt' `sw' `swpsd' small + scalar `rssinc'=e(rss) +* NB: uncentered R2 for main regression is 1-rssall/yy; for restricted is 1-rssinc/yy; +* squared semipartial correlation=(rssinc-rssall)/yy=diff of 2 R2s +* Squared partial correlation (="partialled-out R2") + scalar `pr2'=(`rssinc'-`rssall')/`rssinc' +* End of first-stage successful block + } + capture { + mat `statmat1' = (`pr2' \ `F' \ `df' \ `df_r' \ `p') + mat colname `statmat1' = ``i'' + if `i'==1 { + mat `statmat'=`statmat1' + } + else { + mat `statmat' = `statmat' , `statmat1' + } + } + local i = `i' + 1 + } +* Drop any temporarily-created permname variables + if trim("`dropvn'")~="" { + foreach vn of varlist `dropvn' { + capture drop `vn' + } + } + capture mat rowname `statmat' = pr2 F df df_r pvalue + if _rc==0 { + return matrix firstmat `statmat' + } + return local firsteqs "`firsteqs'" +end + +program define doRF, rclass + version 8.2 + args lhs /* + */ inexog /* list of included exogenous + */ exexog /* list of excluded exogenous + */ touse /* touse sample + */ weight /* full weight expression w/ [] + */ nocons /* + */ robust /* + */ clopt /* + */ bwopt /* + */ kernopt /* + */ saverfprefix /* + */ dofminus /* + */ sw /* + */ swpsd /* + */ ivreg2_cmd + +* Anderson-Rubin test of signif of endog regressors (Bo=0) +* In case ivreg2 called with adjusted dof, first stage should adjust dof as well + tempname arf arfp archi2 archi2p ardf ardf_r tempest + capture _estimates hold `tempest' + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } +* Needs to be small so that adjusted dof is reflected in F stat + qui `ivreg2_cmd' `lhs' `inexog' `exexog' `weight' if `touse', /* + */ small `nocons' dofminus(`dofminus') `robust' `clopt' 
`bwopt' `kernopt' `sw' `swpsd' + qui test `exexog' + scalar `arf'=r(F) + scalar `arfp'=r(p) + scalar `ardf'=r(df) + scalar `ardf_r'=r(df_r) + if "`clopt'"=="" { + scalar `archi2'=`arf'*`ardf'*(e(N)-`dofminus')/(e(N)-e(rankxx)-`dofminus') + } + else { + scalar `archi2'=`arf'*`ardf'*e(N_clust)/r(df_r)*(e(N)-1)/(e(N)-e(rankxx)) + } + scalar `archi2p'=chiprob(`ardf',`archi2') + +* Check if there is enough room to save results; leave one free. Allow for overwriting. +* Max is 20-1=19 for Stata 9.0 and earlier, 300-1=299 for Stata 9.1+ + if c(stata_version) < 9.1 { + local maxest=19 + } + else { + local maxest=299 + } + local vn "`lhs'" + local plen : length local saverfprefix + local vlen : length local lhs + if `plen'+`vlen' > 27 { + local vlen=27-`plen' + local vn : permname `vn', length(`vlen') + } + local eqname "`saverfprefix'`vn'" + local eqname : subinstr local eqname "." "_" + qui estimates dir + local est_list "`r(names)'" + Subtract est_list : "`est_list'" "`eqname'" + local est_ct : word count `est_list' + if `est_ct' < `maxest' { + capture est store `eqname', title("Reduced-form regression: `lhs'") + return local rfeq "`eqname'" + } + else { +di +di in ye "Unable to store reduced-form regression of `lhs'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." 
+di + } + _estimates unhold `tempest' + return scalar arf=`arf' + return scalar arfp=`arfp' + return scalar ardf=`ardf' + return scalar ardf_r=`ardf_r' + return scalar archi2=`archi2' + return scalar archi2p=`archi2p' +end + +************************************************************************************** +program define IsStop, sclass + /* sic, must do tests one-at-a-time, + * 0, may be very large */ + version 8.2 + if `"`0'"' == "[" { + sret local stop 1 + exit + } + if `"`0'"' == "," { + sret local stop 1 + exit + } + if `"`0'"' == "if" { + sret local stop 1 + exit + } +* per official ivreg 5.1.3 + if substr(`"`0'"',1,3) == "if(" { + sret local stop 1 + exit + } + if `"`0'"' == "in" { + sret local stop 1 + exit + } + if `"`0'"' == "" { + sret local stop 1 + exit + } + else sret local stop 0 +end + +program define Disp + version 8.2 + syntax [anything] [, _col(integer 15) ] + local len = 80-`_col'+1 + local piece : piece 1 `len' of `"`anything'"' + local i 1 + while "`piece'" != "" { + di in gr _col(`_col') "`first'`piece'" + local i = `i' + 1 + local piece : piece `i' `len' of `"`anything'"' + } + if `i'==1 { + di + } +end + + + +* Remove all tokens in dirt from full +* Returns "cleaned" full list in cleaned + +program define Subtract /* : */ + version 8.2 + args cleaned /* macro name to hold cleaned list + */ colon /* ":" + */ full /* list to be cleaned + */ dirt /* tokens to be cleaned from full */ + + tokenize `dirt' + local i 1 + while "``i''" != "" { + local full : subinstr local full "``i''" "", word all + local i = `i' + 1 + } + + tokenize `full' /* cleans up extra spaces */ + c_local `cleaned' `*' +end + +program define vecsort /* Also clears col/row names */ + version 8.2 + args vmat + tempname hold + mat `vmat'=`vmat'+J(rowsof(`vmat'),colsof(`vmat'),0) + local lastcol = colsof(`vmat') + local i 1 + while `i' < `lastcol' { + if `vmat'[1,`i'] > `vmat'[1,`i'+1] { + scalar `hold' = `vmat'[1,`i'] + mat `vmat'[1,`i'] = `vmat'[1,`i'+1] + mat 
`vmat'[1,`i'+1] = `hold' + local i = 1 + } + else { + local i = `i' + 1 + } + } +end + +program define matsort + version 8.2 + args vmat names + tempname hold + foreach vn in `names' { + mat `hold'=nullmat(`hold'), `vmat'[1...,"`vn'"] + } + mat `vmat'=`hold' + mat drop `hold' + foreach vn in `names' { + mat `hold'=nullmat(`hold') \ `vmat'["`vn'",1...] + } + mat `vmat'=`hold' +end + +program define cdsy, rclass + version 8.2 + syntax , type(string) k2(integer) nendog(integer) + +* type() can be ivbias5 (k2<=100, nendog<=3) +* ivbias10 (ditto) +* ivbias20 (ditto) +* ivbias30 (ditto) +* ivsize10 (k2<=100, nendog<=2) +* ivsize15 (ditto) +* ivsize20 (ditto) +* ivsize25 (ditto) +* fullrel5 (ditto) +* fullrel10 (ditto) +* fullrel20 (ditto) +* fullrel30 (ditto) +* fullmax5 (ditto) +* fullmax10 (ditto) +* fullmax20 (ditto) +* fullmax30 (ditto) +* limlsize10 (ditto) +* limlsize15 (ditto) +* limlsize20 (ditto) +* limlsize25 (ditto) + + tempname temp cv + +* Initialize critical value as MV + scalar `cv'=. + + if "`type'"=="ivbias5" { + matrix input `temp' = ( /* + */ . , . , . \ /* + */ . , . , . \ /* + */ 13.91 , . , . \ /* + */ 16.85 , 11.04 , . 
\ /* + */ 18.37 , 13.97 , 9.53 \ /* + */ 19.28 , 15.72 , 12.20 \ /* + */ 19.86 , 16.88 , 13.95 \ /* + */ 20.25 , 17.70 , 15.18 \ /* + */ 20.53 , 18.30 , 16.10 \ /* + */ 20.74 , 18.76 , 16.80 \ /* + */ 20.90 , 19.12 , 17.35 \ /* + */ 21.01 , 19.40 , 17.80 \ /* + */ 21.10 , 19.64 , 18.17 \ /* + */ 21.18 , 19.83 , 18.47 \ /* + */ 21.23 , 19.98 , 18.73 \ /* + */ 21.28 , 20.12 , 18.94 \ /* + */ 21.31 , 20.23 , 19.13 \ /* + */ 21.34 , 20.33 , 19.29 \ /* + */ 21.36 , 20.41 , 19.44 \ /* + */ 21.38 , 20.48 , 19.56 \ /* + */ 21.39 , 20.54 , 19.67 \ /* + */ 21.40 , 20.60 , 19.77 \ /* + */ 21.41 , 20.65 , 19.86 \ /* + */ 21.41 , 20.69 , 19.94 \ /* + */ 21.42 , 20.73 , 20.01 \ /* + */ 21.42 , 20.76 , 20.07 \ /* + */ 21.42 , 20.79 , 20.13 \ /* + */ 21.42 , 20.82 , 20.18 \ /* + */ 21.42 , 20.84 , 20.23 \ /* + */ 21.42 , 20.86 , 20.27 \ /* + */ 21.41 , 20.88 , 20.31 \ /* + */ 21.41 , 20.90 , 20.35 \ /* + */ 21.41 , 20.91 , 20.38 \ /* + */ 21.40 , 20.93 , 20.41 \ /* + */ 21.40 , 20.94 , 20.44 \ /* + */ 21.39 , 20.95 , 20.47 \ /* + */ 21.39 , 20.96 , 20.49 \ /* + */ 21.38 , 20.97 , 20.51 \ /* + */ 21.38 , 20.98 , 20.54 \ /* + */ 21.37 , 20.99 , 20.56 \ /* + */ 21.37 , 20.99 , 20.57 \ /* + */ 21.36 , 21.00 , 20.59 \ /* + */ 21.35 , 21.00 , 20.61 \ /* + */ 21.35 , 21.01 , 20.62 \ /* + */ 21.34 , 21.01 , 20.64 \ /* + */ 21.34 , 21.02 , 20.65 \ /* + */ 21.33 , 21.02 , 20.66 \ /* + */ 21.32 , 21.02 , 20.67 \ /* + */ 21.32 , 21.03 , 20.68 \ /* + */ 21.31 , 21.03 , 20.69 \ /* + */ 21.31 , 21.03 , 20.70 \ /* + */ 21.30 , 21.03 , 20.71 \ /* + */ 21.30 , 21.03 , 20.72 \ /* + */ 21.29 , 21.03 , 20.73 \ /* + */ 21.28 , 21.03 , 20.73 \ /* + */ 21.28 , 21.04 , 20.74 \ /* + */ 21.27 , 21.04 , 20.75 \ /* + */ 21.27 , 21.04 , 20.75 \ /* + */ 21.26 , 21.04 , 20.76 \ /* + */ 21.26 , 21.04 , 20.76 \ /* + */ 21.25 , 21.04 , 20.77 \ /* + */ 21.24 , 21.04 , 20.77 \ /* + */ 21.24 , 21.04 , 20.78 \ /* + */ 21.23 , 21.04 , 20.78 \ /* + */ 21.23 , 21.03 , 20.79 \ /* + */ 21.22 , 21.03 , 20.79 \ /* + */ 21.22 
, 21.03 , 20.79 \ /* + */ 21.21 , 21.03 , 20.80 \ /* + */ 21.21 , 21.03 , 20.80 \ /* + */ 21.20 , 21.03 , 20.80 \ /* + */ 21.20 , 21.03 , 20.80 \ /* + */ 21.19 , 21.03 , 20.81 \ /* + */ 21.19 , 21.03 , 20.81 \ /* + */ 21.18 , 21.03 , 20.81 \ /* + */ 21.18 , 21.02 , 20.81 \ /* + */ 21.17 , 21.02 , 20.82 \ /* + */ 21.17 , 21.02 , 20.82 \ /* + */ 21.16 , 21.02 , 20.82 \ /* + */ 21.16 , 21.02 , 20.82 \ /* + */ 21.15 , 21.02 , 20.82 \ /* + */ 21.15 , 21.02 , 20.82 \ /* + */ 21.15 , 21.02 , 20.83 \ /* + */ 21.14 , 21.01 , 20.83 \ /* + */ 21.14 , 21.01 , 20.83 \ /* + */ 21.13 , 21.01 , 20.83 \ /* + */ 21.13 , 21.01 , 20.83 \ /* + */ 21.12 , 21.01 , 20.84 \ /* + */ 21.12 , 21.01 , 20.84 \ /* + */ 21.11 , 21.01 , 20.84 \ /* + */ 21.11 , 21.01 , 20.84 \ /* + */ 21.10 , 21.00 , 20.84 \ /* + */ 21.10 , 21.00 , 20.84 \ /* + */ 21.09 , 21.00 , 20.85 \ /* + */ 21.09 , 21.00 , 20.85 \ /* + */ 21.08 , 21.00 , 20.85 \ /* + */ 21.08 , 21.00 , 20.85 \ /* + */ 21.07 , 21.00 , 20.85 \ /* + */ 21.07 , 20.99 , 20.86 \ /* + */ 21.06 , 20.99 , 20.86 \ /* + */ 21.06 , 20.99 , 20.86 ) + + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + + + if "`type'"=="ivbias10" { + matrix input `temp' = /* + */ (.,.,. \ /* + */ .,.,. \ /* + */ 9.08,.,. \ /* + */ 10.27,7.56,. 
\ /* + */ 10.83,8.78,6.61 \ /* + */ 11.12,9.48,7.77 \ /* + */ 11.29,9.92,8.5 \ /* + */ 11.39,10.22,9.01 \ /* + */ 11.46,10.43,9.37 \ /* + */ 11.49,10.58,9.64 \ /* + */ 11.51,10.69,9.85 \ /* + */ 11.52,10.78,10.01 \ /* + */ 11.52,10.84,10.14 \ /* + */ 11.52,10.89,10.25 \ /* + */ 11.51,10.93,10.33 \ /* + */ 11.5,10.96,10.41 \ /* + */ 11.49,10.99,10.47 \ /* + */ 11.48,11,10.52 \ /* + */ 11.46,11.02,10.56 \ /* + */ 11.45,11.03,10.6 \ /* + */ 11.44,11.04,10.63 \ /* + */ 11.42,11.05,10.65 \ /* + */ 11.41,11.05,10.68 \ /* + */ 11.4,11.05,10.7 \ /* + */ 11.38,11.06,10.71 \ /* + */ 11.37,11.06,10.73 \ /* + */ 11.36,11.06,10.74 \ /* + */ 11.34,11.05,10.75 \ /* + */ 11.33,11.05,10.76 \ /* + */ 11.32,11.05,10.77 \ /* + */ 11.3,11.05,10.78 \ /* + */ 11.29,11.05,10.79 \ /* + */ 11.28,11.04,10.79 \ /* + */ 11.27,11.04,10.8 \ /* + */ 11.26,11.04,10.8 \ /* + */ 11.25,11.03,10.8 \ /* + */ 11.24,11.03,10.81 \ /* + */ 11.23,11.02,10.81 \ /* + */ 11.22,11.02,10.81 \ /* + */ 11.21,11.02,10.81 \ /* + */ 11.2,11.01,10.81 \ /* + */ 11.19,11.01,10.81 \ /* + */ 11.18,11,10.81 \ /* + */ 11.17,11,10.81 \ /* + */ 11.16,10.99,10.81 \ /* + */ 11.15,10.99,10.81 \ /* + */ 11.14,10.98,10.81 \ /* + */ 11.13,10.98,10.81 \ /* + */ 11.13,10.98,10.81 \ /* + */ 11.12,10.97,10.81 \ /* + */ 11.11,10.97,10.81 \ /* + */ 11.1,10.96,10.81 \ /* + */ 11.1,10.96,10.81 \ /* + */ 11.09,10.95,10.81 \ /* + */ 11.08,10.95,10.81 \ /* + */ 11.07,10.94,10.8 \ /* + */ 11.07,10.94,10.8 \ /* + */ 11.06,10.94,10.8 \ /* + */ 11.05,10.93,10.8 \ /* + */ 11.05,10.93,10.8 \ /* + */ 11.04,10.92,10.8 \ /* + */ 11.03,10.92,10.79 \ /* + */ 11.03,10.92,10.79 \ /* + */ 11.02,10.91,10.79 \ /* + */ 11.02,10.91,10.79 \ /* + */ 11.01,10.9,10.79 \ /* + */ 11,10.9,10.79 \ /* + */ 11,10.9,10.78 \ /* + */ 10.99,10.89,10.78 \ /* + */ 10.99,10.89,10.78 \ /* + */ 10.98,10.89,10.78 \ /* + */ 10.98,10.88,10.78 \ /* + */ 10.97,10.88,10.77 \ /* + */ 10.97,10.88,10.77 \ /* + */ 10.96,10.87,10.77 \ /* + */ 10.96,10.87,10.77 \ /* + */ 10.95,10.86,10.77 \ 
/* + */ 10.95,10.86,10.76 \ /* + */ 10.94,10.86,10.76 \ /* + */ 10.94,10.85,10.76 \ /* + */ 10.93,10.85,10.76 \ /* + */ 10.93,10.85,10.76 \ /* + */ 10.92,10.84,10.75 \ /* + */ 10.92,10.84,10.75 \ /* + */ 10.91,10.84,10.75 \ /* + */ 10.91,10.84,10.75 \ /* + */ 10.91,10.83,10.75 \ /* + */ 10.9,10.83,10.74 \ /* + */ 10.9,10.83,10.74 \ /* + */ 10.89,10.82,10.74 \ /* + */ 10.89,10.82,10.74 \ /* + */ 10.89,10.82,10.74 \ /* + */ 10.88,10.81,10.74 \ /* + */ 10.88,10.81,10.73 \ /* + */ 10.87,10.81,10.73 \ /* + */ 10.87,10.81,10.73 \ /* + */ 10.87,10.8,10.73 \ /* + */ 10.86,10.8,10.73 \ /* + */ 10.86,10.8,10.72 \ /* + */ 10.86,10.8,10.72) + + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + if "`type'"=="ivbias20" { + matrix input `temp' = ( /* + */ . , . , . \ /* + */ . , . , . \ /* + */ 6.46 , . , . \ /* + */ 6.71 , 5.57 , . \ /* + */ 6.77 , 5.91 , 4.99 \ /* + */ 6.76 , 6.08 , 5.35 \ /* + */ 6.73 , 6.16 , 5.56 \ /* + */ 6.69 , 6.20 , 5.69 \ /* + */ 6.65 , 6.22 , 5.78 \ /* + */ 6.61 , 6.23 , 5.83 \ /* + */ 6.56 , 6.23 , 5.87 \ /* + */ 6.53 , 6.22 , 5.90 \ /* + */ 6.49 , 6.21 , 5.92 \ /* + */ 6.45 , 6.20 , 5.93 \ /* + */ 6.42 , 6.19 , 5.94 \ /* + */ 6.39 , 6.17 , 5.94 \ /* + */ 6.36 , 6.16 , 5.94 \ /* + */ 6.33 , 6.14 , 5.94 \ /* + */ 6.31 , 6.13 , 5.94 \ /* + */ 6.28 , 6.11 , 5.93 \ /* + */ 6.26 , 6.10 , 5.93 \ /* + */ 6.24 , 6.08 , 5.92 \ /* + */ 6.22 , 6.07 , 5.92 \ /* + */ 6.20 , 6.06 , 5.91 \ /* + */ 6.18 , 6.05 , 5.90 \ /* + */ 6.16 , 6.03 , 5.90 \ /* + */ 6.14 , 6.02 , 5.89 \ /* + */ 6.13 , 6.01 , 5.88 \ /* + */ 6.11 , 6.00 , 5.88 \ /* + */ 6.09 , 5.99 , 5.87 \ /* + */ 6.08 , 5.98 , 5.87 \ /* + */ 6.07 , 5.97 , 5.86 \ /* + */ 6.05 , 5.96 , 5.85 \ /* + */ 6.04 , 5.95 , 5.85 \ /* + */ 6.03 , 5.94 , 5.84 \ /* + */ 6.01 , 5.93 , 5.83 \ /* + */ 6.00 , 5.92 , 5.83 \ /* + */ 5.99 , 5.91 , 5.82 \ /* + */ 5.98 , 5.90 , 5.82 \ /* + */ 5.97 , 5.89 , 5.81 \ /* + */ 5.96 , 5.89 , 5.80 \ /* + */ 5.95 , 5.88 , 5.80 \ /* + */ 5.94 , 5.87 , 5.79 \ /* + */ 
5.93 , 5.86 , 5.79 \ /* + */ 5.92 , 5.86 , 5.78 \ /* + */ 5.91 , 5.85 , 5.78 \ /* + */ 5.91 , 5.84 , 5.77 \ /* + */ 5.90 , 5.83 , 5.77 \ /* + */ 5.89 , 5.83 , 5.76 \ /* + */ 5.88 , 5.82 , 5.76 \ /* + */ 5.87 , 5.82 , 5.75 \ /* + */ 5.87 , 5.81 , 5.75 \ /* + */ 5.86 , 5.80 , 5.74 \ /* + */ 5.85 , 5.80 , 5.74 \ /* + */ 5.85 , 5.79 , 5.73 \ /* + */ 5.84 , 5.79 , 5.73 \ /* + */ 5.83 , 5.78 , 5.72 \ /* + */ 5.83 , 5.78 , 5.72 \ /* + */ 5.82 , 5.77 , 5.72 \ /* + */ 5.81 , 5.77 , 5.71 \ /* + */ 5.81 , 5.76 , 5.71 \ /* + */ 5.80 , 5.76 , 5.70 \ /* + */ 5.80 , 5.75 , 5.70 \ /* + */ 5.79 , 5.75 , 5.70 \ /* + */ 5.78 , 5.74 , 5.69 \ /* + */ 5.78 , 5.74 , 5.69 \ /* + */ 5.77 , 5.73 , 5.68 \ /* + */ 5.77 , 5.73 , 5.68 \ /* + */ 5.76 , 5.72 , 5.68 \ /* + */ 5.76 , 5.72 , 5.67 \ /* + */ 5.75 , 5.72 , 5.67 \ /* + */ 5.75 , 5.71 , 5.67 \ /* + */ 5.75 , 5.71 , 5.66 \ /* + */ 5.74 , 5.70 , 5.66 \ /* + */ 5.74 , 5.70 , 5.66 \ /* + */ 5.73 , 5.70 , 5.65 \ /* + */ 5.73 , 5.69 , 5.65 \ /* + */ 5.72 , 5.69 , 5.65 \ /* + */ 5.72 , 5.68 , 5.65 \ /* + */ 5.71 , 5.68 , 5.64 \ /* + */ 5.71 , 5.68 , 5.64 \ /* + */ 5.71 , 5.67 , 5.64 \ /* + */ 5.70 , 5.67 , 5.63 \ /* + */ 5.70 , 5.67 , 5.63 \ /* + */ 5.70 , 5.66 , 5.63 \ /* + */ 5.69 , 5.66 , 5.62 \ /* + */ 5.69 , 5.66 , 5.62 \ /* + */ 5.68 , 5.65 , 5.62 \ /* + */ 5.68 , 5.65 , 5.62 \ /* + */ 5.68 , 5.65 , 5.61 \ /* + */ 5.67 , 5.65 , 5.61 \ /* + */ 5.67 , 5.64 , 5.61 \ /* + */ 5.67 , 5.64 , 5.61 \ /* + */ 5.66 , 5.64 , 5.60 \ /* + */ 5.66 , 5.63 , 5.60 \ /* + */ 5.66 , 5.63 , 5.60 \ /* + */ 5.65 , 5.63 , 5.60 \ /* + */ 5.65 , 5.63 , 5.59 \ /* + */ 5.65 , 5.62 , 5.59 \ /* + */ 5.65 , 5.62 , 5.59 ) + + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias30" { + matrix input `temp' = ( /* + */ . , . , . \ /* + */ . , . , . \ /* + */ 5.39 , . , . \ /* + */ 5.34 , 4.73 , . 
\ /* + */ 5.25 , 4.79 , 4.30 \ /* + */ 5.15 , 4.78 , 4.40 \ /* + */ 5.07 , 4.76 , 4.44 \ /* + */ 4.99 , 4.73 , 4.46 \ /* + */ 4.92 , 4.69 , 4.46 \ /* + */ 4.86 , 4.66 , 4.45 \ /* + */ 4.80 , 4.62 , 4.44 \ /* + */ 4.75 , 4.59 , 4.42 \ /* + */ 4.71 , 4.56 , 4.41 \ /* + */ 4.67 , 4.53 , 4.39 \ /* + */ 4.63 , 4.50 , 4.37 \ /* + */ 4.59 , 4.48 , 4.36 \ /* + */ 4.56 , 4.45 , 4.34 \ /* + */ 4.53 , 4.43 , 4.32 \ /* + */ 4.51 , 4.41 , 4.31 \ /* + */ 4.48 , 4.39 , 4.29 \ /* + */ 4.46 , 4.37 , 4.28 \ /* + */ 4.43 , 4.35 , 4.27 \ /* + */ 4.41 , 4.33 , 4.25 \ /* + */ 4.39 , 4.32 , 4.24 \ /* + */ 4.37 , 4.30 , 4.23 \ /* + */ 4.35 , 4.29 , 4.21 \ /* + */ 4.34 , 4.27 , 4.20 \ /* + */ 4.32 , 4.26 , 4.19 \ /* + */ 4.31 , 4.24 , 4.18 \ /* + */ 4.29 , 4.23 , 4.17 \ /* + */ 4.28 , 4.22 , 4.16 \ /* + */ 4.26 , 4.21 , 4.15 \ /* + */ 4.25 , 4.20 , 4.14 \ /* + */ 4.24 , 4.19 , 4.13 \ /* + */ 4.23 , 4.18 , 4.13 \ /* + */ 4.22 , 4.17 , 4.12 \ /* + */ 4.20 , 4.16 , 4.11 \ /* + */ 4.19 , 4.15 , 4.10 \ /* + */ 4.18 , 4.14 , 4.09 \ /* + */ 4.17 , 4.13 , 4.09 \ /* + */ 4.16 , 4.12 , 4.08 \ /* + */ 4.15 , 4.11 , 4.07 \ /* + */ 4.15 , 4.11 , 4.07 \ /* + */ 4.14 , 4.10 , 4.06 \ /* + */ 4.13 , 4.09 , 4.05 \ /* + */ 4.12 , 4.08 , 4.05 \ /* + */ 4.11 , 4.08 , 4.04 \ /* + */ 4.11 , 4.07 , 4.03 \ /* + */ 4.10 , 4.06 , 4.03 \ /* + */ 4.09 , 4.06 , 4.02 \ /* + */ 4.08 , 4.05 , 4.02 \ /* + */ 4.08 , 4.05 , 4.01 \ /* + */ 4.07 , 4.04 , 4.01 \ /* + */ 4.06 , 4.03 , 4.00 \ /* + */ 4.06 , 4.03 , 4.00 \ /* + */ 4.05 , 4.02 , 3.99 \ /* + */ 4.05 , 4.02 , 3.99 \ /* + */ 4.04 , 4.01 , 3.98 \ /* + */ 4.04 , 4.01 , 3.98 \ /* + */ 4.03 , 4.00 , 3.97 \ /* + */ 4.02 , 4.00 , 3.97 \ /* + */ 4.02 , 3.99 , 3.96 \ /* + */ 4.01 , 3.99 , 3.96 \ /* + */ 4.01 , 3.98 , 3.96 \ /* + */ 4.00 , 3.98 , 3.95 \ /* + */ 4.00 , 3.97 , 3.95 \ /* + */ 3.99 , 3.97 , 3.94 \ /* + */ 3.99 , 3.97 , 3.94 \ /* + */ 3.99 , 3.96 , 3.94 \ /* + */ 3.98 , 3.96 , 3.93 \ /* + */ 3.98 , 3.95 , 3.93 \ /* + */ 3.97 , 3.95 , 3.93 \ /* + */ 3.97 , 3.95 , 
3.92 \ /* + */ 3.96 , 3.94 , 3.92 \ /* + */ 3.96 , 3.94 , 3.92 \ /* + */ 3.96 , 3.93 , 3.91 \ /* + */ 3.95 , 3.93 , 3.91 \ /* + */ 3.95 , 3.93 , 3.91 \ /* + */ 3.95 , 3.92 , 3.90 \ /* + */ 3.94 , 3.92 , 3.90 \ /* + */ 3.94 , 3.92 , 3.90 \ /* + */ 3.93 , 3.91 , 3.89 \ /* + */ 3.93 , 3.91 , 3.89 \ /* + */ 3.93 , 3.91 , 3.89 \ /* + */ 3.92 , 3.91 , 3.89 \ /* + */ 3.92 , 3.90 , 3.88 \ /* + */ 3.92 , 3.90 , 3.88 \ /* + */ 3.91 , 3.90 , 3.88 \ /* + */ 3.91 , 3.89 , 3.87 \ /* + */ 3.91 , 3.89 , 3.87 \ /* + */ 3.91 , 3.89 , 3.87 \ /* + */ 3.90 , 3.89 , 3.87 \ /* + */ 3.90 , 3.88 , 3.86 \ /* + */ 3.90 , 3.88 , 3.86 \ /* + */ 3.89 , 3.88 , 3.86 \ /* + */ 3.89 , 3.87 , 3.86 \ /* + */ 3.89 , 3.87 , 3.85 \ /* + */ 3.89 , 3.87 , 3.85 \ /* + */ 3.88 , 3.87 , 3.85 \ /* + */ 3.88 , 3.86 , 3.85 ) + + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + if "`type'"=="ivsize10" { + matrix input `temp' = /* + */ (16.38,. \ /* + */ 19.93,7.03 \ /* + */ 22.3,13.43 \ /* + */ 24.58,16.87 \ /* + */ 26.87,19.45 \ /* + */ 29.18,21.68 \ /* + */ 31.5,23.72 \ /* + */ 33.84,25.64 \ /* + */ 36.19,27.51 \ /* + */ 38.54,29.32 \ /* + */ 40.9,31.11 \ /* + */ 43.27,32.88 \ /* + */ 45.64,34.62 \ /* + */ 48.01,36.36 \ /* + */ 50.39,38.08 \ /* + */ 52.77,39.8 \ /* + */ 55.15,41.51 \ /* + */ 57.53,43.22 \ /* + */ 59.92,44.92 \ /* + */ 62.3,46.62 \ /* + */ 64.69,48.31 \ /* + */ 67.07,50.01 \ /* + */ 69.46,51.7 \ /* + */ 71.85,53.39 \ /* + */ 74.24,55.07 \ /* + */ 76.62,56.76 \ /* + */ 79.01,58.45 \ /* + */ 81.4,60.13 \ /* + */ 83.79,61.82 \ /* + */ 86.17,63.51 \ /* + */ 88.56,65.19 \ /* + */ 90.95,66.88 \ /* + */ 93.33,68.56 \ /* + */ 95.72,70.25 \ /* + */ 98.11,71.94 \ /* + */ 100.5,73.62 \ /* + */ 102.88,75.31 \ /* + */ 105.27,76.99 \ /* + */ 107.66,78.68 \ /* + */ 110.04,80.37 \ /* + */ 112.43,82.05 \ /* + */ 114.82,83.74 \ /* + */ 117.21,85.42 \ /* + */ 119.59,87.11 \ /* + */ 121.98,88.8 \ /* + */ 124.37,90.48 \ /* + */ 126.75,92.17 \ /* + */ 129.14,93.85 \ /* + */ 131.53,95.54 
\ /* + */ 133.92,97.23 \ /* + */ 136.3,98.91 \ /* + */ 138.69,100.6 \ /* + */ 141.08,102.29 \ /* + */ 143.47,103.97 \ /* + */ 145.85,105.66 \ /* + */ 148.24,107.34 \ /* + */ 150.63,109.03 \ /* + */ 153.01,110.72 \ /* + */ 155.4,112.4 \ /* + */ 157.79,114.09 \ /* + */ 160.18,115.77 \ /* + */ 162.56,117.46 \ /* + */ 164.95,119.15 \ /* + */ 167.34,120.83 \ /* + */ 169.72,122.52 \ /* + */ 172.11,124.2 \ /* + */ 174.5,125.89 \ /* + */ 176.89,127.58 \ /* + */ 179.27,129.26 \ /* + */ 181.66,130.95 \ /* + */ 184.05,132.63 \ /* + */ 186.44,134.32 \ /* + */ 188.82,136.01 \ /* + */ 191.21,137.69 \ /* + */ 193.6,139.38 \ /* + */ 195.98,141.07 \ /* + */ 198.37,142.75 \ /* + */ 200.76,144.44 \ /* + */ 203.15,146.12 \ /* + */ 205.53,147.81 \ /* + */ 207.92,149.5 \ /* + */ 210.31,151.18 \ /* + */ 212.69,152.87 \ /* + */ 215.08,154.55 \ /* + */ 217.47,156.24 \ /* + */ 219.86,157.93 \ /* + */ 222.24,159.61 \ /* + */ 224.63,161.3 \ /* + */ 227.02,162.98 \ /* + */ 229.41,164.67 \ /* + */ 231.79,166.36 \ /* + */ 234.18,168.04 \ /* + */ 236.57,169.73 \ /* + */ 238.95,171.41 \ /* + */ 241.34,173.1 \ /* + */ 243.73,174.79 \ /* + */ 246.12,176.47 \ /* + */ 248.5,178.16 \ /* + */ 250.89,179.84 \ /* + */ 253.28,181.53) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize15" { + matrix input `temp' = ( /* + */ 8.96 , . 
\ /* + */ 11.59 , 4.58 \ /* + */ 12.83 , 8.18 \ /* + */ 13.96 , 9.93 \ /* + */ 15.09 , 11.22 \ /* + */ 16.23 , 12.33 \ /* + */ 17.38 , 13.34 \ /* + */ 18.54 , 14.31 \ /* + */ 19.71 , 15.24 \ /* + */ 20.88 , 16.16 \ /* + */ 22.06 , 17.06 \ /* + */ 23.24 , 17.95 \ /* + */ 24.42 , 18.84 \ /* + */ 25.61 , 19.72 \ /* + */ 26.80 , 20.60 \ /* + */ 27.99 , 21.48 \ /* + */ 29.19 , 22.35 \ /* + */ 30.38 , 23.22 \ /* + */ 31.58 , 24.09 \ /* + */ 32.77 , 24.96 \ /* + */ 33.97 , 25.82 \ /* + */ 35.17 , 26.69 \ /* + */ 36.37 , 27.56 \ /* + */ 37.57 , 28.42 \ /* + */ 38.77 , 29.29 \ /* + */ 39.97 , 30.15 \ /* + */ 41.17 , 31.02 \ /* + */ 42.37 , 31.88 \ /* + */ 43.57 , 32.74 \ /* + */ 44.78 , 33.61 \ /* + */ 45.98 , 34.47 \ /* + */ 47.18 , 35.33 \ /* + */ 48.38 , 36.19 \ /* + */ 49.59 , 37.06 \ /* + */ 50.79 , 37.92 \ /* + */ 51.99 , 38.78 \ /* + */ 53.19 , 39.64 \ /* + */ 54.40 , 40.50 \ /* + */ 55.60 , 41.37 \ /* + */ 56.80 , 42.23 \ /* + */ 58.01 , 43.09 \ /* + */ 59.21 , 43.95 \ /* + */ 60.41 , 44.81 \ /* + */ 61.61 , 45.68 \ /* + */ 62.82 , 46.54 \ /* + */ 64.02 , 47.40 \ /* + */ 65.22 , 48.26 \ /* + */ 66.42 , 49.12 \ /* + */ 67.63 , 49.99 \ /* + */ 68.83 , 50.85 \ /* + */ 70.03 , 51.71 \ /* + */ 71.24 , 52.57 \ /* + */ 72.44 , 53.43 \ /* + */ 73.64 , 54.30 \ /* + */ 74.84 , 55.16 \ /* + */ 76.05 , 56.02 \ /* + */ 77.25 , 56.88 \ /* + */ 78.45 , 57.74 \ /* + */ 79.66 , 58.61 \ /* + */ 80.86 , 59.47 \ /* + */ 82.06 , 60.33 \ /* + */ 83.26 , 61.19 \ /* + */ 84.47 , 62.05 \ /* + */ 85.67 , 62.92 \ /* + */ 86.87 , 63.78 \ /* + */ 88.07 , 64.64 \ /* + */ 89.28 , 65.50 \ /* + */ 90.48 , 66.36 \ /* + */ 91.68 , 67.22 \ /* + */ 92.89 , 68.09 \ /* + */ 94.09 , 68.95 \ /* + */ 95.29 , 69.81 \ /* + */ 96.49 , 70.67 \ /* + */ 97.70 , 71.53 \ /* + */ 98.90 , 72.40 \ /* + */ 100.10 , 73.26 \ /* + */ 101.30 , 74.12 \ /* + */ 102.51 , 74.98 \ /* + */ 103.71 , 75.84 \ /* + */ 104.91 , 76.71 \ /* + */ 106.12 , 77.57 \ /* + */ 107.32 , 78.43 \ /* + */ 108.52 , 79.29 \ /* + */ 109.72 , 80.15 \ 
/* + */ 110.93 , 81.02 \ /* + */ 112.13 , 81.88 \ /* + */ 113.33 , 82.74 \ /* + */ 114.53 , 83.60 \ /* + */ 115.74 , 84.46 \ /* + */ 116.94 , 85.33 \ /* + */ 118.14 , 86.19 \ /* + */ 119.35 , 87.05 \ /* + */ 120.55 , 87.91 \ /* + */ 121.75 , 88.77 \ /* + */ 122.95 , 89.64 \ /* + */ 124.16 , 90.50 \ /* + */ 125.36 , 91.36 \ /* + */ 126.56 , 92.22 \ /* + */ 127.76 , 93.08 \ /* + */ 128.97 , 93.95 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize20" { + matrix input `temp' = ( /* + */ 6.66 , . \ /* + */ 8.75 , 3.95 \ /* + */ 9.54 , 6.40 \ /* + */ 10.26 , 7.54 \ /* + */ 10.98 , 8.38 \ /* + */ 11.72 , 9.10 \ /* + */ 12.48 , 9.77 \ /* + */ 13.24 , 10.41 \ /* + */ 14.01 , 11.03 \ /* + */ 14.78 , 11.65 \ /* + */ 15.56 , 12.25 \ /* + */ 16.35 , 12.86 \ /* + */ 17.14 , 13.45 \ /* + */ 17.93 , 14.05 \ /* + */ 18.72 , 14.65 \ /* + */ 19.51 , 15.24 \ /* + */ 20.31 , 15.83 \ /* + */ 21.10 , 16.42 \ /* + */ 21.90 , 17.02 \ /* + */ 22.70 , 17.61 \ /* + */ 23.50 , 18.20 \ /* + */ 24.30 , 18.79 \ /* + */ 25.10 , 19.38 \ /* + */ 25.90 , 19.97 \ /* + */ 26.71 , 20.56 \ /* + */ 27.51 , 21.15 \ /* + */ 28.31 , 21.74 \ /* + */ 29.12 , 22.33 \ /* + */ 29.92 , 22.92 \ /* + */ 30.72 , 23.51 \ /* + */ 31.53 , 24.10 \ /* + */ 32.33 , 24.69 \ /* + */ 33.14 , 25.28 \ /* + */ 33.94 , 25.87 \ /* + */ 34.75 , 26.46 \ /* + */ 35.55 , 27.05 \ /* + */ 36.36 , 27.64 \ /* + */ 37.17 , 28.23 \ /* + */ 37.97 , 28.82 \ /* + */ 38.78 , 29.41 \ /* + */ 39.58 , 30.00 \ /* + */ 40.39 , 30.59 \ /* + */ 41.20 , 31.18 \ /* + */ 42.00 , 31.77 \ /* + */ 42.81 , 32.36 \ /* + */ 43.62 , 32.95 \ /* + */ 44.42 , 33.54 \ /* + */ 45.23 , 34.13 \ /* + */ 46.03 , 34.72 \ /* + */ 46.84 , 35.31 \ /* + */ 47.65 , 35.90 \ /* + */ 48.45 , 36.49 \ /* + */ 49.26 , 37.08 \ /* + */ 50.06 , 37.67 \ /* + */ 50.87 , 38.26 \ /* + */ 51.68 , 38.85 \ /* + */ 52.48 , 39.44 \ /* + */ 53.29 , 40.02 \ /* + */ 54.09 , 40.61 \ /* + */ 54.90 , 41.20 \ /* + */ 55.71 , 41.79 \ /* + */ 56.51 , 
42.38 \ /* + */ 57.32 , 42.97 \ /* + */ 58.13 , 43.56 \ /* + */ 58.93 , 44.15 \ /* + */ 59.74 , 44.74 \ /* + */ 60.54 , 45.33 \ /* + */ 61.35 , 45.92 \ /* + */ 62.16 , 46.51 \ /* + */ 62.96 , 47.10 \ /* + */ 63.77 , 47.69 \ /* + */ 64.57 , 48.28 \ /* + */ 65.38 , 48.87 \ /* + */ 66.19 , 49.46 \ /* + */ 66.99 , 50.05 \ /* + */ 67.80 , 50.64 \ /* + */ 68.60 , 51.23 \ /* + */ 69.41 , 51.82 \ /* + */ 70.22 , 52.41 \ /* + */ 71.02 , 53.00 \ /* + */ 71.83 , 53.59 \ /* + */ 72.64 , 54.18 \ /* + */ 73.44 , 54.77 \ /* + */ 74.25 , 55.36 \ /* + */ 75.05 , 55.95 \ /* + */ 75.86 , 56.54 \ /* + */ 76.67 , 57.13 \ /* + */ 77.47 , 57.72 \ /* + */ 78.28 , 58.31 \ /* + */ 79.08 , 58.90 \ /* + */ 79.89 , 59.49 \ /* + */ 80.70 , 60.08 \ /* + */ 81.50 , 60.67 \ /* + */ 82.31 , 61.26 \ /* + */ 83.12 , 61.85 \ /* + */ 83.92 , 62.44 \ /* + */ 84.73 , 63.03 \ /* + */ 85.53 , 63.62 \ /* + */ 86.34 , 64.21 \ /* + */ 87.15 , 64.80 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize25" { + matrix input `temp' = ( /* + */ 5.53 , . 
\ /* + */ 7.25 , 3.63 \ /* + */ 7.80 , 5.45 \ /* + */ 8.31 , 6.28 \ /* + */ 8.84 , 6.89 \ /* + */ 9.38 , 7.42 \ /* + */ 9.93 , 7.91 \ /* + */ 10.50 , 8.39 \ /* + */ 11.07 , 8.85 \ /* + */ 11.65 , 9.31 \ /* + */ 12.23 , 9.77 \ /* + */ 12.82 , 10.22 \ /* + */ 13.41 , 10.68 \ /* + */ 14.00 , 11.13 \ /* + */ 14.60 , 11.58 \ /* + */ 15.19 , 12.03 \ /* + */ 15.79 , 12.49 \ /* + */ 16.39 , 12.94 \ /* + */ 16.99 , 13.39 \ /* + */ 17.60 , 13.84 \ /* + */ 18.20 , 14.29 \ /* + */ 18.80 , 14.74 \ /* + */ 19.41 , 15.19 \ /* + */ 20.01 , 15.64 \ /* + */ 20.61 , 16.10 \ /* + */ 21.22 , 16.55 \ /* + */ 21.83 , 17.00 \ /* + */ 22.43 , 17.45 \ /* + */ 23.04 , 17.90 \ /* + */ 23.65 , 18.35 \ /* + */ 24.25 , 18.81 \ /* + */ 24.86 , 19.26 \ /* + */ 25.47 , 19.71 \ /* + */ 26.08 , 20.16 \ /* + */ 26.68 , 20.61 \ /* + */ 27.29 , 21.06 \ /* + */ 27.90 , 21.52 \ /* + */ 28.51 , 21.97 \ /* + */ 29.12 , 22.42 \ /* + */ 29.73 , 22.87 \ /* + */ 30.33 , 23.32 \ /* + */ 30.94 , 23.78 \ /* + */ 31.55 , 24.23 \ /* + */ 32.16 , 24.68 \ /* + */ 32.77 , 25.13 \ /* + */ 33.38 , 25.58 \ /* + */ 33.99 , 26.04 \ /* + */ 34.60 , 26.49 \ /* + */ 35.21 , 26.94 \ /* + */ 35.82 , 27.39 \ /* + */ 36.43 , 27.85 \ /* + */ 37.04 , 28.30 \ /* + */ 37.65 , 28.75 \ /* + */ 38.25 , 29.20 \ /* + */ 38.86 , 29.66 \ /* + */ 39.47 , 30.11 \ /* + */ 40.08 , 30.56 \ /* + */ 40.69 , 31.01 \ /* + */ 41.30 , 31.47 \ /* + */ 41.91 , 31.92 \ /* + */ 42.52 , 32.37 \ /* + */ 43.13 , 32.82 \ /* + */ 43.74 , 33.27 \ /* + */ 44.35 , 33.73 \ /* + */ 44.96 , 34.18 \ /* + */ 45.57 , 34.63 \ /* + */ 46.18 , 35.08 \ /* + */ 46.78 , 35.54 \ /* + */ 47.39 , 35.99 \ /* + */ 48.00 , 36.44 \ /* + */ 48.61 , 36.89 \ /* + */ 49.22 , 37.35 \ /* + */ 49.83 , 37.80 \ /* + */ 50.44 , 38.25 \ /* + */ 51.05 , 38.70 \ /* + */ 51.66 , 39.16 \ /* + */ 52.27 , 39.61 \ /* + */ 52.88 , 40.06 \ /* + */ 53.49 , 40.51 \ /* + */ 54.10 , 40.96 \ /* + */ 54.71 , 41.42 \ /* + */ 55.32 , 41.87 \ /* + */ 55.92 , 42.32 \ /* + */ 56.53 , 42.77 \ /* + */ 57.14 , 43.23 
\ /* + */ 57.75 , 43.68 \ /* + */ 58.36 , 44.13 \ /* + */ 58.97 , 44.58 \ /* + */ 59.58 , 45.04 \ /* + */ 60.19 , 45.49 \ /* + */ 60.80 , 45.94 \ /* + */ 61.41 , 46.39 \ /* + */ 62.02 , 46.85 \ /* + */ 62.63 , 47.30 \ /* + */ 63.24 , 47.75 \ /* + */ 63.85 , 48.20 \ /* + */ 64.45 , 48.65 \ /* + */ 65.06 , 49.11 \ /* + */ 65.67 , 49.56 \ /* + */ 66.28 , 50.01 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel5" { + matrix input `temp' = ( /* + */ 24.09 , . \ /* + */ 13.46 , 15.50 \ /* + */ 9.61 , 10.83 \ /* + */ 7.63 , 8.53 \ /* + */ 6.42 , 7.16 \ /* + */ 5.61 , 6.24 \ /* + */ 5.02 , 5.59 \ /* + */ 4.58 , 5.10 \ /* + */ 4.23 , 4.71 \ /* + */ 3.96 , 4.41 \ /* + */ 3.73 , 4.15 \ /* + */ 3.54 , 3.94 \ /* + */ 3.38 , 3.76 \ /* + */ 3.24 , 3.60 \ /* + */ 3.12 , 3.47 \ /* + */ 3.01 , 3.35 \ /* + */ 2.92 , 3.24 \ /* + */ 2.84 , 3.15 \ /* + */ 2.76 , 3.06 \ /* + */ 2.69 , 2.98 \ /* + */ 2.63 , 2.91 \ /* + */ 2.58 , 2.85 \ /* + */ 2.52 , 2.79 \ /* + */ 2.48 , 2.73 \ /* + */ 2.43 , 2.68 \ /* + */ 2.39 , 2.63 \ /* + */ 2.36 , 2.59 \ /* + */ 2.32 , 2.55 \ /* + */ 2.29 , 2.51 \ /* + */ 2.26 , 2.47 \ /* + */ 2.23 , 2.44 \ /* + */ 2.20 , 2.41 \ /* + */ 2.18 , 2.37 \ /* + */ 2.16 , 2.35 \ /* + */ 2.13 , 2.32 \ /* + */ 2.11 , 2.29 \ /* + */ 2.09 , 2.27 \ /* + */ 2.07 , 2.24 \ /* + */ 2.05 , 2.22 \ /* + */ 2.04 , 2.20 \ /* + */ 2.02 , 2.18 \ /* + */ 2.00 , 2.16 \ /* + */ 1.99 , 2.14 \ /* + */ 1.97 , 2.12 \ /* + */ 1.96 , 2.10 \ /* + */ 1.94 , 2.09 \ /* + */ 1.93 , 2.07 \ /* + */ 1.92 , 2.05 \ /* + */ 1.91 , 2.04 \ /* + */ 1.89 , 2.02 \ /* + */ 1.88 , 2.01 \ /* + */ 1.87 , 2.00 \ /* + */ 1.86 , 1.98 \ /* + */ 1.85 , 1.97 \ /* + */ 1.84 , 1.96 \ /* + */ 1.83 , 1.95 \ /* + */ 1.82 , 1.94 \ /* + */ 1.81 , 1.92 \ /* + */ 1.80 , 1.91 \ /* + */ 1.79 , 1.90 \ /* + */ 1.79 , 1.89 \ /* + */ 1.78 , 1.88 \ /* + */ 1.77 , 1.87 \ /* + */ 1.76 , 1.87 \ /* + */ 1.75 , 1.86 \ /* + */ 1.75 , 1.85 \ /* + */ 1.74 , 1.84 \ /* + */ 1.73 , 1.83 \ /* + */ 
1.72 , 1.83 \ /* + */ 1.72 , 1.82 \ /* + */ 1.71 , 1.81 \ /* + */ 1.70 , 1.80 \ /* + */ 1.70 , 1.80 \ /* + */ 1.69 , 1.79 \ /* + */ 1.68 , 1.79 \ /* + */ 1.68 , 1.78 \ /* + */ 1.67 , 1.77 \ /* + */ 1.67 , 1.77 \ /* + */ 1.66 , 1.76 \ /* + */ 1.65 , 1.76 \ /* + */ 1.65 , 1.75 \ /* + */ 1.64 , 1.75 \ /* + */ 1.64 , 1.74 \ /* + */ 1.63 , 1.74 \ /* + */ 1.63 , 1.73 \ /* + */ 1.62 , 1.73 \ /* + */ 1.61 , 1.73 \ /* + */ 1.61 , 1.72 \ /* + */ 1.60 , 1.72 \ /* + */ 1.60 , 1.71 \ /* + */ 1.59 , 1.71 \ /* + */ 1.59 , 1.71 \ /* + */ 1.58 , 1.71 \ /* + */ 1.58 , 1.70 \ /* + */ 1.57 , 1.70 \ /* + */ 1.57 , 1.70 \ /* + */ 1.56 , 1.69 \ /* + */ 1.56 , 1.69 \ /* + */ 1.55 , 1.69 \ /* + */ 1.55 , 1.69 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel10" { + matrix input `temp' = ( /* + */ 19.36 , . \ /* + */ 10.89 , 12.55 \ /* + */ 7.90 , 8.96 \ /* + */ 6.37 , 7.15 \ /* + */ 5.44 , 6.07 \ /* + */ 4.81 , 5.34 \ /* + */ 4.35 , 4.82 \ /* + */ 4.01 , 4.43 \ /* + */ 3.74 , 4.12 \ /* + */ 3.52 , 3.87 \ /* + */ 3.34 , 3.67 \ /* + */ 3.19 , 3.49 \ /* + */ 3.06 , 3.35 \ /* + */ 2.95 , 3.22 \ /* + */ 2.85 , 3.11 \ /* + */ 2.76 , 3.01 \ /* + */ 2.69 , 2.92 \ /* + */ 2.62 , 2.84 \ /* + */ 2.56 , 2.77 \ /* + */ 2.50 , 2.71 \ /* + */ 2.45 , 2.65 \ /* + */ 2.40 , 2.60 \ /* + */ 2.36 , 2.55 \ /* + */ 2.32 , 2.50 \ /* + */ 2.28 , 2.46 \ /* + */ 2.24 , 2.42 \ /* + */ 2.21 , 2.38 \ /* + */ 2.18 , 2.35 \ /* + */ 2.15 , 2.31 \ /* + */ 2.12 , 2.28 \ /* + */ 2.10 , 2.25 \ /* + */ 2.07 , 2.23 \ /* + */ 2.05 , 2.20 \ /* + */ 2.03 , 2.17 \ /* + */ 2.01 , 2.15 \ /* + */ 1.99 , 2.13 \ /* + */ 1.97 , 2.11 \ /* + */ 1.95 , 2.09 \ /* + */ 1.93 , 2.07 \ /* + */ 1.92 , 2.05 \ /* + */ 1.90 , 2.03 \ /* + */ 1.88 , 2.01 \ /* + */ 1.87 , 2.00 \ /* + */ 1.86 , 1.98 \ /* + */ 1.84 , 1.96 \ /* + */ 1.83 , 1.95 \ /* + */ 1.82 , 1.93 \ /* + */ 1.81 , 1.92 \ /* + */ 1.79 , 1.91 \ /* + */ 1.78 , 1.89 \ /* + */ 1.77 , 1.88 \ /* + */ 1.76 , 1.87 \ /* + */ 1.75 , 1.86 \ /* 
+ */ 1.74 , 1.85 \ /* + */ 1.73 , 1.84 \ /* + */ 1.72 , 1.83 \ /* + */ 1.71 , 1.82 \ /* + */ 1.70 , 1.81 \ /* + */ 1.70 , 1.80 \ /* + */ 1.69 , 1.79 \ /* + */ 1.68 , 1.78 \ /* + */ 1.67 , 1.77 \ /* + */ 1.67 , 1.76 \ /* + */ 1.66 , 1.75 \ /* + */ 1.65 , 1.75 \ /* + */ 1.64 , 1.74 \ /* + */ 1.64 , 1.73 \ /* + */ 1.63 , 1.72 \ /* + */ 1.63 , 1.72 \ /* + */ 1.62 , 1.71 \ /* + */ 1.61 , 1.70 \ /* + */ 1.61 , 1.70 \ /* + */ 1.60 , 1.69 \ /* + */ 1.60 , 1.68 \ /* + */ 1.59 , 1.68 \ /* + */ 1.59 , 1.67 \ /* + */ 1.58 , 1.67 \ /* + */ 1.58 , 1.66 \ /* + */ 1.57 , 1.66 \ /* + */ 1.57 , 1.65 \ /* + */ 1.56 , 1.65 \ /* + */ 1.56 , 1.64 \ /* + */ 1.56 , 1.64 \ /* + */ 1.55 , 1.63 \ /* + */ 1.55 , 1.63 \ /* + */ 1.54 , 1.62 \ /* + */ 1.54 , 1.62 \ /* + */ 1.54 , 1.62 \ /* + */ 1.53 , 1.61 \ /* + */ 1.53 , 1.61 \ /* + */ 1.53 , 1.61 \ /* + */ 1.52 , 1.60 \ /* + */ 1.52 , 1.60 \ /* + */ 1.52 , 1.60 \ /* + */ 1.52 , 1.59 \ /* + */ 1.51 , 1.59 \ /* + */ 1.51 , 1.59 \ /* + */ 1.51 , 1.59 \ /* + */ 1.51 , 1.58 \ /* + */ 1.50 , 1.58 ) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel20" { + matrix input `temp' = ( /* + */ 15.64 , . 
\ /* + */ 9.00 , 9.72 \ /* + */ 6.61 , 7.18 \ /* + */ 5.38 , 5.85 \ /* + */ 4.62 , 5.04 \ /* + */ 4.11 , 4.48 \ /* + */ 3.75 , 4.08 \ /* + */ 3.47 , 3.77 \ /* + */ 3.25 , 3.53 \ /* + */ 3.07 , 3.33 \ /* + */ 2.92 , 3.17 \ /* + */ 2.80 , 3.04 \ /* + */ 2.70 , 2.92 \ /* + */ 2.61 , 2.82 \ /* + */ 2.53 , 2.73 \ /* + */ 2.46 , 2.65 \ /* + */ 2.39 , 2.58 \ /* + */ 2.34 , 2.52 \ /* + */ 2.29 , 2.46 \ /* + */ 2.24 , 2.41 \ /* + */ 2.20 , 2.36 \ /* + */ 2.16 , 2.32 \ /* + */ 2.13 , 2.28 \ /* + */ 2.10 , 2.24 \ /* + */ 2.06 , 2.21 \ /* + */ 2.04 , 2.18 \ /* + */ 2.01 , 2.15 \ /* + */ 1.99 , 2.12 \ /* + */ 1.96 , 2.09 \ /* + */ 1.94 , 2.07 \ /* + */ 1.92 , 2.04 \ /* + */ 1.90 , 2.02 \ /* + */ 1.88 , 2.00 \ /* + */ 1.87 , 1.98 \ /* + */ 1.85 , 1.96 \ /* + */ 1.83 , 1.94 \ /* + */ 1.82 , 1.93 \ /* + */ 1.80 , 1.91 \ /* + */ 1.79 , 1.89 \ /* + */ 1.78 , 1.88 \ /* + */ 1.76 , 1.86 \ /* + */ 1.75 , 1.85 \ /* + */ 1.74 , 1.84 \ /* + */ 1.73 , 1.82 \ /* + */ 1.72 , 1.81 \ /* + */ 1.71 , 1.80 \ /* + */ 1.70 , 1.79 \ /* + */ 1.69 , 1.78 \ /* + */ 1.68 , 1.77 \ /* + */ 1.67 , 1.76 \ /* + */ 1.66 , 1.75 \ /* + */ 1.65 , 1.74 \ /* + */ 1.65 , 1.73 \ /* + */ 1.64 , 1.72 \ /* + */ 1.63 , 1.71 \ /* + */ 1.62 , 1.70 \ /* + */ 1.62 , 1.69 \ /* + */ 1.61 , 1.68 \ /* + */ 1.60 , 1.68 \ /* + */ 1.60 , 1.67 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.58 , 1.65 \ /* + */ 1.57 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.56 , 1.63 \ /* + */ 1.56 , 1.62 \ /* + */ 1.55 , 1.62 \ /* + */ 1.55 , 1.61 \ /* + */ 1.54 , 1.60 \ /* + */ 1.54 , 1.60 \ /* + */ 1.53 , 1.59 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.52 , 1.58 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.54 \ /* + */ 1.48 , 1.54 \ /* + */ 1.48 , 1.54 \ /* + */ 1.48 , 1.53 \ /* + */ 1.47 , 1.53 \ /* + */ 1.47 , 1.53 \ /* + */ 1.47 , 1.52 \ /* + */ 1.46 , 1.52 \ /* + */ 1.46 , 1.52 \ /* + */ 1.46 , 
1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.45 , 1.51 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.44 , 1.50 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.49 ) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel30" { + matrix input `temp' = ( /* + */ 12.71 , . \ /* + */ 7.49 , 8.03 \ /* + */ 5.60 , 6.15 \ /* + */ 4.63 , 5.10 \ /* + */ 4.03 , 4.44 \ /* + */ 3.63 , 3.98 \ /* + */ 3.33 , 3.65 \ /* + */ 3.11 , 3.39 \ /* + */ 2.93 , 3.19 \ /* + */ 2.79 , 3.02 \ /* + */ 2.67 , 2.88 \ /* + */ 2.57 , 2.77 \ /* + */ 2.48 , 2.67 \ /* + */ 2.41 , 2.58 \ /* + */ 2.34 , 2.51 \ /* + */ 2.28 , 2.44 \ /* + */ 2.23 , 2.38 \ /* + */ 2.18 , 2.33 \ /* + */ 2.14 , 2.28 \ /* + */ 2.10 , 2.23 \ /* + */ 2.07 , 2.19 \ /* + */ 2.04 , 2.16 \ /* + */ 2.01 , 2.12 \ /* + */ 1.98 , 2.09 \ /* + */ 1.95 , 2.06 \ /* + */ 1.93 , 2.03 \ /* + */ 1.90 , 2.01 \ /* + */ 1.88 , 1.98 \ /* + */ 1.86 , 1.96 \ /* + */ 1.84 , 1.94 \ /* + */ 1.83 , 1.92 \ /* + */ 1.81 , 1.90 \ /* + */ 1.79 , 1.88 \ /* + */ 1.78 , 1.87 \ /* + */ 1.76 , 1.85 \ /* + */ 1.75 , 1.83 \ /* + */ 1.74 , 1.82 \ /* + */ 1.72 , 1.80 \ /* + */ 1.71 , 1.79 \ /* + */ 1.70 , 1.78 \ /* + */ 1.69 , 1.77 \ /* + */ 1.68 , 1.75 \ /* + */ 1.67 , 1.74 \ /* + */ 1.66 , 1.73 \ /* + */ 1.65 , 1.72 \ /* + */ 1.64 , 1.71 \ /* + */ 1.63 , 1.70 \ /* + */ 1.62 , 1.69 \ /* + */ 1.61 , 1.68 \ /* + */ 1.60 , 1.67 \ /* + */ 1.60 , 1.66 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.57 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.56 , 1.63 \ /* + */ 1.55 , 1.62 \ /* + */ 1.55 , 1.61 \ /* + */ 1.54 , 1.61 \ /* + */ 1.54 , 1.60 \ /* + */ 1.53 , 1.59 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.57 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.55 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.54 \ /* + */ 1.48 , 1.54 \ /* + */ 1.48 , 1.53 \ /* + */ 1.47 , 1.53 \ /* + */ 1.47 , 1.52 \ /* + */ 1.47 , 1.52 \ /* + */ 1.46 , 1.52 \ /* + */ 1.46 , 
1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.47 \ /* + */ 1.43 , 1.47 \ /* + */ 1.42 , 1.47 \ /* + */ 1.42 , 1.47 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.46 \ /* + */ 1.41 , 1.46 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax5" { + matrix input `temp' = ( /* + */ 23.81 , . \ /* + */ 12.38 , 14.19 \ /* + */ 8.66 , 10.00 \ /* + */ 6.81 , 7.88 \ /* + */ 5.71 , 6.60 \ /* + */ 4.98 , 5.74 \ /* + */ 4.45 , 5.13 \ /* + */ 4.06 , 4.66 \ /* + */ 3.76 , 4.30 \ /* + */ 3.51 , 4.01 \ /* + */ 3.31 , 3.77 \ /* + */ 3.15 , 3.57 \ /* + */ 3.00 , 3.41 \ /* + */ 2.88 , 3.26 \ /* + */ 2.78 , 3.13 \ /* + */ 2.69 , 3.02 \ /* + */ 2.61 , 2.92 \ /* + */ 2.53 , 2.84 \ /* + */ 2.47 , 2.76 \ /* + */ 2.41 , 2.69 \ /* + */ 2.36 , 2.62 \ /* + */ 2.31 , 2.56 \ /* + */ 2.27 , 2.51 \ /* + */ 2.23 , 2.46 \ /* + */ 2.19 , 2.42 \ /* + */ 2.15 , 2.37 \ /* + */ 2.12 , 2.33 \ /* + */ 2.09 , 2.30 \ /* + */ 2.07 , 2.26 \ /* + */ 2.04 , 2.23 \ /* + */ 2.02 , 2.20 \ /* + */ 1.99 , 2.17 \ /* + */ 1.97 , 2.14 \ /* + */ 1.95 , 2.12 \ /* + */ 1.93 , 2.10 \ /* + */ 1.91 , 2.07 \ /* + */ 1.90 , 2.05 \ /* + */ 1.88 , 2.03 \ /* + */ 1.87 , 2.01 \ /* + */ 1.85 , 1.99 \ /* + */ 1.84 , 1.98 \ /* + */ 1.82 , 1.96 \ /* + */ 1.81 , 1.94 \ /* + */ 1.80 , 1.93 \ /* + */ 1.79 , 1.91 \ /* + */ 1.78 , 1.90 \ /* + */ 1.76 , 1.88 \ /* + */ 1.75 , 1.87 \ /* + */ 1.74 , 1.86 \ /* + */ 1.73 , 1.85 \ /* + */ 1.73 , 1.83 \ /* + */ 1.72 , 1.82 \ /* + */ 1.71 , 1.81 \ /* + */ 1.70 , 1.80 \ /* + */ 1.69 , 1.79 \ /* + */ 1.68 , 1.78 \ /* + */ 1.68 , 1.77 \ /* + */ 1.67 , 1.76 \ /* + */ 1.66 , 1.75 \ /* + */ 1.65 , 1.74 \ /* + */ 1.65 , 1.74 \ /* + */ 
1.64 , 1.73 \ /* + */ 1.63 , 1.72 \ /* + */ 1.63 , 1.71 \ /* + */ 1.62 , 1.70 \ /* + */ 1.62 , 1.70 \ /* + */ 1.61 , 1.69 \ /* + */ 1.60 , 1.68 \ /* + */ 1.60 , 1.68 \ /* + */ 1.59 , 1.67 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.57 , 1.64 \ /* + */ 1.57 , 1.64 \ /* + */ 1.56 , 1.63 \ /* + */ 1.56 , 1.63 \ /* + */ 1.55 , 1.62 \ /* + */ 1.55 , 1.62 \ /* + */ 1.54 , 1.61 \ /* + */ 1.54 , 1.61 \ /* + */ 1.53 , 1.60 \ /* + */ 1.53 , 1.60 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.51 , 1.58 \ /* + */ 1.51 , 1.57 \ /* + */ 1.50 , 1.57 \ /* + */ 1.50 , 1.57 \ /* + */ 1.50 , 1.56 \ /* + */ 1.49 , 1.56 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.55 \ /* + */ 1.48 , 1.55 \ /* + */ 1.48 , 1.54 \ /* + */ 1.47 , 1.54 \ /* + */ 1.47 , 1.54 \ /* + */ 1.47 , 1.53 \ /* + */ 1.46 , 1.53 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax10" { + matrix input `temp' = ( /* + */ 19.40 , . 
\ /* + */ 10.14 , 11.92 \ /* + */ 7.18 , 8.39 \ /* + */ 5.72 , 6.64 \ /* + */ 4.85 , 5.60 \ /* + */ 4.27 , 4.90 \ /* + */ 3.86 , 4.40 \ /* + */ 3.55 , 4.03 \ /* + */ 3.31 , 3.73 \ /* + */ 3.12 , 3.50 \ /* + */ 2.96 , 3.31 \ /* + */ 2.83 , 3.15 \ /* + */ 2.71 , 3.01 \ /* + */ 2.62 , 2.89 \ /* + */ 2.53 , 2.79 \ /* + */ 2.46 , 2.70 \ /* + */ 2.39 , 2.62 \ /* + */ 2.33 , 2.55 \ /* + */ 2.28 , 2.49 \ /* + */ 2.23 , 2.43 \ /* + */ 2.19 , 2.38 \ /* + */ 2.15 , 2.33 \ /* + */ 2.11 , 2.29 \ /* + */ 2.08 , 2.25 \ /* + */ 2.05 , 2.21 \ /* + */ 2.02 , 2.18 \ /* + */ 1.99 , 2.14 \ /* + */ 1.97 , 2.11 \ /* + */ 1.94 , 2.08 \ /* + */ 1.92 , 2.06 \ /* + */ 1.90 , 2.03 \ /* + */ 1.88 , 2.01 \ /* + */ 1.86 , 1.99 \ /* + */ 1.85 , 1.97 \ /* + */ 1.83 , 1.95 \ /* + */ 1.81 , 1.93 \ /* + */ 1.80 , 1.91 \ /* + */ 1.79 , 1.89 \ /* + */ 1.77 , 1.88 \ /* + */ 1.76 , 1.86 \ /* + */ 1.75 , 1.85 \ /* + */ 1.74 , 1.83 \ /* + */ 1.72 , 1.82 \ /* + */ 1.71 , 1.81 \ /* + */ 1.70 , 1.80 \ /* + */ 1.69 , 1.78 \ /* + */ 1.68 , 1.77 \ /* + */ 1.67 , 1.76 \ /* + */ 1.66 , 1.75 \ /* + */ 1.66 , 1.74 \ /* + */ 1.65 , 1.73 \ /* + */ 1.64 , 1.72 \ /* + */ 1.63 , 1.71 \ /* + */ 1.62 , 1.70 \ /* + */ 1.62 , 1.69 \ /* + */ 1.61 , 1.69 \ /* + */ 1.60 , 1.68 \ /* + */ 1.60 , 1.67 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.58 , 1.65 \ /* + */ 1.57 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.56 , 1.63 \ /* + */ 1.55 , 1.62 \ /* + */ 1.55 , 1.61 \ /* + */ 1.54 , 1.61 \ /* + */ 1.54 , 1.60 \ /* + */ 1.53 , 1.60 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.52 , 1.58 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.57 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.54 \ /* + */ 1.48 , 1.54 \ /* + */ 1.48 , 1.53 \ /* + */ 1.48 , 1.53 \ /* + */ 1.47 , 1.53 \ /* + */ 1.47 , 1.52 \ /* + */ 1.46 , 1.52 \ /* + */ 1.46 , 1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.45 , 1.51 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.44 , 
1.50 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.49 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.42 , 1.48 \ /* + */ 1.42 , 1.47 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax20" { + matrix input `temp' = ( /* + */ 15.39 , . \ /* + */ 8.16 , 9.41 \ /* + */ 5.87 , 6.79 \ /* + */ 4.75 , 5.47 \ /* + */ 4.08 , 4.66 \ /* + */ 3.64 , 4.13 \ /* + */ 3.32 , 3.74 \ /* + */ 3.08 , 3.45 \ /* + */ 2.89 , 3.22 \ /* + */ 2.74 , 3.03 \ /* + */ 2.62 , 2.88 \ /* + */ 2.51 , 2.76 \ /* + */ 2.42 , 2.65 \ /* + */ 2.35 , 2.56 \ /* + */ 2.28 , 2.48 \ /* + */ 2.22 , 2.40 \ /* + */ 2.17 , 2.34 \ /* + */ 2.12 , 2.28 \ /* + */ 2.08 , 2.23 \ /* + */ 2.04 , 2.19 \ /* + */ 2.01 , 2.15 \ /* + */ 1.98 , 2.11 \ /* + */ 1.95 , 2.07 \ /* + */ 1.92 , 2.04 \ /* + */ 1.89 , 2.01 \ /* + */ 1.87 , 1.98 \ /* + */ 1.85 , 1.96 \ /* + */ 1.83 , 1.93 \ /* + */ 1.81 , 1.91 \ /* + */ 1.79 , 1.89 \ /* + */ 1.77 , 1.87 \ /* + */ 1.76 , 1.85 \ /* + */ 1.74 , 1.83 \ /* + */ 1.73 , 1.82 \ /* + */ 1.72 , 1.80 \ /* + */ 1.70 , 1.79 \ /* + */ 1.69 , 1.77 \ /* + */ 1.68 , 1.76 \ /* + */ 1.67 , 1.74 \ /* + */ 1.66 , 1.73 \ /* + */ 1.65 , 1.72 \ /* + */ 1.64 , 1.71 \ /* + */ 1.63 , 1.70 \ /* + */ 1.62 , 1.69 \ /* + */ 1.61 , 1.68 \ /* + */ 1.60 , 1.67 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.58 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.56 , 1.62 \ /* + */ 1.56 , 1.62 \ /* + */ 1.55 , 1.61 \ /* + */ 1.54 , 1.60 \ /* + */ 1.54 , 1.59 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.52 , 1.57 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.55 \ /* + */ 1.49 , 1.54 \ /* + */ 1.49 , 1.54 \ /* + */ 1.48 , 1.53 \ /* + */ 1.48 , 1.53 \ /* + */ 1.47 , 1.52 \ /* + */ 1.47 , 1.52 \ /* + */ 1.47 , 1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.49 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.48 \ /* + */ 1.44 
, 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.47 \ /* + */ 1.43 , 1.47 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.46 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.42 \ /* + */ 1.38 , 1.42 \ /* + */ 1.38 , 1.42 \ /* + */ 1.38 , 1.42 \ /* + */ 1.38 , 1.41 \ /* + */ 1.37 , 1.41 \ /* + */ 1.37 , 1.41 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax30" { + matrix input `temp' = ( /* + */ 12.76 , . \ /* + */ 6.97 , 8.01 \ /* + */ 5.11 , 5.88 \ /* + */ 4.19 , 4.78 \ /* + */ 3.64 , 4.12 \ /* + */ 3.27 , 3.67 \ /* + */ 3.00 , 3.35 \ /* + */ 2.80 , 3.10 \ /* + */ 2.64 , 2.91 \ /* + */ 2.52 , 2.76 \ /* + */ 2.41 , 2.63 \ /* + */ 2.33 , 2.52 \ /* + */ 2.25 , 2.43 \ /* + */ 2.19 , 2.35 \ /* + */ 2.13 , 2.29 \ /* + */ 2.08 , 2.22 \ /* + */ 2.04 , 2.17 \ /* + */ 2.00 , 2.12 \ /* + */ 1.96 , 2.08 \ /* + */ 1.93 , 2.04 \ /* + */ 1.90 , 2.01 \ /* + */ 1.87 , 1.97 \ /* + */ 1.84 , 1.94 \ /* + */ 1.82 , 1.92 \ /* + */ 1.80 , 1.89 \ /* + */ 1.78 , 1.87 \ /* + */ 1.76 , 1.84 \ /* + */ 1.74 , 1.82 \ /* + */ 1.73 , 1.80 \ /* + */ 1.71 , 1.79 \ /* + */ 1.70 , 1.77 \ /* + */ 1.68 , 1.75 \ /* + */ 1.67 , 1.74 \ /* + */ 1.66 , 1.72 \ /* + */ 1.64 , 1.71 \ /* + */ 1.63 , 1.70 \ /* + */ 1.62 , 1.68 \ /* + */ 1.61 , 1.67 \ /* + */ 1.60 , 1.66 \ /* + */ 1.59 , 1.65 \ /* + */ 1.58 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.57 , 1.62 \ /* + */ 1.56 , 1.61 \ /* + */ 1.55 , 1.60 \ /* + */ 1.54 , 1.59 \ /* + */ 1.54 , 1.59 \ /* + */ 1.53 , 1.58 \ /* + */ 1.52 , 1.57 \ /* + */ 1.52 , 1.56 \ /* + */ 1.51 , 1.56 \ /* + */ 1.50 , 1.55 \ /* + */ 1.50 , 1.54 \ /* + */ 1.49 , 1.54 \ /* + */ 1.49 , 1.53 \ /* + */ 1.48 , 1.53 \ /* + */ 1.48 , 1.52 \ /* + */ 1.47 , 1.51 \ /* + */ 1.47 , 1.51 \ /* + */ 1.46 , 1.50 \ /* + */ 1.46 , 1.50 \ /* + */ 
1.45 , 1.49 \ /* + */ 1.45 , 1.49 \ /* + */ 1.44 , 1.48 \ /* + */ 1.44 , 1.48 \ /* + */ 1.44 , 1.47 \ /* + */ 1.43 , 1.47 \ /* + */ 1.43 , 1.47 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.43 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.42 \ /* + */ 1.39 , 1.42 \ /* + */ 1.38 , 1.42 \ /* + */ 1.38 , 1.41 \ /* + */ 1.38 , 1.41 \ /* + */ 1.37 , 1.41 \ /* + */ 1.37 , 1.40 \ /* + */ 1.37 , 1.40 \ /* + */ 1.37 , 1.40 \ /* + */ 1.36 , 1.40 \ /* + */ 1.36 , 1.39 \ /* + */ 1.36 , 1.39 \ /* + */ 1.36 , 1.39 \ /* + */ 1.36 , 1.38 \ /* + */ 1.35 , 1.38 \ /* + */ 1.35 , 1.38 \ /* + */ 1.35 , 1.38 \ /* + */ 1.35 , 1.37 \ /* + */ 1.34 , 1.37 \ /* + */ 1.34 , 1.37 \ /* + */ 1.34 , 1.37 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize10" { + matrix input `temp' = ( /* + */ 16.38 , . 
\ /* + */ 8.68 , 7.03 \ /* + */ 6.46 , 5.44 \ /* + */ 5.44 , 4.72 \ /* + */ 4.84 , 4.32 \ /* + */ 4.45 , 4.06 \ /* + */ 4.18 , 3.90 \ /* + */ 3.97 , 3.78 \ /* + */ 3.81 , 3.70 \ /* + */ 3.68 , 3.64 \ /* + */ 3.58 , 3.60 \ /* + */ 3.50 , 3.58 \ /* + */ 3.42 , 3.56 \ /* + */ 3.36 , 3.55 \ /* + */ 3.31 , 3.54 \ /* + */ 3.27 , 3.55 \ /* + */ 3.24 , 3.55 \ /* + */ 3.20 , 3.56 \ /* + */ 3.18 , 3.57 \ /* + */ 3.21 , 3.58 \ /* + */ 3.39 , 3.59 \ /* + */ 3.57 , 3.60 \ /* + */ 3.68 , 3.62 \ /* + */ 3.75 , 3.64 \ /* + */ 3.79 , 3.65 \ /* + */ 3.82 , 3.67 \ /* + */ 3.85 , 3.74 \ /* + */ 3.86 , 3.87 \ /* + */ 3.87 , 4.02 \ /* + */ 3.88 , 4.12 \ /* + */ 3.89 , 4.19 \ /* + */ 3.89 , 4.24 \ /* + */ 3.90 , 4.27 \ /* + */ 3.90 , 4.31 \ /* + */ 3.90 , 4.33 \ /* + */ 3.90 , 4.36 \ /* + */ 3.90 , 4.38 \ /* + */ 3.90 , 4.39 \ /* + */ 3.90 , 4.41 \ /* + */ 3.90 , 4.43 \ /* + */ 3.90 , 4.44 \ /* + */ 3.90 , 4.45 \ /* + */ 3.90 , 4.47 \ /* + */ 3.90 , 4.48 \ /* + */ 3.90 , 4.49 \ /* + */ 3.90 , 4.50 \ /* + */ 3.90 , 4.51 \ /* + */ 3.90 , 4.52 \ /* + */ 3.90 , 4.53 \ /* + */ 3.90 , 4.54 \ /* + */ 3.90 , 4.55 \ /* + */ 3.90 , 4.56 \ /* + */ 3.90 , 4.56 \ /* + */ 3.90 , 4.57 \ /* + */ 3.90 , 4.58 \ /* + */ 3.90 , 4.59 \ /* + */ 3.90 , 4.59 \ /* + */ 3.90 , 4.60 \ /* + */ 3.90 , 4.61 \ /* + */ 3.90 , 4.61 \ /* + */ 3.90 , 4.62 \ /* + */ 3.90 , 4.62 \ /* + */ 3.90 , 4.63 \ /* + */ 3.90 , 4.63 \ /* + */ 3.89 , 4.64 \ /* + */ 3.89 , 4.64 \ /* + */ 3.89 , 4.64 \ /* + */ 3.89 , 4.65 \ /* + */ 3.89 , 4.65 \ /* + */ 3.89 , 4.65 \ /* + */ 3.89 , 4.66 \ /* + */ 3.89 , 4.66 \ /* + */ 3.89 , 4.66 \ /* + */ 3.89 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.86 , 4.65 \ /* + */ 3.86 , 4.65 \ /* + */ 3.86 , 4.65 \ /* + */ 3.86 , 4.64 \ /* + */ 3.85 , 4.64 \ /* + */ 3.85 , 
4.64 \ /* + */ 3.85 , 4.63 \ /* + */ 3.85 , 4.63 \ /* + */ 3.84 , 4.62 \ /* + */ 3.84 , 4.62 \ /* + */ 3.84 , 4.61 \ /* + */ 3.84 , 4.60 \ /* + */ 3.83 , 4.60 \ /* + */ 3.83 , 4.59 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize15" { + matrix input `temp' = ( /* + */ 8.96 , . \ /* + */ 5.33 , 4.58 \ /* + */ 4.36 , 3.81 \ /* + */ 3.87 , 3.39 \ /* + */ 3.56 , 3.13 \ /* + */ 3.34 , 2.95 \ /* + */ 3.18 , 2.83 \ /* + */ 3.04 , 2.73 \ /* + */ 2.93 , 2.66 \ /* + */ 2.84 , 2.60 \ /* + */ 2.76 , 2.55 \ /* + */ 2.69 , 2.52 \ /* + */ 2.63 , 2.48 \ /* + */ 2.57 , 2.46 \ /* + */ 2.52 , 2.44 \ /* + */ 2.48 , 2.42 \ /* + */ 2.44 , 2.41 \ /* + */ 2.41 , 2.40 \ /* + */ 2.37 , 2.39 \ /* + */ 2.34 , 2.38 \ /* + */ 2.32 , 2.38 \ /* + */ 2.29 , 2.37 \ /* + */ 2.27 , 2.37 \ /* + */ 2.25 , 2.37 \ /* + */ 2.24 , 2.37 \ /* + */ 2.22 , 2.38 \ /* + */ 2.21 , 2.38 \ /* + */ 2.20 , 2.38 \ /* + */ 2.19 , 2.39 \ /* + */ 2.18 , 2.39 \ /* + */ 2.19 , 2.40 \ /* + */ 2.22 , 2.41 \ /* + */ 2.33 , 2.42 \ /* + */ 2.40 , 2.42 \ /* + */ 2.45 , 2.43 \ /* + */ 2.48 , 2.44 \ /* + */ 2.50 , 2.45 \ /* + */ 2.52 , 2.54 \ /* + */ 2.53 , 2.55 \ /* + */ 2.54 , 2.66 \ /* + */ 2.55 , 2.73 \ /* + */ 2.56 , 2.78 \ /* + */ 2.57 , 2.82 \ /* + */ 2.57 , 2.85 \ /* + */ 2.58 , 2.87 \ /* + */ 2.58 , 2.89 \ /* + */ 2.58 , 2.91 \ /* + */ 2.59 , 2.92 \ /* + */ 2.59 , 2.93 \ /* + */ 2.59 , 2.94 \ /* + */ 2.59 , 2.95 \ /* + */ 2.59 , 2.96 \ /* + */ 2.60 , 2.97 \ /* + */ 2.60 , 2.98 \ /* + */ 2.60 , 2.98 \ /* + */ 2.60 , 2.99 \ /* + */ 2.60 , 2.99 \ /* + */ 2.60 , 3.00 \ /* + */ 2.60 , 3.00 \ /* + */ 2.60 , 3.01 \ /* + */ 2.60 , 3.01 \ /* + */ 2.60 , 3.02 \ /* + */ 2.61 , 3.02 \ /* + */ 2.61 , 3.02 \ /* + */ 2.61 , 3.03 \ /* + */ 2.61 , 3.03 \ /* + */ 2.61 , 3.03 \ /* + */ 2.61 , 3.03 \ /* + */ 2.61 , 3.04 \ /* + */ 2.61 , 3.04 \ /* + */ 2.61 , 3.04 \ /* + */ 2.60 , 3.04 \ /* + */ 2.60 , 3.04 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 
, 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.04 \ /* + */ 2.58 , 3.04 \ /* + */ 2.58 , 3.04 \ /* + */ 2.58 , 3.04 \ /* + */ 2.58 , 3.04 \ /* + */ 2.58 , 3.03 \ /* + */ 2.57 , 3.03 \ /* + */ 2.57 , 3.03 \ /* + */ 2.57 , 3.03 \ /* + */ 2.57 , 3.02 \ /* + */ 2.56 , 3.02 \ /* + */ 2.56 , 3.02 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize20" { + matrix input `temp' = ( /* + */ 6.66 , . \ /* + */ 4.42 , 3.95 \ /* + */ 3.69 , 3.32 \ /* + */ 3.30 , 2.99 \ /* + */ 3.05 , 2.78 \ /* + */ 2.87 , 2.63 \ /* + */ 2.73 , 2.52 \ /* + */ 2.63 , 2.43 \ /* + */ 2.54 , 2.36 \ /* + */ 2.46 , 2.30 \ /* + */ 2.40 , 2.25 \ /* + */ 2.34 , 2.21 \ /* + */ 2.29 , 2.17 \ /* + */ 2.25 , 2.14 \ /* + */ 2.21 , 2.11 \ /* + */ 2.18 , 2.09 \ /* + */ 2.14 , 2.07 \ /* + */ 2.11 , 2.05 \ /* + */ 2.09 , 2.03 \ /* + */ 2.06 , 2.02 \ /* + */ 2.04 , 2.01 \ /* + */ 2.02 , 1.99 \ /* + */ 2.00 , 1.98 \ /* + */ 1.98 , 1.98 \ /* + */ 1.96 , 1.97 \ /* + */ 1.95 , 1.96 \ /* + */ 1.93 , 1.96 \ /* + */ 1.92 , 1.95 \ /* + */ 1.90 , 1.95 \ /* + */ 1.89 , 1.95 \ /* + */ 1.88 , 1.94 \ /* + */ 1.87 , 1.94 \ /* + */ 1.86 , 1.94 \ /* + */ 1.85 , 1.94 \ /* + */ 1.84 , 1.94 \ /* + */ 1.83 , 1.94 \ /* + */ 1.82 , 1.94 \ /* + */ 1.81 , 1.95 \ /* + */ 1.81 , 1.95 \ /* + */ 1.80 , 1.95 \ /* + */ 1.79 , 1.95 \ /* + */ 1.79 , 1.96 \ /* + */ 1.78 , 1.96 \ /* + */ 1.78 , 1.97 \ /* + */ 1.80 , 1.97 \ /* + */ 1.87 , 1.98 \ /* + */ 1.92 , 1.98 \ /* + */ 1.95 , 1.99 \ /* + */ 1.97 , 2.00 \ /* + */ 1.99 , 2.00 \ /* + */ 2.00 , 2.01 \ /* + */ 2.01 , 2.09 \ /* + */ 2.02 , 2.11 \ /* + */ 2.03 , 2.18 \ /* + */ 2.04 , 2.23 \ /* + */ 2.04 , 2.27 \ /* + */ 2.05 , 2.29 \ /* + */ 2.05 , 2.31 \ /* + */ 2.06 , 2.33 \ /* + */ 2.06 , 2.34 \ /* + */ 2.07 , 2.35 \ /* + */ 
2.07 , 2.36 \ /* + */ 2.07 , 2.37 \ /* + */ 2.08 , 2.38 \ /* + */ 2.08 , 2.39 \ /* + */ 2.08 , 2.39 \ /* + */ 2.08 , 2.40 \ /* + */ 2.09 , 2.40 \ /* + */ 2.09 , 2.41 \ /* + */ 2.09 , 2.41 \ /* + */ 2.09 , 2.41 \ /* + */ 2.09 , 2.42 \ /* + */ 2.09 , 2.42 \ /* + */ 2.09 , 2.42 \ /* + */ 2.09 , 2.43 \ /* + */ 2.10 , 2.43 \ /* + */ 2.10 , 2.43 \ /* + */ 2.10 , 2.43 \ /* + */ 2.10 , 2.44 \ /* + */ 2.10 , 2.44 \ /* + */ 2.10 , 2.44 \ /* + */ 2.10 , 2.44 \ /* + */ 2.10 , 2.44 \ /* + */ 2.09 , 2.44 \ /* + */ 2.09 , 2.44 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.07 , 2.44 \ /* + */ 2.07 , 2.44 \ /* + */ 2.07 , 2.44 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize25" { + matrix input `temp' = ( /* + */ 5.53 , . 
\ /* + */ 3.92 , 3.63 \ /* + */ 3.32 , 3.09 \ /* + */ 2.98 , 2.79 \ /* + */ 2.77 , 2.60 \ /* + */ 2.61 , 2.46 \ /* + */ 2.49 , 2.35 \ /* + */ 2.39 , 2.27 \ /* + */ 2.32 , 2.20 \ /* + */ 2.25 , 2.14 \ /* + */ 2.19 , 2.09 \ /* + */ 2.14 , 2.05 \ /* + */ 2.10 , 2.02 \ /* + */ 2.06 , 1.99 \ /* + */ 2.03 , 1.96 \ /* + */ 2.00 , 1.93 \ /* + */ 1.97 , 1.91 \ /* + */ 1.94 , 1.89 \ /* + */ 1.92 , 1.87 \ /* + */ 1.90 , 1.86 \ /* + */ 1.88 , 1.84 \ /* + */ 1.86 , 1.83 \ /* + */ 1.84 , 1.81 \ /* + */ 1.83 , 1.80 \ /* + */ 1.81 , 1.79 \ /* + */ 1.80 , 1.78 \ /* + */ 1.78 , 1.77 \ /* + */ 1.77 , 1.77 \ /* + */ 1.76 , 1.76 \ /* + */ 1.75 , 1.75 \ /* + */ 1.74 , 1.75 \ /* + */ 1.73 , 1.74 \ /* + */ 1.72 , 1.73 \ /* + */ 1.71 , 1.73 \ /* + */ 1.70 , 1.73 \ /* + */ 1.69 , 1.72 \ /* + */ 1.68 , 1.72 \ /* + */ 1.67 , 1.71 \ /* + */ 1.67 , 1.71 \ /* + */ 1.66 , 1.71 \ /* + */ 1.65 , 1.71 \ /* + */ 1.65 , 1.71 \ /* + */ 1.64 , 1.70 \ /* + */ 1.63 , 1.70 \ /* + */ 1.63 , 1.70 \ /* + */ 1.62 , 1.70 \ /* + */ 1.62 , 1.70 \ /* + */ 1.61 , 1.70 \ /* + */ 1.61 , 1.70 \ /* + */ 1.61 , 1.70 \ /* + */ 1.60 , 1.70 \ /* + */ 1.60 , 1.70 \ /* + */ 1.59 , 1.70 \ /* + */ 1.59 , 1.70 \ /* + */ 1.59 , 1.70 \ /* + */ 1.58 , 1.70 \ /* + */ 1.58 , 1.71 \ /* + */ 1.58 , 1.71 \ /* + */ 1.57 , 1.71 \ /* + */ 1.59 , 1.71 \ /* + */ 1.60 , 1.71 \ /* + */ 1.63 , 1.72 \ /* + */ 1.65 , 1.72 \ /* + */ 1.67 , 1.72 \ /* + */ 1.69 , 1.72 \ /* + */ 1.70 , 1.76 \ /* + */ 1.71 , 1.81 \ /* + */ 1.72 , 1.87 \ /* + */ 1.73 , 1.91 \ /* + */ 1.74 , 1.94 \ /* + */ 1.74 , 1.96 \ /* + */ 1.75 , 1.98 \ /* + */ 1.75 , 1.99 \ /* + */ 1.76 , 2.01 \ /* + */ 1.76 , 2.02 \ /* + */ 1.77 , 2.03 \ /* + */ 1.77 , 2.04 \ /* + */ 1.78 , 2.04 \ /* + */ 1.78 , 2.05 \ /* + */ 1.78 , 2.06 \ /* + */ 1.79 , 2.06 \ /* + */ 1.79 , 2.07 \ /* + */ 1.79 , 2.07 \ /* + */ 1.79 , 2.08 \ /* + */ 1.80 , 2.08 \ /* + */ 1.80 , 2.09 \ /* + */ 1.80 , 2.09 \ /* + */ 1.80 , 2.09 \ /* + */ 1.80 , 2.09 \ /* + */ 1.80 , 2.10 \ /* + */ 1.80 , 2.10 \ /* + */ 1.80 , 
2.10 \ /* + */ 1.80 , 2.10 \ /* + */ 1.80 , 2.10 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + return scalar cv=`cv' +end + +exit + +********************************** VERSION COMMENTS ********************************** +* 1.0.2: add logic for reg3. Sargan test +* 1.0.3: add prunelist to ensure that count of excluded exogeneous is correct +* 1.0.4: revise option to exog(), allow included exog to be specified as well +* 1.0.5: switch from reg3 to regress, many options and output changes +* 1.0.6: fixed treatment of nocons in Sargan and C-stat, and corrected problems +* relating to use of nocons combined with a constant as an IV +* 1.0.7: first option reports F-test of excluded exogenous; prunelist bug fix +* 1.0.8: dropped prunelist and switched to housekeeping of variable lists +* 1.0.9: added collinearity checks; C-stat calculated with recursive call; +* added ffirst option to report only F-test of excluded exogenous +* from 1st stage regressions +* 1.0.10: 1st stage regressions also report partial R2 of excluded exogenous +* 1.0.11: complete rewrite of collinearity approach - no longer uses calls to +* _rmcoll, does not track specific variables dropped; prunelist removed +* 1.0.12: reorganised display code and saved results to enable -replay()- +* 1.0.13: -robust- and -cluster- now imply -small- +* 1.0.14: fixed hascons bug; removed ivreg predict fn (it didn't work); allowed +* robust and cluster with z stats and correct dofs +* 1.0.15: implemented robust Sargan stat; changed to only F-stat, removed chi-sq; +* removed exog option (only orthog works) +* 1.0.16: added clusterised Sargan stat; robust Sargan handles collinearities; +* predict now works with standard SE options plus resids; fixed orthog() +* so it accepts time series operators etc. +* 1.0.17: fixed handling of weights. 
fw, aw, pw & iw all accepted. +* 1.0.18: fixed bug in robust Sargan code relating to time series variables. +* 1.0.19: fixed bugs in reporting ranks of X'X and Z'Z +* fixed bug in reporting presence of constant +* 1.0.20: added GMM option and replaced robust Sargan with (equivalent) J; +* added saved statistics of 1st stage regressions +* 1.0.21: added Cragg HOLS estimator, including allowing empty endog list; +* -regress- syntax now not allowed; revised code searching for "_cons" +* 1.0.22: modified cluster output message; fixed bug in replay for Sargan/Hansen stat; +* exactly identified Sargan/Hansen now exactly zero and p-value not saved as e(); +* cluster multiplier changed to 1 (from buggy multiplier), in keeping with +* eg Wooldridge 2002 p. 193. +* 1.0.23: fixed orthog option to prevent abort when restricted equation is underid. +* 1.0.24: fixed bug if 1st stage regressions yielded missing values for saving in e(). +* 1.0.25: Added Shea version of partial R2 +* 1.0.26: Replaced Shea algorithm with Godfrey algorithm +* 1.0.27: Main call to regress is OLS form if OLS or HOLS is specified; error variance +* in Sargan and C statistics use small-sample adjustment if -small- option is +* specified; dfn of S matrix now correctly divided by sample size +* 1.0.28: HAC covariance estimation implemented +* Symmetrize all matrices before calling syminv +* Added hack to catch F stats that ought to be missing but actually have a +* huge-but-not-missing value +* Fixed dof of F-stat - was using rank of ZZ, should have used rank of XX (couldn't use df_r +* because it isn't always saved. This is because saving df_r triggers small stats +* (t and F) even when -post- is called without dof() option, hence df_r saved only +* with -small- option and hence a separate saved macro Fdf2 is needed. 
+* Added rankS to saved macros +* Fixed trap for "no regressors specified" +* Added trap to catch gmm option with no excluded instruments +* Allow OLS syntax (no endog or excluded IVs specified) +* Fixed error messages and traps for rank-deficient robust cov matrix; includes +* singleton dummy possibility +* Capture error if posting estimated VCV that isn't pos def and report slightly +* more informative error message +* Checks 3 variable lists (endo, inexog, exexog) separately for collinearities +* Added AC (autocorrelation-consistent but conditionally-homoskedastic) option +* Sargan no longer has small-sample correction if -small- option +* robust, cluster, AC, HAC all passed on to first-stage F-stat +* bw must be < T +* 1.0.29 -orthog- also displays Hansen-Sargan of unrestricted equation +* Fixed collinearity check to include nocons as well as hascons +* Fixed small bug in Godfrey-Shea code - macros were global rather than local +* Fixed larger bug in Godfrey-Shea code - was using mixture of sigma-squares from IV and OLS +* with and without small-sample corrections +* Added liml and kclass +* 1.0.30 Changed order of insts macro to match saved matrices S and W +* 2.0.00 Collinearities no longer -qui- +* List of instruments tested in -orthog- option prettified +* 2.0.01 Fixed handling of nocons with no included exogenous, including LIML code +* 2.0.02 Allow C-test if unrestricted equation is just-identified. Implemented by +* saving Hansen-Sargan dof as = 0 in e() if just-identified. 
+* 2.0.03 Added score() option per latest revision to official ivreg +* 2.0.04 Changed score() option to pscore() per new official ivreg +* 2.0.05 Fixed est hold bug in first-stage regressions +* Fixed F-stat finite sample adjustment with cluster option to match official Stata +* Fixed F-stat so that it works with hascons (collinearity with constant is removed) +* Fixed bug in F-stat code - wasn't handling failed posting of vcv +* No longer allows/ignores nonsense options +* 2.0.06 Modified lsStop to sync with official ivreg 5.1.3 +* 2.0.07a Working version of CUE option +* Added sortpreserve, ivar and tvar options +* Fixed smalls bug in calculation of T for AC/HAC - wasn't using the last ob +* in QS kernel, and didn't take account of possible dropped observations +* 2.0.07b Fixed macro bug that truncated long varlists +* 2.0.07c Added dof option. +* Changed display of RMSE so that more digits are displayed (was %8.1g) +* Fixed small bug where cstat was local macro and should have been scalar +* Fixed bug where C stat failed with cluster. NB: wmatrix option and cluster are not compatible! +* 2.0.7d Fixed bug in dof option +* 2.1.0 Added first-stage identification, weak instruments, and redundancy stats +* 2.1.01 Tidying up cue option checks, reporting of cue in output header, etc. +* 2.1.02 Used Poskitt-Skeels (2002) result that C-D eval = cceval / (1-cceval) +* 2.1.03 Added saved lists of separate included and excluded exogenous IVs +* 2.1.04 Added Anderson-Rubin test of signif of endog regressors +* 2.1.05 Fix minor bugs relating to cluster and new first-stage stats +* 2.1.06 Fix bug in cue: capture estimates hold without corresponding capture on estimates unhold +* 2.1.07 Minor fix to ereturn local wexp, promote to version 8.2 +* 2.1.08 Added dofminus option, removed dof option. Added A-R test p-values to e(). +* Minor bug fix to A-R chi2 test - was N chi2, should have been N-L chi2. +* Changed output to remove potentially misleading refs to N-L etc. 
+* Bug fix to rhs count - sometimes regressors could have exact zero coeffs +* Bug fix related to cluster - if user omitted -robust-, orthog would use Sargan and not J +* Changed output of Shea R2 to make clearer that F and p-values do not refer to it +* Improved handling of collinearites to check across inexog, exexog and endo lists +* Total weight statement moved to follow summ command +* Added traps to catch errors if no room to save temporary estimations with _est hold +* Added -savefirst- option. Removed -hascons-, now synonymous with -nocons-. +* 2.1.09 Fixes to dof option with cluster so it no longer mimics incorrect areg behavior +* Local ivreg2_cmd to allow testing under name ivreg2 +* If wmatrix supplied, used (previously not used if non-robust sargan stat generated) +* Allowed OLS using (=) syntax (empty endo and exexog lists) +* Clarified error message when S matrix is not of full rank +* cdchi2p, ardf, ardf_r added to saved macros +* first and ffirst replay() options; DispFirst and DispFFirst separately codes 1st stage output +* Added savefprefix, macro with saved first-stage equation names. +* Added version option. 
+* Added check for duplicate variables to collinearity checks +* Rewrote/simplified Godfrey-Shea partial r2 code +* 2.1.10 Added NOOUTput option +* Fixed rf bug so that first does not trigger unnecessary saved rf +* Fixed cue bug - was not starting with robust 2-step gmm if robust/cluster +* 2.1.11 Dropped incorrect/misleading dofminus adjustments in first-stage output summary +* 2.1.12 Collinearity check now checks across inexog/exexog/endog simultaneously +* 2.1.13 Added check to catch failed first-stage regressions +* Fixed misleading failed C-stat message +* 2.1.14 Fixed mishandling of missing values in AC (non-robust) block +* 2.1.15 Fixed bug in RF - was ignoring weights +* Added -endog- option +* Save W matrix for all cases; ensured copy is posted with wmatrix option so original isn't zapped +* Fixed cue bug - with robust, was entering IV block and overwriting correct VCV +* 2.1.16 Added -fwl- option +* Saved S is now robust cov matrix of orthog conditions if robust, whereas W is possibly non-robust +* weighting matrix used by estimator. inv(S)=W if estimator is efficient GMM. +* Removed pscore option (dropped by official ivreg). +* Fixed bug where -post- would fail because of missing values in vcv +* Remove hascons as synonym for nocons +* OLS now outputs 2nd footer with variable lists +* 2.1.17 Reorganization of code +* Added ll() macro +* Fixed N bug where weights meant a non-integer ob count that was rounded down +* Fixed -fwl- option so it correctly handles weights (must include when partialling-out) +* smatrix option takes over from wmatrix option. Consistent treatment of both. +* Saved smatrix and wmatrix now differ in case of inefficient GMM. +* Added title() and subtitle() options. +* b0 option returns a value for the Sargan/J stat even if exactly id'd. +* (Useful for S-stat = value of GMM objective function.) +* HAC and AC now allowed with LIML and k-class. 
+* Collinearity improvements: bug fixed because collinearity was mistakenly checked across +* inexog/exexog/endog simultaneously; endog predicted exactly by IVs => reclassified as inexog; +* _rmcollright enforces inexog>endo>exexog priority for collinearities, if Stata 9.2 or later. +* K-class, LIML now report Sargan and J. C-stat based on Sargan/J. LIML reports AR if homosked. +* nb: can always easily get a C-stat for LIML based on diff of two AR stats. +* Always save Sargan-Hansen as e(j); also save as e(sargan) if homoskedastic. +* Added Stock-Watson robust SEs options sw() +* 2.1.18 Added Cragg-Donald-Stock-Yogo weak ID statistic critical values to main output +* Save exexog_ct, inexog_ct and endog_ct as macros +* Stock-Watson robust SEs now assume ivar is group variable +* Option -sw- is standard SW. Option -swpsd- is PSD version a la page 6 point 10. +* Added -noid- option. Suppresses all first-stage and identification statistics. +* Internal calls to ivreg2 use noid option. +* Added hyperlinks to ivreg2.hlp and helpfile argument to display routines to enable this. 
+* 2.1.19 Added matrix rearrangement and checks for smatrix and wmatrix options +* Recursive calls to cstat simplified - no matrix rearrangement or separate robust/nonrobust needed +* Reintroduced weak ID stats to ffirst output +* Added robust ID stats to ffirst output for case of single endogenous regressor +* Fixed obscure bug in reporting 1st stage partial r2 - would report zero if no included exogenous vars +* Removed "HOLS" in main output (misleading if, e.g., estimation is AC but not HAC) +* Removed "ML" in main output if no endogenous regressors - now all ML is labelled LIML +* model=gmm is now model=gmm2s; wmatrix estimation is model=gmm +* wmatrix relates to gmm estimator; smatrix relates to gmm var-cov matrix; b0 behavior equiv to wmatrix +* b0 option implies nooutput and noid options +* Added nocollin option to skip collinearity checks +* Fixed minor display bug in ffirst output for endog vars with varnames > 12 characters +* Fixed bug in saved rf and first-stage results for vars with long varnames; uses permname +* Fixed bug in model df - had counted RHS, now calculates rank(V) since latter may be rank-deficient +* Rank of V now saved as macro rankV +* fwl() now allows partialling-out of just constant with _cons +* Added Stock-Wright S statistic (but adds overhead - calls preserve) +* Properties now include svyj. +* Noted only: fwl bug doesn't allow time-series operators. +* 2.1.20 Fixed Stock-Wright S stat bug - didn't allow time-series operators +* 2.1.21 Fixed Stock-Wright S stat to allow for no exog regressors cases +* 2.1.22 Misc fixes. Fixed bug in AC with aweights; was weighting zi'zi but not ei'ei. 
+* Fixed bug in AC; need to clear variable vt1 at start of loop +* If iweights, Nprec (#obs with precision) rounded to nearest integer to mimic official Stata treatment diff --git a/110/replication_package/replication/ado/plus/i/ivreg28.hlp b/110/replication_package/replication/ado/plus/i/ivreg28.hlp new file mode 100644 index 0000000000000000000000000000000000000000..f94f9cfa4c5f218486aa9755da660d3c6341be5d --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg28.hlp @@ -0,0 +1,1232 @@ +{smcl} +{* 3feb2007}{...} +{hline} +help for {hi:ivreg28} +{hline} + +{title:Extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression} + +{p 4}Full syntax + +{p 8 14}{cmd:ivreg28} {it:depvar} [{it:varlist1}] +{cmd:(}{it:varlist2}{cmd:=}{it:varlist_iv}{cmd:)} [{it:weight}] +[{cmd:if} {it:exp}] [{cmd:in} {it:range}] +{bind:[{cmd:,} {cmd:gmm}} +{cmd:bw(}{it:#}{cmd:)} +{cmd:kernel(}{it:string}{cmd:)} +{cmd:liml} +{cmd:fuller(}{it:#}{cmd:)} +{cmd:kclass(}{it:#}{cmd:)} +{cmd:coviv} +{cmd:cue} +{cmd:cueinit}{cmd:(}{it:matrix}{cmd:)} +{cmdab:cueopt:ions}{cmd:(}{it:string}{cmd:)} +{cmdab:r:obust} +{cmdab:cl:uster}{cmd:(}{it:varname}{cmd:)} +{cmd:orthog(}{it:varlist_ex}{cmd:)} +{cmd:endog(}{it:varlist_en}{cmd:)} +{cmdab:red:undant(}{it:varlist_ex}{cmd:)} +{cmd:fwl(}{it:varlist}{cmd:)} +{cmdab:sm:all} +{cmdab:noc:onstant} {cmdab:h:ascons} +{cmd:first} {cmd:ffirst} {cmd:noid} {cmd:savefirst} {cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:rf} {cmd:saverf} {cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:nocollin} {cmd:noid} +{cmdab:l:evel}{cmd:(}{it:#}{cmd:)} +{cmdab:nohe:ader} +{cmdab:nofo:oter} +{cmdab:ef:orm}{cmd:(}{it:string}{cmd:)} +{cmdab:dep:name}{cmd:(}{it:varname}{cmd:)} +{bind:{cmd:plus} ]} + +{p 4}Replay syntax + +{p 8 14}{cmd:ivreg28} +{bind:[{cmd:,} {cmd:first}} +{cmd:ffirst} {cmd:rf} +{cmdab:l:evel}{cmd:(}{it:#}{cmd:)} +{cmdab:nohe:ader} +{cmdab:nofo:oter} +{cmdab:ef:orm}{cmd:(}{it:string}{cmd:)} 
+{cmdab:dep:name}{cmd:(}{it:varname}{cmd:)} +{cmd:plus} ]} + +{p 4}Version syntax + +{p 8 14}{cmd:ivreg28}, {cmd:version} + +{p}{cmd:ivreg28} may be used with time-series or panel data, +in which case the data must be {cmd:tsset} +before using {cmd:ivreg28}; see help {help tsset}. + +{p}All {it:varlists} may contain time-series operators; +see help {help varlist}. + +{p}{cmd:by}, {cmd:rolling}, {cmd:statsby}, {cmd:xi}, +{cmd:bootstrap} and {cmd:jackknife} are allowed; see help {help prefix}. + +{p}{cmd:aweight}s, {cmd:fweight}s, {cmd:iweight}s and {cmd:pweight}s +are allowed; see help {help weights}. + +{p}The syntax of {help predict} following {cmd:ivreg28} is + +{p 8 16}{cmd:predict} [{it:type}] {it:newvarname} [{cmd:if} {it:exp}] +[{cmd:in} {it:range}] [{cmd:,} {it:statistic}] + +{p}where {it:statistic} is + +{p 8 23}{cmd:xb}{space 11}fitted values; the default{p_end} +{p 8 23}{cmdab:r:esiduals}{space 4}residuals{p_end} +{p 8 23}{cmd:stdp}{space 9}standard error of the prediction{p_end} + +{p}These statistics are available both in and out of sample; +type "{cmd:predict} {it:...} {cmd:if e(sample)} {it:...}" +if wanted only for the estimation sample. 
+ +{title:Contents} +{p 2}{help ivreg28##s_description:Description}{p_end} +{p 2}{help ivreg28##s_robust:Calculation of robust, AC, HAC standard errors}{p_end} +{p 2}{help ivreg28##s_gmm:GMM estimation}{p_end} +{p 2}{help ivreg28##s_liml:LIML, k-class and GMM-CUE estimation}{p_end} +{p 2}{help ivreg28##s_sumopt:Summary of robust, HAC, AC, GMM, LIML and CUE options}{p_end} +{p 2}{help ivreg28##s_overid:Testing overidentifying restrictions}{p_end} +{p 2}{help ivreg28##s_endog:Testing subsets of regressors and instruments for endogeneity}{p_end} +{p 2}{help ivreg28##s_relevance:Tests of under- and weak identification and instrument redundancy}{p_end} +{p 2}{help ivreg28##s_first:First stage regressions, identification, and weak-id-robust inference}{p_end} +{p 2}{help ivreg28##s_rf:Reduced form estimates}{p_end} +{p 2}{help ivreg28##s_fwl:Estimating the Frisch-Waugh-Lovell regression}{p_end} +{p 2}{help ivreg28##s_ols:OLS and Heteroskedastic OLS (HOLS) estimation}{p_end} +{p 2}{help ivreg28##s_collin:Collinearities}{p_end} +{p 2}{help ivreg28##s_speed:Speed options: nocollin and noid}{p_end} +{p 2}{help ivreg28##s_small:Small sample corrections}{p_end} +{p 2}{help ivreg28##s_options:Options summary}{p_end} +{p 2}{help ivreg28##s_macros:Remarks and saved results}{p_end} +{p 2}{help ivreg28##s_examples:Examples}{p_end} +{p 2}{help ivreg28##s_refs:References}{p_end} +{p 2}{help ivreg28##s_acknow:Acknowledgements}{p_end} +{p 2}{help ivreg28##s_citation:Authors}{p_end} +{p 2}{help ivreg28##s_citation:Citation of ivreg28}{p_end} + +{marker s_description}{title:Description} + +{p}{cmd:ivreg28} implements a range of single-equation estimation methods +for the linear regression model: OLS, instrumental +variables (IV, also known as two-stage least squares, 2SLS), +the generalized method of moments (GMM), +limited-information maximum likelihood (LIML), and k-class estimators. 
+In the language of IV/GMM, {it:varlist1} are the exogenous +regressors or "included instruments", +{it:varlist_iv} are the exogenous variables excluded +from the regression or "excluded instruments", +and {it:varlist2} the endogenous regressors that are being "instrumented". + +{p}{cmd:ivreg28} will also estimate linear regression models using +robust (heteroskedastic-consistent), +autocorrelation-consistent (AC) and +heteroskedastic and autocorrelation-consistent (HAC) variance estimates. + +{p}{cmd:ivreg28} provides extensions to Stata's official {cmd:ivreg} +and {cmd:newey}. +{cmd:ivreg28} supports the same command syntax as official {cmd:ivreg} +and (almost) all of its options. +The main extensions available are as follows: +two-step feasible GMM estimation ({cmd:gmm} option) +and continuously-updated GMM estimation ({cmd:cue} option); +LIML and k-class estimation; +automatic output overidentification and underidentification test statistics; +C statistic test of exogeneity of subsets of instruments +({cmd:orthog()} option); +endogeneity tests of endogenous regressors +({cmd:endog()} option); +test of instrument redundancy +({cmd:redundant()} option); +kernel-based autocorrelation-consistent (AC) +and heteroskedastic and autocorrelation consistent (HAC) standard errors +and covariance estimation ({cmd:bw(}{it:#}{cmd:)} option), +with user-specified choice of kernel ({cmd:kernel()} option); +default reporting of large-sample statistics +(z and chi-squared rather than t and F); +{cmd:small} option to report small-sample statistics; +first-stage regressions reported with various tests and statistics for +identification and instrument relevance; +{cmd:ffirst} option to report only these identification statistics +and not the first-stage regression results themselves; +{cmd:nofooter} option to suppress footer of regression output. 
+{cmd:ivreg28} can also be used for ordinary least squares (OLS) estimation +using the same command syntax as official {cmd:regress} and {cmd:newey}. + +{marker s_robust}{dlgtab:Calculation of robust, AC, HAC standard errors} + +{p}The standard errors reported by {cmd:ivreg28} can be made consistent +in the presence of a variety of violations of the assumption of i.i.d. errors: +{bind:(1) {cmd:robust}} causes {cmd:ivreg28} to report standard errors that are +robust to the presence of arbitrary heteroskedasticity; +{bind:(2) {cmd:cluster}} standard errors are robust to both +arbitrary heteroskedasticity and arbitrary intra-group correlation; +{bind:(3) {cmd:bw(}{it:#}{cmd:)}} requests AC standard errors that are +robust to arbitrary autocorrelation; +{bind:(4) {cmd:bw(}{it:#}{cmd:)}} combined with {cmd:robust} +requests HAC standard errors that are +robust to both arbitrary heteroskedasticity and arbitrary autocorrelation. + +{p}{cmd:ivreg28} allows a variety of options for kernel-based HAC and AC estimation. +The {cmd:bw(}{it:#}{cmd:)} option sets the bandwidth used in the estimation +and {cmd:kernel(}{it:string}{cmd:)} is the kernel used; +the default kernel is the Bartlett kernel, +also known in econometrics as Newey-West (see help {help newey}). +{cmd:ivreg28} can also be used for kernel-based estimation +with panel data, i.e., a cross-section of time series. +Before using {cmd:ivreg28} for kernel-based estimation +of time series or panel data, +the data must be {cmd:tsset}; see help {help tsset}. + +{marker s_gmm}{dlgtab:GMM estimation} + +{p}When combined with the above options, the {cmd:gmm} option generates +efficient estimates of the coefficients as well as consistent +estimates of the standard errors. +The {cmd:gmm} option implements the two-step efficient +generalized method of moments (GMM) estimator. 
+The efficient GMM estimator minimizes the GMM criterion function +J=N*g'*W*g, where N is the sample size, +g are the orthogonality or moment conditions +(specifying that all the exogenous variables, or instruments, +in the equation are uncorrelated with the error term) +and W is a weighting matrix. +In two-step efficient GMM, the efficient or optimal weighting matrix +is the inverse of an estimate of the covariance matrix of orthogonality conditions. +The efficiency gains of this estimator relative to the +traditional IV/2SLS estimator derive from the use of the optimal +weighting matrix, the overidentifying restrictions of the model, +and the relaxation of the i.i.d. assumption. +For an exactly-identified model, +the efficient GMM and traditional IV/2SLS estimators coincide, +and under the assumptions of conditional homoskedasticity and independence, +the efficient GMM estimator is the traditional IV/2SLS estimator. +For further details, see Hayashi (2000), pp. 206-13, and 226-27. + +{p}The efficient GMM estimators available with {cmd:gmm} correspond +to the above choices for consistent standard errors: +{bind:(1) used} on its own, {cmd:gmm} causes {cmd:ivreg28} to report +coefficient estimates that are efficient in presence of arbitrary heteroskedasticity; +{bind:(2) {cmd:gmm}} combined with {cmd:cluster} +generates coefficient estimates that are efficient in the presence of +arbitrary heteroskedasticity and arbitrary intra-group correlation; +{bind:(3) {cmd:gmm}} plus {cmd:bw(}{it:#}{cmd:)} requests coefficient estimates that are +efficient in the presence of arbitrary autocorrelation; +{bind:(4) {cmd:gmm}} plus {cmd:bw(}{it:#}{cmd:)} and {cmd:robust} +generates coefficient estimates that are efficient in the presence of +both arbitrary heteroskedasticity and arbitrary autocorrelation. 
+ +{marker s_liml}{dlgtab:LIML, k-class and GMM-CUE estimation} + +{marker liml}{p} Maximum-likelihood estimation of a single equation of this form +(endogenous RHS variables and excluded instruments) +is known as limited-information maximum likelihood or LIML. +The overidentifying restrictions test +reported after LIML estimation is the Anderson-Rubin (1950) overidentification +statistic in a homoskedastic context. +LIML, OLS and IV/2SLS are examples of k-class estimators. +LIML is a k-class estimator with k=the LIML eigenvalue lambda; +2SLS is a k-class estimator with k=1; +OLS is a k-class estimator with k=0. +Estimators based on other values of k have been proposed. +Fuller's modified LIML (available with the {cmd:fuller(}{it:#}{cmd:)} option) +sets k = lambda - alpha/(N-L), where lambda is the LIML eigenvalue, +L = number of instruments (included and excluded), +and the Fuller parameter alpha is a user-specified positive constant. +Nagar's bias-adjusted 2SLS estimator can be obtained with the +{cmd:kclass(}{it:#}{cmd:)} option by setting +k = 1 + (L-K)/N, where L-K = number of overidentifying restrictions +and N = the sample size. +For a discussion of LIML and k-class estimators, +see Davidson and MacKinnon (1993, pp. 644-51). + +{p} The GMM generalization of the LIML estimator +to the case of possibly heteroskedastic +and autocorrelated disturbances +is the "continuously-updated" GMM estimator or CUE +of Hansen, Heaton and Yaron (1996). +The CUE estimator directly maximizes the GMM objective function +J=N*g'*W(b_cue)*g, where W(b_cue) is an optimal weighting matrix +that depends on the estimated coefficients b_cue. +{cmd:cue} combined with {cmd:robust}, {cmd:cluster}, and/or {cmd:bw}, +generates coefficient estimates that are efficient in the presence +of the corresponding deviations from homoskedasticity. +Specifying {cmd:cue} with no other options +is equivalent to the combination of the options {cmd:liml} and {cmd:coviv}. 
+The CUE estimator requires numerical optimization methods, +and the implementation here uses Stata's {cmd:ml} routine. +The starting values are either IV or two-step efficient GMM +coefficient estimates; +these can be overridden with the {cmd:cueinit} option, +which takes the matrix of starting values b as its argument. +{cmd:cueoptions} passes options to Stata's {cmd:ml}; see help {help ml}. +Estimation with the {cmd:cue} option can be slow and problematic, +and it should be used with caution. + +{marker s_sumopt}{dlgtab:Summary of robust, HAC, AC, GMM, LIML and CUE options} + +{p}To summarize the robust, HAC, AC, GMM, LIML and CUE options: + +{p 4}{cmd:robust} => heteroskedastic-robust SEs{p_end} +{p 4}{cmd:gmm} => heteroskedastic-efficient two-step GMM estimator{p_end} +{p 4}{cmd:robust}+{cmd:gmm} => same as {cmd:gmm}{p_end} +{p 4}{cmd:bw} => autocorrelation-robust SEs{p_end} +{p 4}{cmd:bw}+{cmd:robust} => heteroskedastic and autocorrelation-robust SEs{p_end} +{p 4}{cmd:bw}+{cmd:gmm} => autocorrelation-efficient two-step GMM estimator{p_end} +{p 4}{cmd:bw}+{cmd:robust}+{cmd:gmm} => heteroskedastic and autocorrelation-efficient two-step GMM estimator{p_end} +{p 4}{cmd:liml} => LIML estimation with non-robust SEs{p_end} +{p 4}{cmd:liml}+{cmd:coviv} => LIML estimation with alternative non-robust SEs{p_end} +{p 4}{cmd:liml}+{cmd:robust} => LIML estimation with heteroskedastic-robust SEs{p_end} +{p 4}{cmd:cue} => same as {cmd:liml}+{cmd:coviv}{p_end} +{p 4}{cmd:cue}+{cmd:robust} => heteroskedastic-efficient continuously-updated GMM estimator{p_end} +{p 4}{cmd:cue}+{cmd:bw} => autocorrelation-efficient continuously-updated GMM estimator{p_end} +{p 4 8}{cmd:cue}+{cmd:bw}+{cmd:robust} => heteroskedastic and autocorrelation-efficient continuously updated GMM estimator{p_end} + +{p}For further details, see Hayashi (2000), pp. 206-13 and 226-27 +(on GMM estimation), Wooldridge (2002), p. 193 (on cluster-robust GMM), +and Hayashi (2000), pp. 
406-10 or Cushing and McGarvey (1999) +(on kernel-based covariance estimation). + +{marker s_overid}{marker overidtests}{dlgtab:Testing overidentifying restrictions} + +{p}The Sargan-Hansen test is a test of overidentifying restrictions. +The joint null hypothesis is that the instruments are valid +instruments, i.e., uncorrelated with the error term, +and that the excluded instruments are correctly excluded from the estimated equation. +Under the null, the test statistic is distributed as chi-squared +in the number of overidentifying restrictions. +A rejection casts doubt on the validity of the instruments. +For the efficient GMM estimator, the test statistic is +Hansen's J statistic, the minimized value of the GMM criterion function. +For the 2SLS estimator, the test statistic is Sargan's statistic, +typically calculated as N*R-squared from a regression of the IV residuals +on the full set of instruments. +Under the assumption of conditional homoskedasticity, +Hansen's J statistic becomes Sargan's statistic. +The J statistic is consistent in the presence of heteroskedasticity +and (for HAC-consistent estimation) autocorrelation; +Sargan's statistic is consistent if the disturbance is homoskedastic +and (for AC-consistent estimation) if it is also autocorrelated. +With {cmd:gmm}, {cmd:robust} and/or {cmd:cluster}, +Hansen's J statistic is reported. +In the latter case the statistic allows observations +to be correlated within groups. +For further discussion see e.g. Hayashi (2000, pp. 227-8, 407, 417). + +{p}The Sargan statistic can also be calculated after +{cmd:ivreg} or {cmd:ivreg28} by the command {cmd:overid}. +The features of {cmd:ivreg28} that are unavailable in {cmd:overid} +are the J statistic and the C statistic; +the {cmd:overid} options unavailable in {cmd:ivreg28} +are various small-sample and pseudo-F versions of Sargan's statistic +and its close relative, Basmann's statistic. +See help {help overid} (if installed). 
+ +{marker s_endog}{dlgtab:Testing subsets of regressors and instruments for endogeneity} + +{marker ctest}{p}The C statistic +(also known as a "GMM distance" +or "difference-in-Sargan" statistic) +implemented using the {cmd:orthog} option, +allows a test of a subset of the orthogonality conditions, i.e., +it is a test of the exogeneity of one or more instruments. +It is defined as +the difference of the Sargan-Hansen statistic +of the equation with the smaller set of instruments +(valid under both the null and alternative hypotheses) +and the equation with the full set of instruments, +i.e., including the instruments whose validity is suspect. +Under the null hypothesis that +both the smaller set of instruments +and the additional, suspect instruments are valid, +the C statistic is distributed as chi-squared +in the number of instruments tested. +Note that failure to reject the null hypothesis +requires that the full set of orthogonality conditions be valid; +the C statistic and the Sargan-Hansen test statistics +for the equations with both the smaller and full set of instruments +should all be small. +The instruments tested may be either excluded or included exogenous variables. +If excluded exogenous variables are being tested, +the equation that does not use these orthogonality conditions +omits the suspect instruments from the excluded instruments. +If included exogenous variables are being tested, +the equation that does not use these orthogonality conditions +treats the suspect instruments as included endogenous variables. +To guarantee that the C statistic is non-negative in finite samples, +the estimated covariance matrix of the full set orthogonality conditions +is used to calculate both Sargan-Hansen statistics +(in the case of simple IV/2SLS, this amounts to using the MSE +from the unrestricted equation to calculate both Sargan statistics). 
+If estimation is by LIML, the C statistic reported +is now based on the Sargan-Hansen test statistics from +the restricted and unrestricted equation. +For further discussion, see Hayashi (2000), pp. 218-22 and pp. 232-34. + +{marker endogtest}{p}Endogeneity tests of one or more endogenous regressors +can be implemented using the {cmd:endog} option. +Under the null hypothesis that the specified endogenous regressors +can actually be treated as exogenous, the test statistic is distributed +as chi-squared with degrees of freedom equal to the number of regressors tested. +The endogeneity test implemented by {cmd:ivreg28}, is, like the C statistic, +defined as the difference of two Sargan-Hansen statistics: +one for the equation with the smaller set of instruments, +where the suspect regressor(s) are treated as endogenous, +and one for the equation with the larger set of instruments, +where the suspect regressors are treated as exogenous. +Also like the C statistic, the estimated covariance matrix used +guarantees a non-negative test statistic. +Under conditional homoskedasticity, +this endogeneity test statistic is numerically equal to +a Hausman test statistic; see Hayashi (2000, pp. 233-34). +The endogeneity test statistic can also be calculated after +{cmd:ivreg} or {cmd:ivreg28} by the command {cmd:ivendog}. +Unlike the Durbin-Wu-Hausman tests reported by {cmd:ivendog}, +the {cmd:endog} option of {cmd:ivreg28} can report test statistics +that are robust to various violations of conditional homoskedasticity; +the {cmd:ivendog} option unavailable in {cmd:ivreg28} +is the Wu-Hausman F-test version of the endogeneity test. +See help {help ivendog} (if installed). + +{marker s_relevance}{dlgtab:Tests of under- and weak identification and instrument redundancy} + +{marker cancortest}{p}{cmd:ivreg28} automatically reports tests of +both underidentification and weak identification. 
+The Anderson (1984) canonical correlations test is
+a likelihood-ratio test of whether the equation is identified,
+i.e., that the excluded instruments are "relevant",
+meaning correlated with the endogenous regressors.
+The null hypothesis of the test is that
+the matrix of reduced form coefficients has rank=K-1
+where K=number of regressors,
+i.e., that the equation is underidentified.
+Under the null of underidentification,
+the statistic is distributed as chi-squared
+with degrees of freedom=(L-K+1)
+where L=number of instruments (included+excluded).
+A rejection of the null indicates that the model is identified.
+{it:Important}: a result of rejection of the null
+should be treated with caution,
+because weak instrument problems may still be present.
+See Hall et al. (1996) for a discussion of this test,
+and below for discussion of testing for the presence of weak instruments.
+Note: the Anderson canonical correlations test assumes the regressors are distributed
+as multivariate normal.
+
+{marker cdtest}{p}The test for weak identification automatically reported
+by {cmd:ivreg28} is based on the Cragg-Donald (1993) F statistic,
+a close relative of the Anderson canonical correlations statistic.
+Denoting the minimum eigenvalue of the canonical correlations as CCEV
+and the minimum eigenvalue of the Cragg-Donald statistic as CDEV,
+CDEV=CCEV/(1-CCEV),
+the Anderson LR test statistic is -N*ln(1-CCEV) and
+the Cragg-Donald F statistic is CDEV*(N-L)/L2,
+where L is the number of instruments and L2 is the number of excluded instruments.
+"Weak identification" arises when the excluded instruments are correlated
+with the endogenous regressors, but only weakly.
+Estimators can perform poorly when instruments are weak,
+and different estimators are more robust to weak instruments (e.g., LIML)
+than others (e.g., IV);
+see, e.g., Stock and Yogo (2002, 2005) for further discussion.
+Stock and Yogo (2005) have compiled critical values +for the Cragg-Donald F statistic for +several different estimators (IV, LIML, Fuller-LIML), +several different definitions of "perform poorly" (based on bias and test size), +and a range of configurations (up to 100 excluded instruments +and up to 2 or 3 endogenous regressors, +depending on the estimator). +{cmd:ivreg28} will report the Stock-Yogo critical values +if these are available; +missing values mean that the critical values +haven't been tabulated or aren't applicable. +See Stock and Yogo (2002, 2005) for details. +The critical values reported by {cmd:ivreg28} for +(2-step) GMM are the IV critical values, +and the critical values reported for CUE are the LIML +critical values. +Note that the test statistic and the critical values +assume conditional homoskedasticity and independence. +In the special case of a single endogenous regressor, +a robust test statistic for weak instruments is available +with the {cmd:first} or {cmd:ffirst} options; +see below under {help ivreg28##s_first:First stage regressions}. + +{marker redtest}{p}The {cmd:redundant} option allows a test of +whether a subset of excluded instruments is "redundant". +Excluded instruments are redundant if the asymptotic efficiency +of the estimation is not improved by using them. +The test statistic is a likelihood-ratio test +based on the canonical correlations +between the regressors and the instruments with, +and without, the instruments being tested. +Under the null that the specified instruments are redundant, +the statistic is distributed as chi-squared +with degrees of freedom=(#endogenous regressors)*(#instruments tested). +Rejection of the null indicates that +the instruments are not redundant. +See Hall and Peixe (2000) for further discussion of this test. +Note: this test assumes the regressors are distributed +as multivariate normal. 
+
+{p}Calculation and reporting of all underidentification
+and weak identification statistics
+can be suppressed with the {cmd:noid} option.
+
+{marker s_first}{dlgtab:First stage regressions, identification, and weak-id-robust inference}
+
+{marker partialr2}{p}The {cmd:first} and {cmd:ffirst} options report
+various first-stage results and identification statistics.
+Both the Anderson canonical correlations likelihood-ratio test statistic
+-N*ln(1-EV)
+and its close relative,
+the chi-squared version of the Cragg-Donald (1993) test statistic
+N*(EV/(1-EV)), are reported;
+both are tests of whether the equation is identified (see {help ivreg28##s_relevance:above}).
+The first-stage results also include Shea's (1997) "partial R-squared" measure
+of instrument relevance that takes
+intercorrelations among instruments into account,
+the more common form of "partial R-squared"
+(a.k.a. the "squared partial correlation" between the excluded
+instruments and the endogenous regressor in question),
+and the F-test of the excluded instruments
+in the corresponding first-stage regression.
+When the model has only one endogenous regressor,
+(a) the two measures of "partial R-squared" coincide;
+(b) the F-stat form of the Cragg-Donald statistic
+coincides with the (non-robust) first-stage F-test
+of the excluded instruments.
+The two partial R-squared measures, the F statistic,
+the degrees of freedom of the F statistic,
+and the p-value of the F statistic for each endogenous variable
+are saved in the matrix e(first).
+The first-stage results are always reported with small-sample statistics,
+to be consistent with the recommended use of the first-stage F-test as a diagnostic.
+If the estimated equation is reported with robust standard errors,
+the first-stage F-test is also robust.
+Note that in the special case of only one endogenous regressor,
+this provides a robust test of weak or underidentification.
+
+{marker wirobust}{p}The first-stage output also includes
+two statistics that provide weak-instrument robust inference
+for testing the significance of the endogenous regressors
+in the structural equation being estimated.
+The first statistic is the Anderson-Rubin (1949) test
+(not to be confused with the Anderson-Rubin overidentification test for LIML estimation;
+see {help ivreg28##s_liml:above}).
+The second is the closely related Stock-Wright (2000) S statistic.
+The null hypothesis tested in both cases
+is that the coefficients of the endogenous
+regressors in the structural equation are jointly equal to zero,
+and, in addition,
+that the overidentifying restrictions are valid.
+Both tests are robust to the presence of weak instruments.
+The tests are equivalent to estimating
+the reduced form of the equation
+(with the full set of instruments as regressors)
+and testing that the coefficients of the excluded instruments
+are jointly equal to zero.
+In the form reported by {cmd:ivreg28},
+the Anderson-Rubin statistic is a Wald test
+and the Stock-Wright statistic is a GMM-distance test.
+Both statistics are distributed as chi-squared
+with L2 degrees of freedom, where L2=number of excluded instruments.
+The traditional F-stat version of the Anderson-Rubin test is also reported.
+See Stock and Wright (2000), Dufour (2003), Chernozhukov and Hansen (2005) and Kleibergen (2007)
+for further discussion.
+For related alternative test statistics that are also robust to weak instruments,
+see {help condivreg} and the corresponding discussion
+in Moreira and Poi (2003) and Mikusheva and Poi (2006).
+
+{p}The {cmd:savefirst} option requests that
+the individual first-stage regressions are saved
+for later access using the {cmd:estimates} command.
+If saved, they can also be displayed using
+{cmd:first} or {cmd:ffirst} and the {cmd:ivreg28} replay syntax.
+The regressions are saved with the prefix "_ivreg28_", +unless the user specifies an alternative prefix with the +{cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} +option. + +{marker s_rf}{dlgtab:Reduced form estimates} + +{p}The {cmd:rf} option requests that +the reduced form estimation of the equation be displayed. +The {cmd:saverf} option requests that +the reduced form estimation is saved +for later access using the {cmd:estimates} command. +If saved, it can also be displayed using the +{cmd:rf} and the {cmd:ivreg28} replay syntax. +The regression is saved with the prefix "_ivreg28_", +unless the user specifies an alternative prefix with the +{cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} +option. + +{marker s_fwl}{dlgtab:Estimating the Frisch-Waugh-Lovell regression} + +{marker fwl}{p}The {cmd:fwl(}{it:varlist}{cmd:)} option requests that +the exogenous regressors in {it:varlist} are "partialled out" +from all the other variables +(other regressors and excluded instruments) in the estimation. +If the equation includes a constant, +it is also automatically partialled out as well. +The coefficients corresponding to the regressors in {it:varlist} +are not calculated. +By the Frisch-Waugh-Lovell (FWL) theorem, +the coefficients for the remaining regressors +are the same as those that would be obtained +if the variables were not partialled out. +The {cmd:fwl} option is most useful when using {cmd:cluster} +and #clusters < (#exogenous regressors + #excluded instruments). +In these circumstances, +the covariance matrix of orthogonality conditions S is not of full rank, +and efficient GMM and overidentification tests are infeasible +since the optimal weighting matrix W = {bind:S^-1} +cannot be calculated. +The problem can be addressed by using {cmd:fwl} +to partial out enough exogenous regressors for S to have full rank. 
+A similar problem arises when the regressors include a variable that is a singleton dummy, +i.e., a variable with one 1 and N-1 zeros or vice versa, +if a robust covariance matrix is requested. +The singleton dummy causes the robust covariance matrix estimator to be +less than full rank. +In this case, partialling-out the variable with the singleton dummy +solves the problem. +Specifying {cmd:fwl(_cons)} will cause just the constant to be partialled-out, +i.e., the equation will be estimated in deviations-from-means form. +Note that variable counts are not adjusted for the partialled-out variables. +This means that the model degrees of freedom +do not include the partialled-out variables, +and any small-sample statistics such as t or F statistics +will be affected. +Also note that after estimation using the {cmd:fwl} option, +the post-estimation {cmd:predict} can be used only to generate residuals, +and that in the current implementation, +{cmd:fwl} is not compatible with instruments (included or excluded) +that use time-series operators. + +{marker s_ols}{dlgtab:OLS and Heteroskedastic OLS (HOLS) estimation} + +{p}{cmd:ivreg28} also allows straightforward OLS estimation +by using the same syntax as {cmd:regress}, i.e., +{it:ivreg28 depvar varlist1}. +This can be useful if the user wishes to use one of the +features of {cmd:ivreg28} in OLS regression, e.g., AC or +HAC standard errors. + +{p}If the list of endogenous variables {it:varlist2} is empty +but the list of excluded instruments {it:varlist_iv} is not, +and the option {cmd:gmm} is specified, +{cmd:ivreg28} calculates Cragg's "heteroskedastic OLS" (HOLS) estimator, +an estimator that is more efficient than OLS +in the presence of heteroskedasticity of unknown form +(see Davidson and MacKinnon (1993), pp. 599-600). 
+If the option {cmd:bw(}{it:#}{cmd:)} is specified, +the HOLS estimator is efficient in the presence of +arbitrary autocorrelation; +if both {cmd:bw(}{it:#}{cmd:)} and {cmd:robust} are specified +the HOLS estimator is efficient in the presence of +arbitrary heteroskedasticity and autocorrelation; +and if {cmd:cluster(}{it:varname}{cmd:)} is used, +the HOLS estimator is efficient in the presence of +arbitrary heteroskedasticity and within-group correlation. +The efficiency gains of HOLS derive from the orthogonality conditions +of the excluded instruments listed in {it:varlist_iv}. +If no endogenous variables are specified and {cmd:gmm} is not specified, +{cmd:ivreg28} reports standard OLS coefficients. +The Sargan-Hansen statistic reported +when the list of endogenous variables {it:varlist2} is empty +is a Lagrange multiplier (LM) test +of the hypothesis that the excluded instruments {it:varlist_iv} are +correctly excluded from the restricted model. +If the estimation is LIML, the LM statistic reported +is now based on the Sargan-Hansen test statistics from +the restricted and unrestricted equation. +For more on LM tests, see e.g. Wooldridge (2002), pp. 58-60. +Note that because the approach of the HOLS estimator +has applications beyond heteroskedastic disturbances, +and to avoid confusion concerning the robustness of the estimates, +the estimators presented above as "HOLS" +are described in the output of {cmd:ivreg28} +as "2-Step GMM", "CUE", etc., as appropriate. + +{marker s_collin}{dlgtab:Collinearities} + +{p}{cmd:ivreg28} checks the lists of included instruments, +excluded instruments, and endogenous regressors +for collinearities and duplicates. If an endogenous regressor is +collinear with the instruments, it is reclassified as exogenous. If any +endogenous regressors are collinear with each other, some are dropped. +If there are any collinearities among the instruments, some are dropped. 
+In Stata 9+, excluded instruments are dropped before included instruments.
+If any variables are dropped, a list of their names is saved
+in the macros {cmd:e(collin)} and/or {cmd:e(dups)}.
+Lists of the included and excluded instruments
+and the endogenous regressors with collinear variables and duplicates removed
+are also saved in macros with "1" appended
+to the corresponding macro names.
+
+{p}Collinearity checks can be suppressed with the {cmd:nocollin} option.
+
+{marker s_speed}{dlgtab:Speed options: nocollin and noid}
+
+{p}Two options are available for speeding execution.
+{cmd:nocollin} specifies that the collinearity checks not be performed.
+{cmd:noid} suspends calculation and reporting of
+the underidentification and weak identification statistics
+in the main output.
+
+{marker s_small}{dlgtab:Small sample corrections}
+
+{p}Mean square error = sqrt(RSS/(N-K)) if {cmd:small}, = sqrt(RSS/N) otherwise.
+
+{p}If {cmd:robust} is chosen, the finite sample adjustment
+(see {hi:[R] regress}) to the robust variance-covariance matrix
+qc = N/(N-K) if {cmd:small}, qc = 1 otherwise.
+
+{p}If {cmd:cluster} is chosen, the finite sample adjustment
+qc = (N-1)/(N-K)*M/(M-1) if {cmd:small}, where M=number of clusters,
+qc = 1 otherwise.
+
+{p}The Sargan and C (difference-in-Sargan) statistics use
+error variance = RSS/N, i.e., there is no small sample correction.
+
+{p}A full discussion of these computations and related topics
+can be found in Baum, Schaffer, and Stillman (2003) and Baum, Schaffer and
+Stillman (2007). Some features of the program postdate the 2003 article.
+
+
+{marker s_options}{title:Options summary}
+
+{p 0 4}{cmd:gmm} requests the two-step efficient GMM estimator.
+If no endogenous variables are specified, the estimator is Cragg's HOLS estimator.
+See help {help ivgmm0} (if installed) for more details.
+
+{p 0 4}{cmd:bw(}{it:#}{cmd:)} implements AC or HAC covariance estimation
+with bandwidth equal to {it:#}, where {it:#} is an integer greater than zero.
+Specifying {cmd:robust} implements HAC covariance estimation;
+omitting it implements AC covariance estimation.
+
+{p 0 4}{cmd:kernel(}{it:string}{cmd:)} specifies the kernel
+to be used for AC and HAC covariance estimation;
+the default kernel is Bartlett (also known in econometrics
+as Newey-West). Other kernels available are (abbreviations in parentheses):
+Truncated (tru); Parzen (par); Tukey-Hanning (thann); Tukey-Hamming (thamm);
+Daniell (dan); Tent (ten); and Quadratic-Spectral (qua or qs).
+
+{p 4 4}Note: in the cases of the Bartlett, Parzen,
+and Tukey-Hanning/Hamming kernels, the number of lags used
+to construct the kernel estimate equals the bandwidth minus one.
+Stata's official {cmd:newey} implements
+HAC standard errors based on the Bartlett kernel,
+and requires the user to specify
+the maximum number of lags used and not the bandwidth;
+see help {help newey}.
+If these kernels are used with {cmd:bw(1)},
+no lags are used and {cmd:ivreg28} will report the usual
+Eicker/Huber/White/sandwich variance estimates.
+
+{p 0 4}{cmd:liml} requests the limited-information maximum likelihood estimator.
+
+{p 0 4}{cmd:fuller(}{it:#}{cmd:)} specifies that Fuller's modified LIML estimator
+is calculated using the user-supplied Fuller parameter alpha,
+a non-negative number.
+Alpha=1 has been suggested as a good choice.
+
+{p 0 4}{cmd:kclass(}{it:#}{cmd:)} specifies that a general k-class estimator is calculated
+using the user-supplied #, a non-negative number.
+
+{p 0 4}{cmd:coviv} specifies that the matrix used to calculate the
+covariance matrix for the LIML or k-class estimator
+is based on the 2SLS matrix, i.e., with k=1.
+In this case the covariance matrix will differ from that calculated for the 2SLS
+estimator only because the estimate of the error variance will differ.
+The default is for the covariance matrix to be based on the LIML or k-class matrix. + +{p 0 4}{cmd:cue} requests the GMM continuously-updated estimator (CUE). + +{p 0 4}{cmd:cueinit(}{it:matrix}{cmd:)} specifies that the starting values +for the CUE estimator use those in a user-supplied matrix b. +If omitted, the default behavior is to use starting values +from IV or 2-step efficient GMM estimation. + +{p 0 4}{cmd:cueopt(}{it:string}{cmd:)} passes user-specified options +to Stata's {cmd:ml} routine; see help {help ml}. + +{p 0 4}{cmd:robust} specifies that the Eicker/Huber/White/sandwich estimator of +variance is to be used in place of the traditional calculation. {cmd:robust} +combined with {cmd:cluster()} further allows residuals which are not +independent within cluster (although they must be independent between +clusters). See {hi:[U] Obtaining robust variance estimates}. + +{p 0 4}{cmd:cluster}{cmd:(}{it:varname}{cmd:)} specifies that the observations +are independent across groups (clusters) but not necessarily independent +within groups. {it:varname} specifies to which group each observation +belongs; e.g., {cmd:cluster(personid)} in data with repeated observations on +individuals. {cmd:cluster()} can be used with {help pweight}s to produce +estimates for unstratified cluster-sampled data, but see help {help svyreg} +for a command especially designed for survey data. Specifying {cmd:cluster()} +implies {cmd:robust}. + +{p 0 4}{cmd:orthog}{cmd:(}{it:varlist_ex}{cmd:)} requests that a C-statistic +be calculated as a test of the exogeneity of the instruments in {it:varlist_ex}. +These may be either included or excluded exogenous variables. +The standard order condition for identification applies: +the restricted equation that does not use these variables +as exogenous instruments must still be identified. 
+
+{p 0 4}{cmd:endog}{cmd:(}{it:varlist_en}{cmd:)} requests that a C-statistic
+be calculated as a test of the endogeneity
+of the endogenous regressors in {it:varlist_en}.
+
+{p 0 4}{cmd:redundant}{cmd:(}{it:varlist_ex}{cmd:)} requests a likelihood-ratio test
+of the redundancy of the instruments in {it:varlist_ex}.
+These must be excluded exogenous variables.
+The standard order condition for identification applies:
+the restricted equation that does not use these variables
+as exogenous instruments must still be identified.
+
+{p 0 4}{cmd:small} requests that small-sample statistics (F and t-statistics)
+be reported instead of large-sample statistics (chi-squared and z-statistics).
+Large-sample statistics are the default.
+The exception is the statistic for the significance of the regression,
+which is always reported as a small-sample F statistic.
+
+{p 0 4}{cmd:noconstant} suppresses the constant term (intercept) in the
+regression. If {cmd:noconstant} is specified, the constant term is excluded
+from both the final regression and the first-stage regression. To include a
+constant in the first-stage when {cmd:noconstant} is specified, explicitly
+include a variable containing all 1's in {it:varlist_iv}.
+
+{p 0 4}{cmd:first} requests that the full first-stage regression results be displayed,
+along with the associated diagnostic and identification statistics.
+
+{p 0 4}{cmd:ffirst} requests the first-stage diagnostic and identification statistics.
+The results are saved in various e() macros.
+
+{p 0 4}{cmd:nocollin} suppresses the checks for collinearities
+and duplicate variables.
+
+{p 0 4}{cmd:noid} suppresses the calculation and reporting
+of underidentification and weak identification statistics.
+
+{p 0 4}{cmd:savefirst} requests that the first-stage regression results
+are saved for later access using the {cmd:estimates} command.
+The names under which the first-stage regressions are saved
+are the names of the endogenous regressors prefixed by "_ivreg28_".
+If these use Stata's time-series operators,
+the "." is replaced by a "_".
+The maximum number of first-stage estimation results that can be saved
+depends on how many other estimation results the user has already saved
+and on the maximum supported by Stata (20 for Stata 8.2 and 9.0, 300 for Stata 9.1).
+
+{p 0 4}{cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} requests that
+the first-stage regression results be saved using the user-specified prefix
+instead of the default "_ivreg28_".
+
+{p 0 4}{cmd:rf} requests that the reduced-form estimation of the equation
+be displayed.
+
+{p 0 4}{cmd:saverf} requests that the reduced-form estimation of the equation
+be saved for later access using the {cmd:estimates} command.
+The estimation is stored under the name of the dependent variable
+prefixed by "_ivreg28_".
+If this uses Stata's time-series operators,
+the "." is replaced by a "_".
+
+{p 0 4}{cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} requests that
+the reduced-form estimation be saved using the user-specified prefix
+instead of the default "_ivreg28_".
+
+{p 0 4}{cmd:level(}{it:#}{cmd:)} specifies the confidence level, in percent,
+for confidence intervals of the coefficients; see help {help level}.
+
+{p 0 4}{cmd:noheader}, {cmd:eform()}, {cmd:depname()} and {cmd:plus}
+are for ado-file writers; see {hi:[R] ivreg} and {hi:[R] regress}.
+
+{p 0 4}{cmd:nofooter} suppresses the display of the footer containing
+identification and overidentification statistics,
+exogeneity and endogeneity tests,
+lists of endogenous variables and instruments, etc.
+
+{p 0 4}{cmd:version} causes {cmd:ivreg28} to display its current version number
+and to leave it in the macro {cmd:e(version)}.
+It cannot be used with any other options,
+and will clear any existing {cmd:e()} saved results.
+ +{marker s_macros}{title:Remarks and saved results} + +{p}{cmd:ivreg28} does not report an ANOVA table. +Instead, it reports the RSS and both the centered and uncentered TSS. +It also reports both the centered and uncentered R-squared. +NB: the TSS and R-squared reported by official {cmd:ivreg} is centered +if a constant is included in the regression, and uncentered otherwise. + +{p}{cmd:ivreg28} saves the following results in {cmd:e()}: + +Scalars +{col 4}{cmd:e(N)}{col 18}Number of observations +{col 4}{cmd:e(yy)}{col 18}Total sum of squares (SS), uncentered (y'y) +{col 4}{cmd:e(yyc)}{col 18}Total SS, centered (y'y - ((1'y)^2)/n) +{col 4}{cmd:e(rss)}{col 18}Residual SS +{col 4}{cmd:e(mss)}{col 18}Model SS =yyc-rss if the eqn has a constant, =yy-rss otherwise +{col 4}{cmd:e(df_m)}{col 18}Model degrees of freedom +{col 4}{cmd:e(df_r)}{col 18}Residual degrees of freedom +{col 4}{cmd:e(r2u)}{col 18}Uncentered R-squared, 1-rss/yy +{col 4}{cmd:e(r2c)}{col 18}Centered R-squared, 1-rss/yyc +{col 4}{cmd:e(r2)}{col 18}Centered R-squared if the eqn has a constant, uncentered otherwise +{col 4}{cmd:e(r2_a)}{col 18}Adjusted R-squared +{col 4}{cmd:e(ll)}{col 18}Log likelihood +{col 4}{cmd:e(rankxx)}{col 18}Rank of the matrix of observations on rhs variables=K +{col 4}{cmd:e(rankzz)}{col 18}Rank of the matrix of observations on instruments=L +{col 4}{cmd:e(rankV)}{col 18}Rank of covariance matrix V of coefficients +{col 4}{cmd:e(rankS)}{col 18}Rank of covariance matrix S of orthogonality conditions +{col 4}{cmd:e(rmse)}{col 18}root mean square error=sqrt(rss/(N-K)) if -small-, =sqrt(rss/N) otherwise +{col 4}{cmd:e(F)}{col 18}F statistic +{col 4}{cmd:e(N_clust)}{col 18}Number of clusters +{col 4}{cmd:e(bw)}{col 18}Bandwidth +{col 4}{cmd:e(lambda)}{col 18}LIML eigenvalue +{col 4}{cmd:e(kclass)}{col 18}k in k-class estimation +{col 4}{cmd:e(fuller)}{col 18}Fuller parameter alpha +{col 4}{cmd:e(sargan)}{col 18}Sargan statistic +{col 4}{cmd:e(sarganp)}{col 18}p-value of Sargan 
statistic +{col 4}{cmd:e(sargandf)}{col 18}dof of Sargan statistic = degree of overidentification = L-K +{col 4}{cmd:e(j)}{col 18}Hansen J statistic +{col 4}{cmd:e(jp)}{col 18}p-value of Hansen J statistic +{col 4}{cmd:e(jdf)}{col 18}dof of Hansen J statistic = degree of overidentification = L-K +{col 4}{cmd:e(arubin)}{col 18}Anderson-Rubin overidentification LR statistic +{col 4}{cmd:e(arubinp)}{col 18}p-value of Anderson-Rubin overidentification LR statistic +{col 4}{cmd:e(arubindf)}{col 18}dof of A-R overid statistic = degree of overidentification = L-K +{col 4}{cmd:e(idstat)}{col 18}Anderson canonical correlations LR statistic +{col 4}{cmd:e(idp)}{col 18}p-value of Anderson canonical correlations LR statistic +{col 4}{cmd:e(iddf)}{col 18}dof of Anderson canonical correlations LR statistic +{col 4}{cmd:e(cdf)}{col 18}Cragg-Donald F statistic +{col 4}{cmd:e(cdchi2)}{col 18}Cragg-Donald chi-sq statistic +{col 4}{cmd:e(cdchi2p)}{col 18}p-value of Cragg-Donald chi-sq statistic +{col 4}{cmd:e(arf)}{col 18}Anderson-Rubin F-test of significance of endogenous regressors +{col 4}{cmd:e(arfp)}{col 18}p-value of Anderson-Rubin F-test of endogenous regressors +{col 4}{cmd:e(archi2)}{col 18}Anderson-Rubin chi-sq test of significance of endogenous regressors +{col 4}{cmd:e(archi2p)}{col 18}p-value of Anderson-Rubin chi-sq test of endogenous regressors +{col 4}{cmd:e(ardf)}{col 18}degrees of freedom of Anderson-Rubin tests of endogenous regressors +{col 4}{cmd:e(ardf_r)}{col 18}denominator degrees of freedom of AR F-test of endogenous regressors +{col 4}{cmd:e(redstat)}{col 18}LR statistic for instrument redundancy +{col 4}{cmd:e(redp)}{col 18}p-value of LR statistic for instrument redundancy +{col 4}{cmd:e(reddf)}{col 18}dof of LR statistic for instrument redundancy +{col 4}{cmd:e(cstat)}{col 18}C-statistic +{col 4}{cmd:e(cstatp)}{col 18}p-value of C-statistic +{col 4}{cmd:e(cstatdf)}{col 18}Degrees of freedom of C-statistic +{col 4}{cmd:e(cons)}{col 18}1 when equation has a 
Stata-supplied constant; 0 otherwise +{col 4}{cmd:e(fwlcons)}{col 18}as above but prior to partialling-out (see {cmd:e(fwl)}) + +Macros +{col 4}{cmd:e(cmd)}{col 18}ivreg28 +{col 4}{cmd:e(version)}{col 18}Version number of ivreg28 +{col 4}{cmd:e(model)}{col 18}ols, iv, gmm, liml, or kclass +{col 4}{cmd:e(depvar)}{col 18}Name of dependent variable +{col 4}{cmd:e(instd)}{col 18}Instrumented (RHS endogenous) variables +{col 4}{cmd:e(insts)}{col 18}Instruments +{col 4}{cmd:e(inexog)}{col 18}Included instruments (regressors) +{col 4}{cmd:e(exexog)}{col 18}Excluded instruments +{col 4}{cmd:e(collin)}{col 18}Variables dropped because of collinearities +{col 4}{cmd:e(dups)}{col 18}Duplicate variables +{col 4}{cmd:e(ecollin)}{col 18}Endogenous variables reclassified as exogenous because of +{col 20}collinearities with instruments +{col 4}{cmd:e(clist)}{col 18}Instruments tested for orthogonality +{col 4}{cmd:e(redlist)}{col 18}Instruments tested for redundancy +{col 4}{cmd:e(fwl)}{col 18}Partialled-out exogenous regressors +{col 4}{cmd:e(small)}{col 18}small +{col 4}{cmd:e(wtype)}{col 18}weight type +{col 4}{cmd:e(wexp)}{col 18}weight expression +{col 4}{cmd:e(clustvar)}{col 18}Name of cluster variable +{col 4}{cmd:e(vcetype)}{col 18}Covariance estimation method +{col 4}{cmd:e(kernel)}{col 18}Kernel +{col 4}{cmd:e(tvar)}{col 18}Time variable +{col 4}{cmd:e(ivar)}{col 18}Panel variable +{col 4}{cmd:e(firsteqs)}{col 18}Names of stored first-stage equations +{col 4}{cmd:e(rfeq)}{col 18}Name of stored reduced-form equation +{col 4}{cmd:e(predict)}{col 18}Program used to implement predict + +Matrices +{col 4}{cmd:e(b)}{col 18}Coefficient vector +{col 4}{cmd:e(V)}{col 18}Variance-covariance matrix of the estimators +{col 4}{cmd:e(S)}{col 18}Covariance matrix of orthogonality conditions +{col 4}{cmd:e(W)}{col 18}GMM weighting matrix (=inverse of S if efficient GMM estimator) +{col 4}{cmd:e(first)}{col 18}First-stage regression results +{col 4}{cmd:e(ccev)}{col 18}Eigenvalues 
corresponding to the Anderson canonical correlations test +{col 4}{cmd:e(cdev)}{col 18}Eigenvalues corresponding to the Cragg-Donald test + +Functions +{col 4}{cmd:e(sample)}{col 18}Marks estimation sample + + + +{marker s_examples}{title:Examples} + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/hayashi/griliches76.dta" : . use http://fmwww.bc.edu/ec-p/data/hayashi/griliches76.dta }{p_end} +{p 8 12}(Wages of Very Young Men, Zvi Griliches, J.Pol.Ec. 1976) + +{p 8 12}{stata "xi i.year" : . xi i.year} + +{col 0}(Instrumental variables. Examples follow Hayashi 2000, p. 255.) + +{p 8 12}{stata "ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt)" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt)} + +{p 8 12}{stata "ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), small ffirst" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), small ffirst} + +{col 0}(Testing for the presence of heteroskedasticity in IV/GMM estimation) + +{p 8 12}{stata "ivhettest, fitlev" : . ivhettest, fitlev} + +{col 0}(Two-step GMM efficient in the presence of arbitrary heteroskedasticity) + +{p 8 12}{stata "ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm} + +{p 0}(Continuously-updated GMM (CUE) efficient in the presence of arbitrary heteroskedasticity. NB: may require 50+ iterations.) + +{p 8 12}{stata "ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), cue robust" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), cue robust} + +{col 0}(Sargan-Basmann tests of overidentifying restrictions for IV estimation) + +{p 8 12}{stata "ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt)" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt)} + +{p 8 12}{stata "overid, all" : . 
overid, all} + +{col 0}(Tests of exogeneity and endogeneity) + +{col 0}(Test the exogeneity of 1 regressor) + +{p 8 12}{stata "ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm orthog(s)" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm orthog(s)} + +{col 0}(Test the exogeneity of 2 excluded instruments) + +{p 8 12}{stata "ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm orthog(age mrt)" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm orthog(age mrt)} + +{col 0}(Frisch-Waugh-Lovell (FWL): equivalence of estimations with and without partialling-out) + +{p 8 12}{stata "ivreg28 lw s expr tenure rns _I* (iq=kww age), cluster(year)" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year)} + +{p 8 12}{stata "ivreg28 lw s expr tenure rns _I* (iq=kww age), cluster(year) fwl(_I*)" : . ivreg28 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year) fwl(_I*)} + +{col 0}(FWL: efficient GMM with #clusters<#instruments feasible after partialling-out) + +{p 8 12}{stata "ivreg28 lw s expr tenure rns _I* (iq=kww age), cluster(year) fwl(_I*) gmm" : . ivreg28 lw s expr tenure rns smsa (iq=med kww age), cluster(year) fwl(_I*) gmm} + +{col 0}(Examples following Wooldridge 2002, pp.59, 61) + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta" : . use http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta } + +{col 0}(Test an excluded instrument for redundancy) + +{p 8 12}{stata "ivreg28 lwage exper expersq (educ=age kidslt6 kidsge6), redundant(age)" : . ivreg28 lwage exper expersq (educ=age kidslt6 kidsge6), redundant(age)} + +{col 0}(Equivalence of DWH endogeneity test when regressor is endogenous...) + +{p 8 12}{stata "ivreg28 lwage exper expersq (educ=age kidslt6 kidsge6)" : . ivreg28 lwage exper expersq (educ=age kidslt6 kidsge6)} + +{p 8 12}{stata "ivendog educ" :. ivendog educ} + +{col 0}(... 
endogeneity test using the {cmd:endog} option) + +{p 8 12}{stata "ivreg28 lwage exper expersq (educ=age kidslt6 kidsge6), endog(educ)" : . ivreg28 lwage exper expersq educ (educ=age kidslt6 kidsge6), endog(educ)} + +{col 0}(...and C-test of exogeneity when regressor is exogenous, using the {cmd:orthog} option) + +{p 8 12}{stata "ivreg28 lwage exper expersq educ (=age kidslt6 kidsge6), orthog(educ)" : . ivreg28 lwage exper expersq educ (=age kidslt6 kidsge6), orthog(educ)} + +{col 0}(Heteroskedastic Ordinary Least Squares, HOLS) + +{p 8 12}{stata "ivreg28 lwage exper expersq educ (=age kidslt6 kidsge6), gmm" : . ivreg28 lwage exper expersq educ (=age kidslt6 kidsge6), gmm} + +{col 0}(LIML and k-class estimation using Klein data) + +{col 9}{stata "use http://fmwww.bc.edu/repec/bocode/k/kleinI" :. use http://fmwww.bc.edu/repec/bocode/k/kleinI} + +{col 0}(LIML estimates of Klein's consumption function) + +{p 8 12}{stata "ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), liml" :. ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), liml} + +{col 0}(Equivalence of LIML and CUE+homoskedasticity+independence) + +{p 8 12}{stata "ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), liml coviv" :. ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), liml coviv} + +{p 8 12}{stata "ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), cue" :. ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), cue} + +{col 0}(Fuller's modified LIML with alpha=1) + +{p 8 12}{stata "ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), fuller(1)" :. 
ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), fuller(1)} + +{col 0}(k-class estimation with Nagar's bias-adjusted IV, k=1+(L-K)/N=1+4/21=1.19) + +{p 8 12}{stata "ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), kclass(1.19)" :. ivreg28 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), kclass(1.19)} + +{col 0}(Kernel-based covariance estimation using time-series data) + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/wooldridge/phillips.dta" :. use http://fmwww.bc.edu/ec-p/data/wooldridge/phillips.dta} + +{p 8 12}{stata "tsset year, yearly" :. tsset year, yearly} + +{col 0}(Autocorrelation-consistent (AC) inference in an OLS Regression) + +{p 8 12}{stata "ivreg28 cinf unem, bw(3)" :. ivreg28 cinf unem, bw(3)} + +{col 0}(Heteroskedastic and autocorrelation-consistent (HAC) inference in an OLS regression) + +{p 8 12}{stata "ivreg28 cinf unem, bw(3) kernel(bartlett) robust small" :. ivreg28 cinf unem, bw(3) kernel(bartlett) robust small} + +{p 8 12}{stata "newey cinf unem, lag(2)" :. newey cinf unem, lag(2)} + +{col 0}(AC and HAC in IV and GMM estimation) + +{p 8 12}{stata "ivreg28 cinf (unem = l(1/3).unem), bw(3)" :. ivreg28 cinf (unem = l(1/3).unem), bw(3)} + +{p 8 12}{stata "ivreg28 cinf (unem = l(1/3).unem), bw(3) gmm kernel(thann)" :. ivreg28 cinf (unem = l(1/3).unem), bw(3) gmm kernel(thann)} + +{p 8 12}{stata "ivreg28 cinf (unem = l(1/3).unem), bw(3) gmm kernel(qs) robust orthog(l1.unem)" :. ivreg28 cinf (unem = l(1/3).unem), bw(3) gmm kernel(qs) robust orthog(l1.unem)} + +{col 0}(Examples using Large N, Small T Panel Data) + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/macro/abdata.dta" : . use http://fmwww.bc.edu/ec-p/data/macro/abdata.dta }{p_end} +{p 8 12}(Layard & Nickell, Unemployment in Britain, Economica 53, 1986, from Ox dist) + +{p 8 12}{stata "tsset id year" :. 
tsset id year} + +{col 0}(Autocorrelation-consistent inference in an IV regression) + +{p 8 12}{stata "ivreg28 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), bw(1) kernel(tru)": . ivreg28 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), bw(1) kernel(tru)} + +{col 0}(Two-step effic. GMM in the presence of arbitrary heteroskedasticity and autocorrelation) + +{p 8 12}{stata "ivreg28 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), bw(2) gmm kernel(tru) robust": . ivreg28 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), bw(2) gmm kernel(tru) robust} + +{col 0}(Two-step effic. GMM in the presence of arbitrary heterosked. and intra-group correlation) + +{p 8 12}{stata "ivreg28 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), gmm cluster(id)": . ivreg28 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), gmm cluster(id)} + + +{marker s_refs}{title:References} + +{p 0 4}Anderson, T.W. 1984. Introduction to Multivariate Statistical Analysis. +2d ed. New York: John Wiley & Sons. + +{p 0 4}Anderson, T. W., and H. Rubin. 1949. Estimation of the parameters of a single equation +in a complete system of stochastic equations. Annals of Mathematical Statistics, Vol. 20, +pp. 46-63. + +{p 0 4}Anderson, T. W., and H. Rubin. 1950. The asymptotic properties of estimates of the parameters of a single +equation in a complete system of stochastic equations. Annals of Mathematical Statistics, +Vol. 21, pp. 570-82. + +{p 0 4}Baum, C.F., Schaffer, M.E., and Stillman, S. 2003. Instrumental Variables and GMM: +Estimation and Testing. The Stata Journal, Vol. 3, No. 1, pp. 1-31. +Working paper version: Boston College Department of Economics Working Paper No 545. +{browse "http://ideas.repec.org/p/boc/bocoec/545.html":http://ideas.repec.org/p/boc/bocoec/545.html} + +{p 0 4}Baum, C. F., Schaffer, M. E., and Stillman, S. 2007. Enhanced routines for +instrumental variables/GMM estimation and testing. Unpublished working paper, +forthcoming. + +{p 0 4}Chernozhukov, V. and Hansen, C. 2005. 
The Reduced Form: +A Simple Approach to Inference with Weak Instruments. +Working paper, University of Chicago, Graduate School of Business. + +{p 0 4}Cragg, J.G. and Donald, S.G. 1993. Testing Identfiability and Specification in +Instrumental Variables Models. Econometric Theory, Vol. 9, pp. 222-240. + +{p 0 4}Cushing, M.J. and McGarvey, M.G. 1999. Covariance Matrix Estimation. +In L. Matyas (ed.), Generalized Methods of Moments Estimation. +Cambridge: Cambridge University Press. + +{p 0 4}Davidson, R. and MacKinnon, J. 1993. Estimation and Inference in Econometrics. +1993. New York: Oxford University Press. + +{p 0 4}Dufour, J.M. 2003. Identification, Weak Instruments and Statistical Inference +in Econometrics. Canadian Journal of Economics, Vol. 36, No. 4, pp. 767-808. +Working paper version: CIRANO Working Paper 2003s-49. +http://www.cirano.qc.ca/pdf/publication/2003s-49.pdf + +{p 0 4}Hall, A.R. and Peixe, F.P.M. 2000. A Consistent Method for the Selection of +Relevant Instruments. Econometric Society World Congress 2000 Contributed papers. +http://econpapers.repec.org/paper/ecmwc2000/0790.htm + +{p 0 4}Hall, A.R., Rudebusch, G.D. and Wilcox, D.W. 1996. Judging Instrument Relevance in +Instrumental Variables Estimation. International Economic Review, Vol. 37, No. 2, pp. 283-298. + +{p 0 4}Hayashi, F. Econometrics. 2000. Princeton: Princeton University Press. + +{p 0 4}Hansen, L.P., Heaton, J., and Yaron, A. 1996. Finite Sample Properties +of Some Alternative GMM Estimators. Journal of Business and Economic Statistics, +Vol. 14, No. 3, pp. 262-280. + +{p 0 4}Kleibergen, F. 2007. Generalizing Weak Instrument Robust Statistics Towards +Multiple Parameters, Unrestricted Covariance Matrices and Identification Statistics. +Journal of Econometrics, forthcoming. + +{p 0 4}Mikusheva, A. and Poi, B.P. 2006. +Tests and confidence sets with correct size when instruments are potentially weak. +The Stata Journal, Vol. 6, No. 3, pp. 335-347. + +{p 0 4}Moreira, M.J. 
and Poi, B.P. 2003. Implementing Tests with the Correct Size +in the Simultaneous Equations Model. The Stata Journal, Vol. 3, No. 1, pp. 57-70. + +{p 0 4}Shea, J. 1997. Instrument Relevance in Multivariate Linear Models: +A Simple Measure. +Review of Economics and Statistics, Vol. 49, No. 2, pp. 348-352. + +{p 0 4}Stock, J.H. and Wright, J.H. 2000. GMM with Weak Identification. +Econometrica, Vol. 68, No. 5, September, pp. 1055-1096. + +{p 0 4}Stock, J.H. and Yogo, M. 2005. Testing for Weak Instruments in Linear IV Regression. +In D.W.K. Andrews and J.H. Stock, eds. Identification and Inference for Econometric Models: +Essays in Honor of Thomas Rothenberg. Cambridge: Cambridge University Press, 2005, pp. 80�108. +Working paper version: NBER Technical Working Paper 284. http://www.nber.org/papers/T0284. + +{p 0 4}Wooldridge, J.M. 2002. Econometric Analysis of Cross Section and Panel Data. +Cambridge, MA: MIT Press. + + +{marker s_acknow}{title:Acknowledgements} + +{p}We would like to thanks various colleagues who helped us along the way, including +David Drukker, +Austin Nichols, +Vince Wiggins, +and, not least, the users of {cmd:ivreg28} +who have provided suggestions, +spotted bugs, +and helped test the package. +We are also grateful to Jim Stock and Moto Yogo for permission to reproduce +their critical values for the Cragg-Donald statistic. + +{marker s_citation}{title:Citation of ivreg28} + +{p}{cmd:ivreg28} is not an official Stata command. It is a free contribution +to the research community, like a paper. Please cite it as such: {p_end} + +{phang}Baum, C.F., Schaffer, M.E., Stillman, S. 2007. +ivreg28: Stata module for extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression. 
+{browse "http://ideas.repec.org/c/boc/bocode/s425401.html":http://ideas.repec.org/c/boc/bocode/s425401.html}{p_end} + +{title:Authors} + + Christopher F Baum, Boston College, USA + baum@bc.edu + + Mark E Schaffer, Heriot-Watt University, UK + m.e.schaffer@hw.ac.uk + + Steven Stillman, Motu Economic and Public Policy Research + stillman@motu.org.nz + + +{title:Also see} + +{p 1 14}Manual: {hi:[U] 23 Estimation and post-estimation commands},{p_end} +{p 10 14}{hi:[U] 29 Overview of model estimation in Stata},{p_end} + {hi:[R] ivreg} +{p 0 19}On-line: help for {help ivreg}, {help newey}; +{help overid}, {help ivendog}, {help ivhettest}, {help ivreset}, +{help xtivreg28}, {help xtoverid}, +{help condivreg} (if installed); +{help est}, {help postest}; +{help regress}{p_end} diff --git a/110/replication_package/replication/ado/plus/i/ivreg28_cue.ado b/110/replication_package/replication/ado/plus/i/ivreg28_cue.ado new file mode 100644 index 0000000000000000000000000000000000000000..be0a78edadea8c24990e16ec6cafeeeb54911e91 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg28_cue.ado @@ -0,0 +1,24 @@ +! 1.0.4 4feb2007 +* 1.0.1 cfb updated to v8.2 +* 1.0.2 mes fixed col and row names mismatch +* 1.0.3 added noid option to supress unnecessary identification stats +* 1.0.4 added local `ivreg2_cmd'. ref only to e(j); e(sargan) no longer needed. 
+ +program define ivreg28_cue + version 8.2 + args todo b lnf + local ivreg2_cmd "ivreg28" + tempname b1 J +* Need to make col and rownames match + mat `b1'=`b' +* Remove equation number from col names + local vn : colfullnames `b1' + local vn : subinstr local vn "eq1" "", all + mat colnames `b1' = `vn' +* Standard row name + mat rownames `b1' = y1 + qui `ivreg2_cmd' $IV_lhs $IV_inexog ($IV_endog=$IV_exexog) $IV_wt if $ML_samp==1, b0(`b1') $IV_opt noid + scalar `J'=e(j) + scalar `lnf' = -`J' +end + diff --git a/110/replication_package/replication/ado/plus/i/ivreg28_p.ado b/110/replication_package/replication/ado/plus/i/ivreg28_p.ado new file mode 100644 index 0000000000000000000000000000000000000000..8d8af8e8576b7437cd02c907b46175bec6b36997 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg28_p.ado @@ -0,0 +1,97 @@ +*! version 1.0.6 30Jan2011 +*! author mes +* 1.0.1: 25apr2002 original version +* 1.0.2: 28jun2005 version 8.2 +* 1.0.3: 1Aug2006 complete rewrite plus fwl option +* 1.0.4: 26Jan2007 eliminated double reporting of #MVs +* 1.0.5: 2Feb2007 small fix to allow fwl of just _cons +* 1.0.6: 30Jan2011 re-introduced stdp (had been removed with fwl) +* and added labelling of created residual variable + +program define ivreg28_p + version 8.2 + syntax newvarname [if] [in] , [XB Residuals stdp] + marksample touse, novarlist + + local type "`xb'`residuals'`stdp'" + + if "`type'"=="" { + local type "xb" +di in gr "(option xb assumed; fitted values)" + } + + if "`e(fwlcons)'" != "" { +* fwl partial-out block + if "`type'" == "residuals" { + + tempvar esample + tempname ivres + gen byte `esample' = e(sample) + +* Need to strip out time series operators + local lhs "`e(depvar)'" + tsrevar `lhs', substitute + local lhs_t "`r(varlist)'" + + local rhs : colnames(e(b)) + tsrevar `rhs', substitute + local rhs_t "`r(varlist)'" + + if "`e(fwl1)'" != "" { + local fwl "`e(fwl1)'" + } + else { + local fwl "`e(fwl)'" + } + tsrevar `fwl', substitute + local 
fwl_t "`r(varlist)'" + + if ~e(fwlcons) { + local noconstant "noconstant" + } + + local allvars "`lhs_t' `rhs_t'" +* Partial-out block. Uses estimatation sample to get coeffs, markout sample for predict + _estimates hold `ivres', restore + foreach var of local allvars { + tempname `var'_fwl + qui regress `var' `fwl' if `esample', `noconstant' + qui predict double ``var'_fwl' if `touse', resid + local allvars_fwl "`allvars_fwl' ``var'_fwl'" + } + _estimates unhold `ivres' + + tokenize `allvars_fwl' + local lhs_fwl "`1'" + mac shift + local rhs_fwl "`*'" + + tempname b + mat `b'=e(b) + mat colnames `b' = `rhs_fwl' +* Use forcezero? + tempvar xb + mat score double `xb' = `b' if `touse' + gen `typlist' `varlist' = `lhs_fwl' - `xb' + label var `varlist' "Residuals" + } + else { +di in red "Option `type' not supported with -fwl- option" + error 198 + } + } + else if "`type'" == "residuals" { + tempname lhs lhs_t xb + local lhs "`e(depvar)'" + tsrevar `lhs', substitute + local lhs_t "`r(varlist)'" + qui _predict `typlist' `xb' if `touse' + gen `typlist' `varlist'=`lhs_t'-`xb' + label var `varlist' "Residuals" + } +* Must be either xb or stdp + else { + _predict `typlist' `varlist' if `touse', `type' + } + +end diff --git a/110/replication_package/replication/ado/plus/i/ivreg29.ado b/110/replication_package/replication/ado/plus/i/ivreg29.ado new file mode 100644 index 0000000000000000000000000000000000000000..20882c57f547e90ca0d851c4100e23164afdbe24 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg29.ado @@ -0,0 +1,7576 @@ +*! ivreg29 2.2.14 19Jan2015 +*! authors cfb & mes +*! 
see end of file for version comments + +* Variable naming: +* lhs = LHS endogenous +* endo = RHS endogenous (instrumented) +* inexog = included exogenous (instruments) +* exexog = excluded exogenous (instruments) +* iv = {inexog exexog} = all instruments +* rhs = {endo inexog} = RHS regressors +* 1 at the end of the name means the varlist after duplicates and collinearities removed +* ..1_ct at the end means a straight count of the list +* .._ct at the end means ..1_ct with any additional detected cnts removed + +program define ivreg29, eclass byable(recall) /* properties(svyj) */ sortpreserve + version 9.2 + local lversion 02.2.14 + + local ivreg2_cmd "ivreg29" + + if replay() { + syntax [, FIRST FFIRST RF Level(integer $S_level) NOHEader NOFOoter dropfirst droprf /* + */ EForm(string) PLUS VERsion] + if "`version'" != "" & "`first'`ffirst'`rf'`noheader'`nofooter'`dropfirst'`droprf'`eform'`plus'" != "" { + di as err "option version not allowed" + error 198 + } + if "`version'" != "" { + di in gr "`lversion'" + ereturn clear + ereturn local version `lversion' + exit + } + if `"`e(cmd)'"' != "`ivreg2_cmd'" { + error 301 + } + if "`e(firsteqs)'" != "" & "`dropfirst'" == "" { +* On replay, set flag so saved eqns aren't dropped + local savefirst "savefirst" + } + if "`e(rfeq)'" != "" & "`droprf'" == "" { +* On replay, set flag so saved eqns aren't dropped + local saverf "saverf" + } + } + else { + local cmdline "`ivreg2_cmd' `*'" + + syntax [anything(name=0)] [if] [in] [aw fw pw iw/] [, /* + */ FIRST FFIRST NOID NOCOLLIN SAVEFIRST SAVEFPrefix(name) SMall Robust CLuster(varname) /* + */ GMM GMM2s CUE CUEINIT(string) CUEOPTions(string) ORTHOG(string) ENDOGtest(string) /* + */ PARTIAL(string) FWL(string) NOConstant Level(integer $S_level) Beta hc2 hc3 /* + */ NOHEader NOFOoter NOOUTput title(string) subtitle(string) /* + */ DEPname(string) EForm(string) PLUS /* + */ BW(string) kernel(string) Tvar(varname) Ivar(varname)/* + */ LIML COVIV FULLER(real 0) Kclass(string) /* + 
*/ REDundant(string) RF SAVERF SAVERFPrefix(name) /* + */ B0(string) SMATRIX(string) WMATRIX(string) /* + */ dofminus(integer 0) sdofminus(integer 0) NOPARTIALSMALL ] + + local n 0 + + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + if `s(stop)' { + error 198 + } + while `s(stop)'==0 { + if "`paren'"=="(" { + local n = `n' + 1 + if `n'>1 { +capture noi error 198 +di in red `"syntax is "(all instrumented variables = instrument variables)""' +exit 198 + } + gettoken p lhs : lhs, parse(" =") + while "`p'"!="=" { + if "`p'"=="" { +capture noi error 198 +di in red `"syntax is "(all instrumented variables = instrument variables)""' +di in red `"the equal sign "=" is required"' +exit 198 + } + local endo `endo' `p' + gettoken p lhs : lhs, parse(" =") + } +* To enable Cragg HOLS estimator, allow for empty endo list + local temp_ct : word count `endo' + if `temp_ct' > 0 { + tsunab endo : `endo' + } +* To enable OLS estimator with (=) syntax, allow for empty exexog list + local temp_ct : word count `lhs' + if `temp_ct' > 0 { + tsunab exexog : `lhs' + } + } + else { + local inexog `inexog' `lhs' + } + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + } + local 0 `"`lhs' `0'"' + + tsunab inexog : `inexog' + tokenize `inexog' + local lhs "`1'" + local 1 " " + local inexog `*' + + if "`gmm2s'`cue'" != "" & "`exexog'" == "" { + di in red "option `gmm2s'`cue' invalid: no excluded instruments specified" + exit 102 + } + +/* Block disabled - ranktest code now incorporated into ivreg29. 
+* Check that -ranktest- is installed + capture ranktest, version + if _rc != 0 { +di as err "Error: must have ranktest version `ranktestversion' or greater installed" +di as err "To install, from within Stata type " _c +di in smcl "{stata ssc install ranktest :ssc install ranktest}" + exit 601 + } + local vernum "`r(version)'" + if ("`vernum'" < "`ranktestversion'") | ("`vernum'" > "09.9.99") { +di as err "Error: must have ranktest version `ranktestversion' or greater installed" +di as err "Currently installed version is `vernum'" +di as err "To update, from within Stata type " _c +di in smcl "{stata ssc install ranktest, replace :ssc install ranktest, replace}" + exit 601 + } +*/ + +* Process options + +* Legacy gmm option + if "`gmm'" ~= "" { +di in ye "-gmm- is no longer a supported option; use -gmm2s- with the appropriate option" +di in ye " gmm = gmm2s robust" +di in ye " gmm robust = gmm2s robust" +di in ye " gmm bw() = gmm2s bw()" +di in ye " gmm robust bw() = gmm2s robust bw()" +di in ye " gmm cluster() = gmm2s cluster()" + local gmm2s "gmm2s" + if "`robust'`cluster'`bw'"=="" { +* 2-step efficient gmm with arbitrary heteroskedasticity + local robust "robust" + } + } + +* partial, including legacy FWL option + local partial "`partial' `fwl'" + local partial : list retokenize partial +* Need word option so that varnames with cons in them aren't zapped + local partial : subinstr local partial "_cons" "", all count(local partialcons) word + if `partialcons' > 0 & "`noconstant'"~="" { +di in r "Error: _cons listed in partial() but equation specifies -noconstant-." + error 198 + } + else if "`noconstant'"~="" { + local partialcons 0 + } + else if `partialcons' > 1 { +* Just in case of multiple _cons +di in r "Error: _cons listed more than once in partial()." + error 198 + } + else if "`partial'" ~= "" { + local partialcons 1 + } + +* Fuller implies LIML + if "`liml'" == "" & `fuller' != 0 { + local liml "liml" + } + +* b0 implies noid. 
Also check for incompatible options. + if "`b0'" ~= "" { + local noid "noid" + local b0opts "`gmm2s'`cue'`liml'`wmatrix'`kclass'" + if "`b0opts'" != "" { +* ...with spaces + local b0opts "`gmm2s' `cue' `liml' `wmatrix' `kclass'" + local b0opts : list retokenize b0opts +di as err "incompatible options: -b0- and `b0opts'" + exit 198 + } + } + + if "`gmm2s'" != "" & "`cue'" != "" { +di as err "incompatible options: 2-step efficient gmm and cue gmm" + exit 198 + } + +* savefprefix implies savefirst + if "`savefprefix'" != "" & "`savefirst'" == "" { + local savefirst "savefirst" + } + +* default savefprefix is _ivreg2_ + if "`savefprefix'" == "" { + local savefprefix "_`ivreg2_cmd'_" + } + +* saverfprefix implies saverf + if "`saverfprefix'" != "" & "`saverf'" == "" { + local saverf "saverf" + } + +* default saverfprefix is _ivreg2_ + if "`saverfprefix'" == "" { + local saverfprefix "_`ivreg2_cmd'_" + } + +* LIML/kclass incompatibilities + if "`liml'`kclass'" != "" { + if "`gmm2s'`cue'" != "" { +di as err "GMM estimation not available with LIML or k-class estimators" + exit 198 + } + if `fuller' < 0 { +di as err "invalid Fuller option" + exit 198 + } + if "`liml'" != "" & "`kclass'" != "" { +di as err "cannot use liml and kclass options together" + exit 198 + } +* Process kclass string + tempname kclass2 + scalar `kclass2'=real("`kclass'") + if "`kclass'" != "" & (`kclass2' == . | `kclass2' < 0 ) { +di as err "invalid k-class option" + exit 198 + } + } + +* HAC estimation. +* If bw is omitted, default `bw' is empty string. +* If bw or kernel supplied, check/set `kernel'. +* Macro `kernel' is also used for indicating HAC in use. 
+ if "`bw'" != "" | "`kernel'" != "" { +* Need tvar only for markout with time-series stuff +* but data must be tsset for time-series operators in code to work +* User-supplied tvar not used but checked if consistent with tsset + capture tsset + if "`r(timevar)'" == "" { +di as err "must tsset data and specify timevar" + exit 5 + } + if "`tvar'" == "" { + local tvar "`r(timevar)'" + } + else if "`tvar'"!="`r(timevar)'" { +di as err "invalid tvar() option - data already -tsset-" + exit 5 + } + tsreport, panel + if `r(N_gaps)' != 0 { +di in gr "Warning: time variable " in ye "`tvar'" in gr " has " /* + */ in ye "`r(N_gaps)'" in gr " gap(s) in relevant range" + } + + if "`bw'" == "" { +di as err "bandwidth option bw() required for HAC-robust estimation" + exit 102 + } +* Check for bw(auto); flag as -1 + if lower("`bw'") == "auto" { + if "`ivar'" ~= "" { +di as err "Automatic bandwidth selection not available for panel data" + exit 198 + } + local bw=-1 + } + else { + local bw real("`bw'") +* Check it's a valid bandwidth + if `bw' != int(`bw') | /* + */ `bw' == . | /* + */ `bw' <= 0 { +di as err "invalid bandwidth in option bw() - must be integer > 0 or 'auto'" + exit 198 + } +* Convert bw macro to simple integer + local bw=`bw' + } +* Check it's a valid kernel + local validkernel 0 + if lower(substr("`kernel'", 1, 3)) == "bar" | "`kernel'" == "" { +* Default kernel + local kernel "Bartlett" + local window "lag" + local validkernel 1 + if `bw'==1 { +di in ye "Note: kernel=Bartlett and bw=1 implies zero lags used. Standard errors and" +di in ye " test statistics are not autocorrelation-consistent." + } + } + if lower(substr("`kernel'", 1, 3)) == "par" { + local kernel "Parzen" + local window "lag" + local validkernel 1 + if `bw'==1 { +di in ye "Note: kernel=Parzen and bw=1 implies zero lags used. Standard errors and" +di in ye " test statistics are not autocorrelation-consistent." 
+ } + } + if lower(substr("`kernel'", 1, 3)) == "tru" { + local kernel "Truncated" + local window "lag" + local validkernel 1 + } + if lower(substr("`kernel'", 1, 9)) == "tukey-han" | lower("`kernel'") == "thann" { + local kernel "Tukey-Hanning" + local window "lag" + local validkernel 1 + if `bw'==1 { +di in ye "Note: kernel=Tukey-Hanning and bw=1 implies zero lags. Standard errors and" +di in ye " test statistics are not autocorrelation-consistent." + } + } + if lower(substr("`kernel'", 1, 9)) == "tukey-ham" | lower("`kernel'") == "thamm" { + local kernel "Tukey-Hamming" + local window "lag" + local validkernel 1 + if `bw'==1 { +di in ye "Note: kernel=Tukey-Hamming and bw=1 implies zero lags. Standard errors and" +di in ye " test statistics are not autocorrelation-consistent." + } + } + if lower(substr("`kernel'", 1, 3)) == "qua" | lower("`kernel'") == "qs" { + local kernel "Quadratic spectral" + local window "spectral" + local validkernel 1 + } + if lower(substr("`kernel'", 1, 3)) == "dan" { + local kernel "Daniell" + local window "spectral" + local validkernel 1 + } + if lower(substr("`kernel'", 1, 3)) == "ten" { + local kernel "Tent" + local window "spectral" + local validkernel 1 + } + if ~`validkernel' { + di in red "invalid kernel" + exit 198 + } + } + + if "`kernel'" != "" & "`cluster'" != "" { +di as err "cannot use HAC kernel estimator with -cluster- option" + exit 198 + } + + if "`orthog'`endogtest'`redundant'`partial'" != "" { + capture tsunab orthog : `orthog' + capture tsunab endogtest : `endogtest' + capture tsunab redundant : `redundant' + capture tsunab partial : `partial' + } + + if "`hc2'`hc3'" != "" { + if "`hc2'"!="" { + di in red "option `hc2' invalid" + } + else di in red "option `hc3' invalid" + exit 198 + } + + if "`beta'" != "" { + di in red "option `beta' invalid" + exit 198 + } + +* Weights +* fweight and aweight accepted as is +* iweight not allowed with robust or gmm and requires a trap below when used with summarize +* pweight is 
equivalent to aweight + robust +* but in HAC case, robust implied by `kernel' rather than `robust' + + tempvar wvar + if "`weight'" == "fweight" | "`weight'"=="aweight" { + local wtexp `"[`weight'=`exp']"' + qui gen double `wvar'=`exp' + } + if "`weight'" == "fweight" & "`kernel'" !="" { + di in red "fweights not allowed (data are -tsset-)" + exit 101 + } + if "`weight'" == "iweight" { + if "`robust'`cluster'`gmm2s'`kernel'" !="" { + di in red "iweights not allowed with robust or gmm" + exit 101 + } + else { + local wtexp `"[`weight'=`exp']"' + qui gen double `wvar'=`exp' + } + } + if "`weight'" == "pweight" { + local wtexp `"[aweight=`exp']"' + qui gen double `wvar'=`exp' + local robust "robust" + } + if "`weight'" == "" { +* If no weights, define neutral weight variable + qui gen byte `wvar'=1 + } + + if `dofminus' > 0 { + local dofmopt "dofminus(`dofminus')" + } + + marksample touse + markout `touse' `lhs' `inexog' `exexog' `endo' `cluster' `tvar', strok + +* Weight statement + if "`weight'" ~= "" { + sum `wvar' if `touse' `wtexp', meanonly +di in gr "(sum of wgt is " %14.4e `r(sum_w)' ")" + } + +* Set local macro T and check that bw < T +* Also make sure only used sample is checked + if "`bw'" != "" { + sum `tvar' if `touse', meanonly + local T = r(max)-r(min)+1 + if (`bw' > `T') & (`bw' ~= -1) { +di as err "invalid bandwidth in option bw() - cannot exceed timespan of data" + exit 198 + } + } + +************* Collinearities and duplicates block ***************** + + if "`noconstant'" != "" { + local rmcnocons "nocons" + } + +* Check for duplicates of variables +* To mimic official ivreg, in the case of duplicates, +* (1) inexog > endo +* (2) inexog > exexog +* (3) endo + exexog = inexog, as if it were "perfectly predicted" + local dupsen1 : list dups endo + local endo1 : list uniq endo + local dupsex1 : list dups exexog + local exexog1 : list uniq exexog + local dupsin1 : list dups inexog + local inexog1 : list uniq inexog +* Remove inexog from endo + local 
dupsen2 : list endo1 & inexog1 + local endo1 : list endo1 - inexog1 +* Remove inexog from exexog + local dupsex2 : list exexog1 & inexog1 + local exexog1 : list exexog1 - inexog1 +* Remove endo from exexog + local dupsex3 : list exexog1 & endo1 + local exexog1 : list exexog1 - endo1 + local dups "`dupsen1' `dupsex1' `dupsin1' `dupsen2' `dupsex2' `dupsex3'" + local dups : list uniq dups + + if "`nocollin'" == "" { +* First, collinearities check using canonical correlations approach +* Eigenvalue=1 => included endog is really included exogenous +* Eigenvalue=0 => included endog collinear with another included endog +* Corresponding column names give name of variable +* Code block stolen from below, so some repetition + local insts1 `inexog1' `exexog1' + local rhs1 `endo1' `inexog1' + local endo1_ct : word count `endo1' + if `endo1_ct' > 0 { + tempname ccmat ccrealev ccimagev cc A XX XXinv ZZ ZZinv XZ XPZX + qui mat accum `A' = `endo1' `insts1' if `touse' `wtexp', `rmcnocons' + mat `XX' = `A'[1..`endo1_ct',1..`endo1_ct'] + mat `XXinv'=syminv(`XX') + mat `ZZ' = `A'[`endo1_ct'+1...,`endo1_ct'+1...] + mat `ZZinv'=syminv(`ZZ') + mat `XZ' = `A'[1..`endo1_ct',`endo1_ct'+1...] 
+ mat `XPZX'=`XZ'*`ZZinv'*`XZ'' + mat `ccmat' = `XXinv'*`XPZX' + mat eigenvalues `ccrealev' `ccimagev' = `ccmat' +* Loop through endo1 to find eigenvalues=0 or 1 + foreach vn of varlist `endo1' { + local i=colnumb(`ccmat',"`vn'") + if round(`ccmat'[`i',`i'],10e-7)==0 { +* Collinear with another endog, so remove from endog list + local endo1 : list endo1-vn + local ncollin "`ncollin' `vn'" + } + if round(`ccmat'[`i',`i'],10e-7)==1 { +* Collinear with exogenous, so remove from endog and add to inexog + local endo1 : list endo1-vn + local inexog1 "`inexog1' `vn'" + local ecollin "`ecollin' `vn'" + } + } + } + +* _rmcollright crashes if no arguments supplied + capture _rmcollright `inexog1' `exexog1' if `touse' `wtexp', `rmcnocons' + +* endo1 has had within-endo collinear removed, so non-colllinear list is _rmcoll result + endo1 + local ncvars `r(varlist)' `endo1' + local allvars1 `endo1' `inexog1' `exexog1' +* collin gets collinear variables to be removed + local collin : list allvars1-ncvars +* Remove collin from exexog1 + local exexog1 : list exexog1-collin +* Remove collin from inexog1 + local inexog1 : list inexog1-collin +* Add dropped endogenous to collinear list, trimming down to "" if empty + local collin "`collin' `ncollin'" + local collin : list clean collin + +* Collinearity and duplicates warning messages, if necessary + if "`dups'" != "" { +di in gr "Warning - duplicate variables detected" +di in gr "Duplicates:" _c + Disp `dups', _col(21) + } + if "`ecollin'" != "" { +di in gr "Warning - endogenous variable(s) collinear with instruments" +di in gr "Vars now exogenous:" _c + Disp `ecollin', _col(21) + } + if "`collin'" != "" { +di in gr "Warning - collinearities detected" +di in gr "Vars dropped:" _c + Disp `collin', _col(21) + } + } + +**** End of collinearities block ************ + +**** Partial-out block ****************** + +* `partial' has all to be partialled out except for constant + if "`partial'" != "" | `partialcons'==1 { + preserve + local 
partialdrop : list inexog - inexog1 + local partial1 : list partial - partialdrop + local partialcheck : list partial1 - inexog1 + if ("`partialcheck'"~="") { +di in r "Error: `partialcheck' listed in partial() but not in list of regressors." + error 198 + } + local inexog1 : list inexog1 - partial1 +* Check that cluster or weight var won't be transformed + local allvars "`lhs' `inexog' `endo' `exexog'" + if "`cluster'"~="" { + local clustvarcheck : list cluster in allvars + if `clustvarcheck' { +di in r "Error: cannot use cluster variable `cluster' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`wtexp'"~="" { + tokenize `exp', parse("*/()+-^&|~") + local wvartokens `*' + local nwvarnames : list allvars - wvartokens + local wvarnames : list allvars - nwvarnames + if "`wvarnames'"~="" { +di in r "Error: cannot use weight variables as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } +* Constant is partialled out, unless nocons already specified in the first place + tempname partial_resid + foreach var of varlist `lhs' `inexog1' `endo1' `exexog1' { + qui regress `var' `partial1' if `touse' `wtexp', `noconstant' + qui predict double `partial_resid' if `touse', resid + qui replace `var' = `partial_resid' + drop `partial_resid' + } + local partial_ct : word count `partial1' + if "`noconstant'" == "" { +* partial_ct used for small-sample adjustment to regression F-stat + local partial_ct = `partial_ct' + 1 + local noconstant "noconstant" + } + } + else { +* Set count of partial vars to zero if option not used + local partial_ct 0 + local partialcons 0 + } +* Add partial_ct to small dof adjustment sdofminus + if "`nopartialsmall'"=="" { + local sdofminus = `sdofminus'+`partial_ct' + } + +********************************************* + + local insts1 `inexog1' `exexog1' + local rhs1 `endo1' `inexog1' + local iv1_ct : word count `insts1' + local 
rhs1_ct : word count `rhs1' + local endo1_ct : word count `endo1' + local exex1_ct : word count `exexog1' + local endoexex1_ct : word count `endo1' `exexog1' + local inexog1_ct : word count `inexog1' + + if "`noconstant'" == "" { + local cons 1 + } + else { + local cons 0 + } + + if `rhs1_ct' > `iv1_ct' { + di in red "equation not identified; must have at " /* + */ "least as many instruments not in" + di in red "the regression as there are " /* + */ "instrumented variables" + exit 481 + } + + if `rhs1_ct' + `cons' == 0 { + di in red "error: no regressors specified" + exit 102 + } + + if "`cluster'"!="" { + local clopt "cluster(`cluster')" + if "`robust'"=="" { + local robust "robust" + } + } + if "`bw'"!="" { + local bwopt "bw(`bw')" + } + if "`kernel'"!="" { + local kernopt "kernel(`kernel')" + } +* If depname not provided (default) name is lhs variable + if "`depname'"=="" { + local depname `lhs' + } + +************************************************************************************************ +* Cross-products and basic IV coeffs, residuals and moment conditions + tempvar iota y2 yhat ivresid ivresid2 gresid gresid2 lresid lresid2 b0resid b0resid2 s1resid + tempname ysum yy yyc r2u r2c B V ivB gmmB wB lB gmmV ivest + tempname r2 r2_a ivrss lrss wbrss b0rss rss mss rmse sigmasq iv_s2 l_s2 wb_s2 b0_s2 F Fp Fdf2 + tempname S Sinv W s1Zu s2Zu b0Zu wbZu wbresid wbresid2 s1sigmasq + tempname A XZ XZa XZb Zy ZZ ZZinv XPZX XPZXinv XPZy + tempname YY Z2Z2 ZY Z2Y XXa XXb XX Xy Z2Z2inv XXinv + tempname XZWZX XZWZXinv XZWZy XZW + tempname B V B1 uZSinvZu j jp arubin arubinp arubin_lin arubin_linp tempmat + +* Generate cross-products of y, X, Z + qui matrix accum `A' = `lhs' `endo1' `exexog1' `inexog1' /* + */ if `touse' `wtexp', `noconstant' + if "`noconstant'"=="" { + matrix rownames `A' = `lhs' `endo1' `exexog1' /* + */ `inexog1' _cons + matrix colnames `A' = `lhs' `endo1' `exexog1' /* + */ `inexog1' _cons + } + else { + matrix rownames `A' = `lhs' `endo1' `exexog1' 
`inexog1' + matrix colnames `A' = `lhs' `endo1' `exexog1' `inexog1' + } + if `endo1_ct' > 0 { +* X'Z is [endo1 inexog1]'[exexog1 inexog1] + mat `XZ'=`A'[2..`endo1_ct'+1,`endo1_ct'+2...] +* Append portion corresponding to included exog if they (incl constant) exist + if 2+`endo1_ct'+`iv1_ct'-(`rhs1_ct'-`endo1_ct') /* + */ <= rowsof(`A') { + mat `XZ'=`XZ' \ /* + */ `A'[2+`endo1_ct'+`iv1_ct'- /* + */ (`rhs1_ct'-`endo1_ct')..., /* + */ `endo1_ct'+2...] + } +* If included exog (incl const) exist, create XX matrix in 3 steps + if `inexog1_ct' + `cons' > 0 { + mat `XXa' = `A'[2..`endo1_ct'+1, 2..`endo1_ct'+1], /* + */ `A'[2..`endo1_ct'+1, `endoexex1_ct'+2...] + mat `XXb' = `A'[`endoexex1_ct'+2..., 2..`endo1_ct'+1], /* + */ `A'[`endoexex1_ct'+2..., `endoexex1_ct'+2...] + mat `XX' = `XXa' \ `XXb' + mat `Xy' = `A'[2..`endo1_ct'+1, 1] \ `A'[`endoexex1_ct'+2..., 1] + } + else { + mat `XX' = `A'[2..`endo1_ct'+1, 2..`endo1_ct'+1] + mat `Xy' = `A'[2..`endo1_ct'+1, 1] + } + } + else { +* Cragg HOLS estimator with no endogenous variables + mat `XZ'= `A'[2+`iv1_ct'-(`rhs1_ct'-`endo1_ct')..., /* + */ 2...] + mat `XX' = `A'[`endoexex1_ct'+2..., `endoexex1_ct'+2...] + mat `Xy' = `A'[`endoexex1_ct'+2..., 1] + } + + mat `XX'=(`XX'+`XX'')/2 + mat `XXinv'=syminv(`XX') + mat `Zy'=`A'[`endo1_ct'+2...,1] + mat `ZZ'=`A'[`endo1_ct'+2...,`endo1_ct'+2...] 
+ mat `ZZ'=(`ZZ'+`ZZ'')/2 + mat `ZZinv'=syminv(`ZZ') +* diag0cnt not superfluous - collinearity checks will catch but may be disabled + local iv_ct = rowsof(`ZZ') - diag0cnt(`ZZinv') + mat `YY'=`A'[1..`endo1_ct'+1, 1..`endo1_ct'+1] + mat `ZY' = `A'[`endo1_ct'+2..., 1..`endo1_ct'+1] + mat `XPZX'=`XZ'*`ZZinv'*`XZ'' + mat `XPZX'=(`XPZX'+`XPZX'')/2 + mat `XPZXinv'=syminv(`XPZX') + mat `XPZy'=`XZ'*`ZZinv'*`Zy' +****************************** + qui gen byte `iota'=1 + qui gen double `y2'=`lhs'^2 +* Stata summarize won't work with iweights, so must use matrix cross-product + qui matrix vecaccum `ysum' = `iota' `y2' `lhs' `wtexp' if `touse' +* N is ob count from mat accum. +* `N' is rounded down to nearest integer if iweights are used. +* If aw, pw or fw, should already be integer but use round in case of numerical imprecision. + if "`weight'" == "iweight" { + local N=int(`ysum'[1,3]) + } + else { + local N=round(`ysum'[1,3]) + } +* For yyc, use unrounded N to mimic official -regress- (needed in R-sq) + scalar `yy'=`ysum'[1,1] + scalar `yyc'=`yy'-`ysum'[1,2]^2/`ysum'[1,3] + +******************************************************************************************* +* First-step estimators: b0, wmatrix, LIML-kclass, IV. +* Generate residuals s1resid for used in 2SFEGMM and robust. +* User-supplied b0 provides value of CUE obj fn. 
+ if "`b0'" != "" { + capture drop `yhat' + qui mat score double `yhat' = `b0' if `touse' + qui gen double `b0resid'=`lhs'-`yhat' + qui gen double `b0resid2'=`b0resid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `b0resid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `b0rss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `b0_s2'=`b0rss'/(`N'-`dofminus') + scalar `s1sigmasq'=`b0_s2' + qui gen double `s1resid'=`b0resid' + } + else if "`wmatrix'" != "" { +* GMM with arbitrary weighting matrix provides first-step estimates + local cn : colnames(`ZZ') + matrix `W'=`wmatrix' +* Rearrange/select columns to mat IV matrix + capture matsort `W' "`cn'" + local wrows = rowsof(`W') + local wcols = colsof(`W') + local zcols = colsof(`ZZ') + if _rc ~= 0 | (`wrows'~=`zcols') | (`wcols'~=`zcols') { +di as err "-wmatrix- option error: supplied matrix columns/rows do not match IV list" +exit 198 + } + mat `XZWZX'=`XZ'*`W'*`XZ'' + mat `XZWZy'=`XZ'*`W'*`Zy' + mat `XZWZX'=(`XZWZX'+`XZWZX'')/2 + mat `XZWZXinv'=syminv(`XZWZX') + mat `XZW'=`XZ'*`W' + mat `wB'=`XZWZy''*`XZWZXinv'' + + capture drop `yhat' + qui mat score double `yhat' = `wB' if `touse' + qui gen double `wbresid'=`lhs'-`yhat' + qui gen double `wbresid2'=`wbresid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `wbresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `wbrss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `wb_s2'=`wbrss'/(`N'-`dofminus') + scalar `s1sigmasq'=`wb_s2' + qui gen double `s1resid'=`wbresid' + } + else if "`liml'`kclass'" != "" { +* LIML and kclass code + tempname WW WW1 Eval lambda khs XhXh XhXhinv ll + if "`kclass'" == "" { +* LIML block + matrix `WW' = `YY' - `ZY''*`ZZinv'*`ZY' + if `inexog1_ct' + `cons' > 0 { + mat `Z2Y' = `A'[`endoexex1_ct'+2..., 1..`endo1_ct'+1] + mat `Z2Z2' = `A'[`endoexex1_ct'+2..., `endoexex1_ct'+2...] 
+ mat `Z2Z2'=(`Z2Z2'+`Z2Z2'')/2 + mat `Z2Z2inv' = syminv(`Z2Z2') + matrix `WW1' = `YY' - `Z2Y''*`Z2Z2inv'*`Z2Y' + } + else { +* Special case of no included exogenous (incl constant) + matrix `WW1' = `YY' + } + matrix `WW'=(`WW'+`WW'')/2 + mata: M=matpowersym(st_matrix("`WW'"), -0.5) + mata: Eval=symeigenvalues(makesymmetric(M*st_matrix("`WW1'")*M)) + mata: lambda=rowmin(Eval) + mata: st_numscalar("r(lambda)", lambda) + scalar `lambda'=r(lambda) + + if `fuller'==0 { +* Basic LIML. Macro kclass2 is the scalar. + scalar `kclass2'=`lambda' + } + else { +* Fuller LIML + if `fuller' > (`N'-`iv_ct') { +di as err "error: invalid choice of Fuller LIML parameter" + exit 198 + } + scalar `kclass2' = `lambda' - `fuller'/(`N'-`iv_ct') + } +* End of LIML block + } + mat `XhXh'=(1-`kclass2')*`XX'+`kclass2'*`XPZX' + mat `XhXh'=(`XhXh'+`XhXh'')/2 + mat `XhXhinv'=syminv(`XhXh') + mat `lB'=`Xy''*`XhXhinv'*(1-`kclass2') + `kclass2'*`Zy''*`ZZinv'*`XZ''*`XhXhinv' + capture drop `yhat' + qui mat score double `yhat'=`lB' if `touse' + qui gen double `lresid'=`lhs' - `yhat' + qui gen double `lresid2'=`lresid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `lresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `lrss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `l_s2'=`lrss'/(`N'-`dofminus') + scalar `s1sigmasq'=`l_s2' + qui gen double `s1resid'=`lresid' + } + else { +* IV resids are 1st-step GMM resids +* In these expressions, ignore scaling of W + mat `ivB' = `XPZy''*`XPZXinv'' + mat `XZWZX'=`XPZX' + mat `XZWZXinv'=`XPZXinv' + mat `XZW'=`XZ'*`ZZinv' + capture drop `yhat' + qui mat score double `yhat' = `ivB' if `touse' + qui gen double `ivresid'=`lhs'-`yhat' + qui gen double `ivresid2'=`ivresid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `ivresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `ivrss'=`ysum'[1,1] + scalar `iv_s2'=`ivrss'/(`N'-`dofminus') + scalar `s1sigmasq'=`iv_s2' + qui gen double `s1resid'=`ivresid' + } +* 
Orthogonality conditions using step 1 residuals + qui mat vecaccum `s1Zu'=`s1resid' `exexog1' `inexog1' /* + */ `wtexp' if `touse', `noconstant' + +* call abw code if bw() is defined and bw(auto) selected + if "`bw'" != "" { + if `bw' == -1 { + tempvar abwtouse + gen byte `abwtouse' = (`s1resid' < .) + abw `s1resid' `exexog1' `inexog1' `abwtouse', noconstant kernel(`kernel') + local bw `r(abw)' + local bwopt "bw(`bw')" + local bwchoice "`r(bwchoice)'" + } + } +******************************************************************************************* +* S covariance matrix of orthogonality conditions +******************************************************************************************* +* If user-supplied S matrix is used, use it + if "`smatrix'" != "" { + local cn : colnames(`ZZ') + matrix `S'=`smatrix' +* Rearrange/select columns to mat IV matrix + capture matsort `S' "`cn'" + local srows = rowsof(`S') + local scols = colsof(`S') + local zcols = colsof(`ZZ') + if _rc ~= 0 | (`srows'~=`zcols') | (`scols'~=`zcols') { +di as err "-smatrix- option error: supplied matrix columns/rows do not match IV list" +exit 198 + } + mat `S' = (`S' + `S'') / 2 + mat `Sinv'=syminv(`S') + local rankS = rowsof(`Sinv') - diag0cnt(`Sinv') + } + +******************************************************************************************* +* Start robust block for robust-HAC S and Sinv +* Do not enter if user supplies smatrix or if CUE + if "`robust'`cluster'" != "" & "`cue'"=="" & "`smatrix'"=="" { +* Optimal weighting matrix +* Block calculates S_0 robust matrix +* _robust has same results as +* mat accum `S'=`exexog1' `inexog1' [iweight=`ivresid'^2] if `touse' +* mat `S' = `S'*1/`N' +* _robust doesn't work properly with TS variables, so must first tsrevar + tsrevar `exexog1' `inexog1' + local TSinsts1 `r(varlist)' +* Create identity matrix with matching col/row names + mat `S'=I(colsof(`s1Zu')) + if "`noconstant'"=="" { + mat colnames `S' = `TSinsts1' "_cons" + mat rownames `S' = 
`TSinsts1' "_cons" + } + else { + mat colnames `S' = `TSinsts1' + mat rownames `S' = `TSinsts1' + } + _robust `s1resid' `wtexp' if `touse', variance(`S') `clopt' minus(0) + if "`cluster'"!="" { + local N_clust=r(N_clust) + } + mat `S' = `S'*1/`N' +* Above doesn't work properly with iweights (i.e. yield same matrix as fw), +* hence iweight trap at start + if "`kernel'" != "" { +* HAC block for S_1 onwards matrices + tempvar vt1 + qui gen double `vt1' = . + tempname tt tx kw karg ow +* Use insts with TS ops removed and with iota (constant) column + if "`noconstant'"=="" { + local insts1c "`TSinsts1' `iota'" + } + else { + local insts1c "`TSinsts1'" + } + local iv1c_ct : word count `insts1c' +* "tau=0 loop" is S_0 block above for all robust code + local tau=1 +* Spectral windows require looping through all T-1 autocovariances + if "`window'" == "spectral" { + local TAU = `T'-1 +di in ye "Computing kernel ..." + } + else { + local TAU=`bw' + } + if "`weight'" == "" { +* If no weights specified, define neutral ow variable and weight expression for code below + qui gen byte `ow'=1 + local wtexp `"[fweight=`wvar']"' + } + else { +* pweights and aweights + summ `wvar' if `touse', meanonly + qui gen double `ow' = `wvar'/r(mean) + } + while `tau' <= `TAU' { + capture mat drop `tt' + local i 1 + while `i' <= `iv1c_ct' { + local x : word `i' of `insts1c' +* Add lags defined with TS operators + local Lx "L`tau'.`x'" + local Ls1resid "L`tau'.`s1resid'" + local Low "L`tau'.`ow'" + qui replace `vt1' = `Lx'*`s1resid'* /* + */ `Ls1resid'*`Low'*`ow' if `touse' +* Use capture here because there may be insufficient observations, e.g., if +* the IVs include lags and tau=N-1. _rc will be 2000 in this case. +* Bug in vecaccum means that if there is only 1 observation it will crash with insufficient obs. +* Replace missings with zeros to head this off. + qui replace `vt1' = 0 if `vt1'==. 
& `touse' + capture mat vecaccum `tx' = `vt1' `insts1c' /* + */ if `touse', nocons + if _rc == 0 { + mat `tt' = nullmat(`tt') \ `tx' + } + local i = `i'+1 + } +* bw = bandwidth, karg is argument to kernel function, kw is kernel function (weight) + scalar `karg' = `tau'/(`bw') + if "`kernel'" == "Truncated" { + scalar `kw'=1 + } + if "`kernel'" == "Bartlett" { + scalar `kw'=(1-`karg') + } + if "`kernel'" == "Parzen" { + if `karg' <= 0.5 { + scalar `kw' = 1-6*`karg'^2+6*`karg'^3 + } + else { + scalar `kw' = 2*(1-`karg')^3 + } + } + if "`kernel'" == "Tukey-Hanning" { + scalar `kw'=0.5+0.5*cos(_pi*`karg') + } + if "`kernel'" == "Tukey-Hamming" { + scalar `kw'=0.54+0.46*cos(_pi*`karg') + } + if "`kernel'" == "Tent" { + scalar `kw'=2*(1-cos(`tau'*`karg')) / (`karg'^2) + } + if "`kernel'" == "Daniell" { + scalar `kw'=sin(_pi*`karg') / (_pi*`karg') + } + if "`kernel'" == "Quadratic spectral" { + scalar `kw'=25/(12*_pi^2*`karg'^2) /* + */ * ( sin(6*_pi*`karg'/5)/(6*_pi*`karg'/5) /* + */ - cos(6*_pi*`karg'/5) ) + } +* Need -capture-s here because tt may not exist (because of insufficient observations/lags) + capture mat `tt' = (`tt'+`tt'')*`kw'*1/`N' + if _rc == 0 { + mat `S' = `S' + `tt' + } + local tau = `tau'+1 + } + if "`weight'" == "" { +* If no weights specified, remove neutral weight variables + local wtexp "" + } + } +* To give S the right col/row names + mat `S'=`S'+0*diag(`s1Zu') +* Right approach is to adjust S by N/(N-dofminus) if NOT cluster +* because clustered S is already "adjusted" + if "`cluster'"=="" { + mat `S'=`S'*`N'/(`N'-`dofminus') + } + + mat `S'=(`S'+`S'')/2 + mat `Sinv'=syminv(`S') + local rankS = rowsof(`Sinv') - diag0cnt(`Sinv') + } + +* End robust-HAC S and Sinv block +************************************************************************************ +* Block for non-robust S and Sinv, including autocorrelation-consistent (AC). 
+* Do not enter if user supplies smatrix or if cue + + if "`robust'`cluster'`cue'`smatrix'"=="" { +* First do with S_0 (=S for simple IV) +* Step 1 sigma^2 is IV sigma^2 unless b0 or wmatrix provided + mat `S' = `s1sigmasq'*`ZZ'*(1/`N') + + if "`kernel'" != "" { +* AC code for S_1 onwards matrices + tempvar vt1 + qui gen double `vt1' = . + tempname tt tx kw karg ow sigttj +* Use insts with TS ops removed and with iota (constant) column + tsrevar `exexog1' `inexog1' + local TSinsts1 `r(varlist)' + if "`noconstant'"=="" { + local insts1c "`TSinsts1' `iota'" + } + else { + local insts1c "`TSinsts1'" + } + local iv1c_ct : word count `insts1c' +* "tau=0 loop" is S_0 block above + local tau=1 +* Spectral windows require looping through all T-1 autocovariances + if "`window'" == "spectral" { + local TAU=`T'-1 +di in ye "Computing kernel ..." + } + else { + local TAU=`bw' + } + if "`weight'" == "" { +* If no weights specified, define neutral ow variable and wtexp for code below + qui gen byte `ow'=1 + local wtexp `"[fweight=`wvar']"' + } + else { +* pweights and aweights + sum `wvar' if `touse', meanonly + qui gen double `ow' = `wvar'/r(mean) + } + while `tau' <= `TAU' { + capture mat drop `tt' + local i 1 +* errflag signals problems that make this loop's tt invalid + local errflag 0 +* Additional marksample/markout required so that treatment of MVs is consistent across all IVs and obs + marksample touse2 + markout `touse2' `insts1c' L`tau'.(`insts1c') L`tau'.(`s1resid') + + local Low "L`tau'.`ow'" + while `i' <= `iv1c_ct' { + local x : word `i' of `insts1c' +* Add lags defined with TS operators + local Lx "L`tau'.`x'" + qui replace `vt1'=. + qui replace `vt1' = `Lx'*`Low'*`ow' if `touse' & `touse2' +* Use capture here because there may be insufficient observations, e.g., if +* the IVs include lags and tau=N-1. _rc will be 2000 in this case. +* Bug in vecaccum means that if there is only 1 observation it will crash with insufficient obs. 
+* Replace missings with zeros to head this off. + qui replace `vt1' = 0 if `vt1'==. & `touse' + capture mat vecaccum `tx' = `vt1' `insts1c' /* + */ if `touse', nocons + + if _rc == 0 { + mat `tt' = nullmat(`tt') \ `tx' + } + local i = `i'+1 + } + + capture mat `tt' = 1/`N' * `tt' + if _rc != 0 { + local errflag = 1 + } + local Ls1resid "L`tau'.`s1resid'" + tempvar ivLiv + qui gen double `ivLiv' = `s1resid'*`Ls1resid'*`ow'*`Low' if `touse' + qui sum `ivLiv' if `touse', meanonly + scalar `sigttj' = r(sum)/(`N'-`dofminus') + capture mat `tt' = `sigttj' * `tt' +* bw = bandwidth, karg is argument to kernel function, kw is kernel function (weight) + scalar `karg' = `tau'/(`bw') + if "`kernel'" == "Truncated" { + scalar `kw'=1 + } + if "`kernel'" == "Bartlett" { + scalar `kw'=(1-`karg') + } + if "`kernel'" == "Parzen" { + if `karg' <= 0.5 { + scalar `kw' = 1-6*`karg'^2+6*`karg'^3 + } + else { + scalar `kw' = 2*(1-`karg')^3 + } + } + if "`kernel'" == "Tukey-Hanning" { + scalar `kw'=0.5+0.5*cos(_pi*`karg') + } + if "`kernel'" == "Tukey-Hamming" { + scalar `kw'=0.54+0.46*cos(_pi*`karg') + } + if "`kernel'" == "Tent" { + scalar `kw'=2*(1-cos(`tau'*`karg')) / (`karg'^2) + } + if "`kernel'" == "Daniell" { + scalar `kw'=sin(_pi*`karg') / (_pi*`karg') + } + if "`kernel'" == "Quadratic spectral" { + scalar `kw'=25/(12*_pi^2*`karg'^2) /* + */ * ( sin(6*_pi*`karg'/5)/(6*_pi*`karg'/5) /* + */ - cos(6*_pi*`karg'/5) ) + } +* Need -capture-s here because tt may not exist (because of insufficient observations/lags) + capture mat `tt' = (`tt'+`tt'')*`kw' + if _rc != 0 { + local errflag = 1 + } +* Accumulate if tt is valid + if `errflag' == 0 { + capture mat `S' = `S' + `tt' + } + local tau = `tau'+1 + } + if "`weight'" == "" { +* If no weights specified, remove neutral weight variables + local wtexp "" + } + } +* End of AC code +* To give S the right col/row names + mat `S'=`S'+0*diag(`s1Zu') + mat `S'=(`S'+`S'')/2 + mat `Sinv'=syminv(`S') + local rankS = rowsof(`Sinv') - 
diag0cnt(`Sinv') + } + +* End of non-robust S and Sinv code (including AC) +******************************************************************************************* +* 2nd step and final coefficients +******************************************************************************************* +* User-supplied b0. CUE objective function. + if "`b0'" ~= "" { + mat `B' = `b0' + scalar `rss'=`b0rss' + scalar `sigmasq'=`b0_s2' + mat `W' = `Sinv' + } +******************************************************************************************* +* Block for gmm 2nd step to get coefficients and 2nd step residuals + +* Non-robust IV, LIML, k-class, CUE do not enter + if "`gmm2s'`robust'`cluster'`kernel'`wmatrix'" != "" & "`cue'"=="" { + mat `tempmat'=`XZ'*`Sinv'*`XZ'' + mat `tempmat'=(`tempmat'+`tempmat'')/2 + mat `B1'=syminv(`tempmat') + mat `B1'=(`B1'+`B1'')/2 + mat `gmmB'=(`B1'*`XZ'*`Sinv'*`Zy')' + + capture drop `yhat' + qui mat score double `yhat'=`gmmB' if `touse' + qui gen double `gresid'=`lhs'-`yhat' + qui gen double `gresid2'=`gresid'^2 + qui mat vecaccum `s2Zu'=`gresid' `exexog1' `inexog1' /* + */ `wtexp' if `touse', `noconstant' + } +******************************************************************************************* +* GMM with arbitrary weighting matrix + if ("`wmatrix'"~="") & ("`gmm2s'"=="") & ("`liml'`kclass'`cue'"=="") & "`b0'"=="" { + mat `B'=`wB' + scalar `rss'=`wbrss' + scalar `sigmasq'=`wb_s2' +* Weighting matrix wmatrix already checked and assigned to macro W + } +******************************************************************************************* +* IV coefficients + if ("`wmatrix'"=="") & ("`gmm2s'"=="") & ("`liml'`kclass'`cue'"=="") & "`b0'"=="" { + mat `B'=`ivB' + scalar `rss'=`ivrss' + scalar `sigmasq'=`iv_s2' +* IV weighting matrix. 
By convention, no small-sample adjustment (consistent with S) +* No dofminus correction (needed in sigma^2, not ZZ) + mat `W' = `ZZinv'*`N'/`iv_s2' + } +******************************************************************************************* +* LIML, k-class coefficients + if "`liml'`kclass'" ~= "" { + mat `B'=`lB' + scalar `rss'=`lrss' + scalar `sigmasq'=`l_s2' +* No weighting matrix. + } +******************************************************************************************* +* Efficient GMM coefficients + if "`gmm2s'"!="" & ("`liml'`kclass'`cue'"=="") & "`b0'"=="" { + mat `B'=`gmmB' + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `gresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `rss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `sigmasq'=`rss'/(`N'-`dofminus') + mat `W'=`Sinv' + } +******************************************************************************************* +* Var-cov matrix +******************************************************************************************* +* Expressions below multipy by N because we are working with cross-products (XZ) not vcvs (Qxz) +* Efficient GMM: homoskedastic IV, 2-step FEGMM. LIML, k-class, CUE handled separately. +* No robust, cluster, kernel => must be efficient GMM +* GMM option => must be efficient GMM +* b0 => must be efficient GMM +* wmatrix => (possibly) inefficient GMM + tempname rankV + if ("`robust'`cluster'`kernel'`liml'`kclass'`cue'`wmatrix'"=="") /* + */ | ("`gmm2s'"~="") /* + */ | ("`b0'"~="") { + mat `tempmat'=`XZ'*`Sinv'*`XZ'' + mat `tempmat'=(`tempmat'+`tempmat'')/2 + mat `V' = syminv(`tempmat')*`N' + mat `V'=(`V'+`V'')/2 + scalar `rankV'=rowsof(`tempmat') - diag0cnt(`tempmat') + } +* Possibly inefficient GMM: robust of all sorts with no 2nd step. LIML, k-class, CUE handled separately. 
+ else if ("`liml'`kclass'`cue'"=="") { + mat `V'=`XZWZXinv'*`XZW'*`S'* /* + */ `XZW''*`XZWZXinv'*`N' + mat `V'=(`V'+`V'')/2 + mat `tempmat'=syminv(`V') + scalar `rankV'=rowsof(`tempmat') - diag0cnt(`tempmat') + } +* LIML and k-class non-robust + else if ("`liml'`kclass'" ~= "") & ("`robust'`cluster'`kernel'" == "") { + if "`coviv'"== "" { +* LIML or k-class cov matrix + mat `V'=`sigmasq'*`XhXhinv' + scalar `rankV'=rowsof(`XhXh') - diag0cnt(`XhXh') + } + else { +* IV cov matrix + mat `V'=`sigmasq'*`XPZXinv' + scalar `rankV'=rowsof(`XPZXinv') - diag0cnt(`XPZXinv') + } + mat `V'=(`V'+`V'')/2 + } +* LIML and k-class robust + else if ("`liml'`kclass'" ~= "") & ("`robust'`cluster'`kernel'" ~= "") { + if "`coviv'"== "" { +* Use LIML or k-class cov matrix + mat `V'=`XhXhinv'*`XZ'*`ZZinv'*`S'*`N'* /* + */ `ZZinv'*`XZ''*`XhXhinv' + } + else { +* Use IV cov matrix + mat `V'=`XPZXinv'*`XZ'*`ZZinv'*`S'*`N'* /* + */ `ZZinv'*`XZ''*`XPZXinv' + } + mat `V'=(`V'+`V'')/2 + mat `tempmat'=syminv(`V') + scalar `rankV'=rowsof(`tempmat') - diag0cnt(`tempmat') + } +* End of VCV block +******************************************************************************** +* Sargan-Hansen-Anderson-Rubin statistics +******************************************************************************************* +* Robust requires using gmm residuals; otherwise use iv residuals. CUE handled separately. +* wmatrix is possibly inefficient GMM so require 2nd step residuals +* b0 => return value of CUE objective function. b0 is efficient GMM. 
+ if ("`robust'`cluster'`kernel'`wmatrix'" == "") & ("`cue'"=="") & ("`b0'"=="") { + mat `uZSinvZu'= (`s1Zu'/`N')*`Sinv'*(`s1Zu''/`N') + scalar `j' = `N'*`uZSinvZu'[1,1] + } + if ("`robust'`cluster'`kernel'`wmatrix'" ~= "") & ("`cue'"=="") & ("`b0'"=="") { + mat `uZSinvZu'= (`s2Zu'/`N')*`Sinv'*(`s2Zu''/`N') + scalar `j' = `N'*`uZSinvZu'[1,1] + } + if "`b0'"~="" { + mat `uZSinvZu'= (`s1Zu'/`N')*`Sinv'*(`s1Zu''/`N') + scalar `j' = `N'*`uZSinvZu'[1,1] + } + if "`liml'" != "" { +* Also save Anderson-Rubin overid stat if LIML +* Note dofminus is required because unlike Sargan and 2-step GMM J, doesn't derive from S + scalar `arubin'=(`N'-`dofminus')*ln(`lambda') + scalar `arubin_lin'=(`N'-`dofminus')*(`lambda'-1) + } + +*************************************************************************************** +* Block for cue gmm +******************************************************************************************* + if "`cue'" != "" { + tempname b_init temphold + capture _estimates hold `temphold', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + if "`robust'`cluster'`bwopt'"~="" { + local init_opt "gmm2s" + } + qui `ivreg2_cmd' `lhs' `inexog1' (`endo1'=`exexog1') `wtexp' /* + */ if `touse', `robust' `clopt' `bwopt' `kernopt' `dofmopt' /* + */ `noconstant' `init_opt' noid nocollin + if e(rankzz)>e(rankxx) { + if "`cueinit'"== "" { + mat `b_init'=e(b) + } + else { + mat `b_init'=`cueinit' + } +* Use ML for numerical optimization. 
Always nocons since not there by user or partialled-out +* Set up variables and options as globals + global IV_lhs "`lhs'" + global IV_inexog "`inexog1'" + global IV_endog "`endo1'" + global IV_exexog "`exexog1'" + global IV_wt "`wtexp'" + global IV_opt "`noconstant' `robust' `clopt' `bwopt' `kernopt' `dofmopt'" +* `gmm2s' not in IV_opt because cue+gmm2s not allowed + ml model d0 `ivreg2_cmd'_cue (`lhs' = `endo1' `inexog1', `noconstant') `wtexp' /* + */ if `touse', maximize init(`b_init') `cueoptions' /* + */ crittype(neg GMM obj function -J) /* + */ collinear nooutput nopreserve missing noscvars + } + else { +di in ye "Equation exactly-identified: CUE and 2-step GMM coincide" + } + + mat `B'=e(b) + mat colnames `B' = _: +* Last call to get vcv, j, Sinv etc. + qui `ivreg2_cmd' `lhs' `inexog1' (`endo1'=`exexog1') `wtexp' if `touse', /* + */ `noconstant' `robust' `clopt' `bwopt' `kernopt' `dofmopt' /* + */ b0(`B') noid nocollin + +* Save all results + mat `V'=e(V) + mat `S'=e(S) + mat `Sinv'=syminv(`S') + mat `W'=`Sinv' + + local rankS = e(rankS) + scalar `j'=e(j) + local df_m = e(df_m) + scalar `rankV'=e(rankV) + + if "`cluster'" != "" { + local N_clust=e(N_clust) + } + capture drop `yhat' + qui mat score double `yhat'=`B' if `touse' + qui gen double `gresid'=`lhs'-`yhat' + qui gen double `gresid2'=`gresid'^2 + capture drop `ysum' + qui matrix vecaccum `ysum' = `iota' `gresid2' /* + */ `wtexp' if `touse', `noconstant' + scalar `rss'= `ysum'[1,1] +* Adjust sigma-squared for dofminus + scalar `sigmasq'=`rss'/(`N'-`dofminus') + + macro drop IV_lhs IV_inexog IV_endog IV_exexog IV_wt IV_opt + capture _estimates unhold `temphold' + + } + +******************************************************************************************* +* RSS, counts, dofs, F-stat, small-sample corrections +******************************************************************************************* + + scalar `rmse'=sqrt(`sigmasq') + if "`noconstant'"=="" { + scalar `mss'=`yyc' - `rss' + } + else 
{ + scalar `mss'=`yy' - `rss' + } + +* Counts modified to include constant if appropriate + if "`noconstant'"=="" { + local iv1_ct = `iv1_ct' + 1 + local rhs1_ct = `rhs1_ct' + 1 + } +* Correct count of rhs variables accounting for dropped collinear vars +* Count includes constant + + local rhs_ct = rowsof(`XX') - diag0cnt(`XXinv') + local Fdf1 = `rhs_ct' - `cons' +* CUE handled separately + if "`cue'"=="" { + local df_m = `rhs_ct' - `cons' + (`sdofminus'-`partialcons') + } + if "`cluster'"=="" { +* Residual dof adjusted for dofminus + local df_r = `N' - `rhs_ct' - `dofminus' - `sdofminus' + } + else { +* To match Stata, subtract 1 + local df_r = `N_clust' - 1 + } + +* Sargan-Hansen J dof and p-value +* df=0 doesn't guarantee j=0 since can be call to get value of CUE obj fn + local jdf = `iv_ct' - `rhs_ct' + if `jdf' == 0 & "`b0'"=="" { + scalar `j' = 0 + } + else { + scalar `jp' = chiprob(`jdf',`j') + } + if "`liml'"~="" { + scalar `arubinp' = chiprob(`jdf',`arubin') + scalar `arubin_linp' = chiprob(`jdf',`arubin_lin') + } + +* Small sample corrections for var-cov matrix. +* If robust, the finite sample correction is N/(N-K), and with no small +* we change this to 1 (a la Davidson & MacKinnon 1993, p. 554, HC0). +* If cluster, the finite sample correction is (N-1)/(N-K)*M/(M-1), and with no small +* we change this to 1 (a la Wooldridge 2002, p. 193), where M=number of clusters. 
+ + if "`small'" != "" { + if "`cluster'"=="" { + matrix `V'=`V'*(`N'-`dofminus')/(`N'-`rhs_ct'-`dofminus'-`sdofminus') + } + else { + matrix `V'=`V'*(`N'-1)/(`N'-`rhs_ct'-`sdofminus') /* + */ * `N_clust'/(`N_clust'-1) + } + scalar `sigmasq'=`rss'/(`N'-`rhs_ct'-`dofminus'-`sdofminus') + scalar `rmse'=sqrt(`sigmasq') + } + + scalar `r2u'=1-`rss'/`yy' + scalar `r2c'=1-`rss'/`yyc' + if "`noconstant'"=="" { + scalar `r2'=`r2c' + scalar `r2_a'=1-(1-`r2')*(`N'-1)/(`N'-`rhs_ct'-`dofminus'-`sdofminus') + } + else { + scalar `r2'=`r2u' + scalar `r2_a'=1-(1-`r2')*`N'/(`N'-`rhs_ct'-`dofminus'-`sdofminus') + } + +* Fstat +* To get it to match Stata's, must post separately with dofs and then do F stat by hand +* in case weights generate non-integer obs and dofs +* Create copies so they can be posted + tempname FB FV + mat `FB'=`B' + mat `FV'=`V' + capture ereturn post `FB' `FV' +* If the cov matrix wasn't positive definite, the post fails with error code 506 + local rc = _rc + if `rc' != 506 { + local Frhs1 `rhs1' + capture test `Frhs1' + if "`small'" == "" { + if "`cluster'"=="" { + capture scalar `F' = r(chi2)/`Fdf1' * `df_r'/(`N'-`dofminus') + } + else { + capture scalar `F' = r(chi2)/`Fdf1' * /* +* sdofminus used here so that F-stat matches test stat from regression with no partial and small + */ (`N_clust'-1)/`N_clust' * (`N'-`rhs_ct'-`sdofminus')/(`N'-1) + } + } + else { + capture scalar `F' = r(chi2)/`Fdf1' + } + capture scalar `Fp'=Ftail(`Fdf1',`df_r',`F') + capture scalar `Fdf2'=`df_r' + } + +* If j==. or vcv wasn't full rank, then vcv problems and F is meaningless + if `j' == . | `rc'==506 { + scalar `F' = . + scalar `Fp' = . 
+ } + +* End of counts, dofs, F-stat, small sample corrections +******************************************************************************************* +* orthog option: C statistic (difference of Sargan statistics) +******************************************************************************************* +* Requires j dof from above + if "`orthog'"!="" { + tempname cj cstat cstatp +* Initialize cstat + scalar `cstat' = 0 +* Each variable listed must be in instrument list. +* To avoid overwriting, use cendo, cinexog1, cexexog, cendo_ct, cex_ct + local cendo1 "`endo1'" + local cinexog1 "`inexog1'" + local cexexog1 "`exexog1'" + local cinsts1 "`insts1'" + local crhs1 "`rhs1'" + local clist1 "`orthog'" + local clist_ct : word count `clist1' + +* Check to see if c-stat vars are in original list of all ivs +* cinexog1 and cexexog1 are after c-stat exog list vars have been removed +* cendo1 is endo1 after included exog being tested has been added + foreach x of local clist1 { + local llex_ct : word count `cexexog1' + Subtract cexexog1 : "`cexexog1'" "`x'" + local cex1_ct : word count `cexexog1' + local ok = `llex_ct' - `cex1_ct' + if (`ok'==0) { +* Not in excluded, check included and add to endog list if it appears + local llin_ct : word count `cinexog1' + Subtract cinexog1 : "`cinexog1'" "`x'" + local cin1_ct : word count `cinexog1' + local ok = `llin_ct' - `cin1_ct' + if (`ok'==0) { +* Not in either list +di in r "Error: `x' listed in orthog() but does not appear as exogenous." + error 198 + } + else { + local cendo1 "`cendo1' `x'" + } + } + } + +* If robust, HAC/AC or GMM (but not LIML or IV), create optimal weighting matrix to pass to ivreg2 +* by extracting the submatrix from the full S and then inverting. +* This guarantees the C stat will be non-negative. See Hayashi (2000), p. 220. +* Calculate C statistic with recursive call to ivreg2 +* Collinearities may cause problems, hence -capture-. 
+* smatrix works generally, including homoskedastic case with Sargan stat + capture { + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + if "`kernel'" != "" { + local bwopt "bw(`bw')" + local kernopt "kernel(`kernel')" + } +* clopt is omitted because it requires calculation of numbers of clusters, which is done +* only when S matrix is calculated + capture `ivreg2_cmd' `lhs' `cinexog1' /* + */ (`cendo1'=`cexexog1') /* + */ if `touse' `wtexp', `noconstant' /* + */ `options' `small' `robust' /* + */ `gmm2s' `bwopt' `kernopt' `dofmopt' /* + */ smatrix("`S'") noid nocollin + local rc = _rc + if `rc' == 481 { + scalar `cstat' = 0 + local cstatdf = 0 + } + else { + scalar `cj'=e(j) + local cjdf=e(jdf) + } + scalar `cstat' = `j' - `cj' + local cstatdf = `jdf' - `cjdf' + _estimates unhold `ivest' + scalar `cstatp'= chiprob(`cstatdf',`cstat') +* Collinearities may cause C-stat dof to differ from the number of variables in orthog() +* If so, set cstat=0 + if `cstatdf' != `clist_ct' { + scalar `cstat' = 0 + } + } + } +* End of orthog block + +******************************************************************************************* +* Endog option +******************************************************************************************* +* Uses recursive call with orthog + if "`endogtest'"!="" { + tempname estat estatp +* Initialize estat + scalar `estat' = 0 +* Each variable to test must be in endo list. +* To avoid overwriting, use eendo, einexog1, etc. 
+ local eendo1 "`endo1'" + local einexog1 "`inexog1'" + local einsts1 "`insts1'" + local elist1 "`endogtest'" + local elist_ct : word count `elist1' +* Check to see if endog test vars are in original endo1 list of endogeneous variables +* eendo1 and einexog1 are after endog test vars have been removed from endo and added to inexog + foreach x of local elist1 { + local llendo_ct : word count `eendo1' + local eendo1 : list eendo1 - x + local eendo1_ct : word count `eendo1' + local ok = `llendo_ct' - `eendo1_ct' + if (`ok'==0) { +* Not in endogenous list +di in r "Error: `x' listed in endog() but does not appear as endogenous." + error 198 + } + else { + local einexog1 "`einexog1' `x'" + } + } +* Recursive call to ivreg2 using orthog option to obtain endogeneity test statistic +* Collinearities may cause problems, hence -capture-. + capture { + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + capture `ivreg2_cmd' `lhs' `einexog1' /* + */ (`eendo1'=`exexog1') if `touse' /* + */ `wtexp', `noconstant' `robust' `clopt' /* + */ `gmm2s' `liml' `bwopt' `kernopt' /* + */ `small' `dofmopt' `options' /* + */ orthog(`elist1') noid nocollin + local rc = _rc + if `rc' == 481 { + scalar `estat' = 0 + local estatdf = 0 + } + else { + scalar `estat'=e(cstat) + local estatdf=e(cstatdf) + scalar `estatp'=e(cstatp) + } + _estimates unhold `ivest' +* Collinearities may cause endog stat dof to differ from the number of variables in endog() +* If so, set estat=0 + if `estatdf' != `elist_ct' { + scalar `estat' = 0 + } + } +* End of endogeneity test block + } + +******************************************************************************************* +* Rank identification and redundancy block +******************************************************************************************* + if `endo1_ct' > 0 
& "`noid'"=="" { + +* id=underidentification statistic, wid=weak identification statistic + tempname rkmatrix idrkstat widrkstat iddf idp + tempname ccf cdf rkf cceval cdeval cd cc + tempname idstat widstat + +* Anderson canon corr underidentification statistic if homo, rk stat if not +* Need id stat for testing full rank=(#cols-1) but might need all rk stats for -redundant- test + if "`redundant'"=="" { + local rkopt "full" + } + else { + local rkopt "all" + } + qui ranktest (`endo1') (`exexog1') `wtexp' if `touse', partial(`inexog1') `rkopt' /* + */ `noconstant' `robust' `clopt' `bwopt' `kernopt' + mat `rkmatrix'=r(rkmatrix) + if "`cluster'"=="" { + scalar `idstat'=r(chi2)/r(N)*(`N'-`dofminus') + } + else { +* No dofminus adjustment needed for cluster-robust + scalar `idstat'=r(chi2) + } + mat `cceval'=r(ccorr) + mat `cdeval' = J(1,`endo1_ct',.) + forval i=1/`endo1_ct' { + mat `cceval'[1,`i'] = (`cceval'[1,`i'])^2 + mat `cdeval'[1,`i'] = `cceval'[1,`i'] / (1 - `cceval'[1,`i']) + } + local iddf = `iv_ct' - (`rhs_ct'-1) + scalar `idp' = chiprob(`iddf',`idstat') +* Cragg-Donald F statistic. +* Under homoskedasticity, Wald cd eigenvalue = cc/(1-cc) Anderson canon corr eigenvalue. 
+ scalar `cd'=`cdeval'[1,`endo1_ct'] + scalar `cdf'=`cd'*(`N'-`sdofminus'-`iv_ct'-`dofminus')/`exex1_ct' + +* Weak id statistic is Cragg-Donald F stat, rk Wald F stat if not + if "`robust'`cluster'`kernel'"=="" { + scalar `widstat'=`cdf' + } + else { +* Need only test of full rank + qui ranktest (`endo1') (`exexog1') `wtexp' if `touse', partial(`inexog1') full wald /* + */ `noconstant' `robust' `clopt' `bwopt' `kernopt' +* sdofminus used here so that F-stat matches test stat from regression with no partial + if "`cluster'"=="" { + scalar `rkf'=r(chi2)/r(N)*(`N'-`iv_ct'-`sdofminus'-`dofminus')/`exex1_ct' + } + else { + scalar `rkf'=r(chi2)/(`N'-1) /* + */ *(`N'-`iv_ct'-`sdofminus') /* + */ *(`N_clust'-1)/(`N_clust')/`exex1_ct' + } + scalar `widstat'=`rkf' + } + } + +* LM redundancy test + if `endo1_ct' > 0 & "`redundant'" ~= "" & "`noid'"=="" { +* Use K-P rk statistics and LM version of test +* Statistic is the rank of the matrix of Z_1B*X_2, where Z_1B are the possibly redundant +* instruments and X_1 are the endogenous regressors; both have X_2 (exogenous regressors) +* and Z_1A (maintained excluded instruments) partialled out. LM test of rank is +* is numerically equivalent to estimation of set of RF regressions and performing +* standard LM test of possibly redundant instruments. + + local redlist1 "`redundant'" + local rexexog1 : list exexog1 - redlist1 + local notlisted : list redlist1 - exexog1 + if "`notlisted'" ~= "" { +di in r "Error: `notlisted' listed in redundant() but does not appear as excluded instrument." 
+ error 198 + } + local rexexog1_ct : word count `rexexog1' + if `rexexog1_ct' < `endo1_ct' { +di in r "Error: specification with redundant() option is unidentified (fails rank condition)" + error 198 + } +* LM version requires only -nullrank- rk statistics so would not need -all- option + tempname rrkmatrix + qui ranktest (`endo1') (`redlist1') `wtexp' if `touse', partial(`inexog1' `rexexog1') null /* + */ `noconstant' `robust' `clopt' `bwopt' `kernopt' + mat `rrkmatrix'=r(rkmatrix) + tempname redstat redp + local redlist_ct : word count `redlist1' +* dof adjustment needed because it doesn't use the adjusted S + if "`cluster'"=="" { + scalar `redstat' = `rrkmatrix'[1,1]/r(N)*(`N'-`dofminus') + } + else { +* No dofminus adjustment needed for cluster-robust + scalar `redstat' = `rrkmatrix'[1,1] + } + local reddf = `endo1_ct'*`redlist_ct' + scalar `redp' = chiprob(`reddf',`redstat') + } + +* End of identification stats block + +******************************************************************************************* +* Error-checking block +******************************************************************************************* + +* Check if adequate number of observations + if `N' <= `iv_ct' { +di in r "Error: number of observations must be greater than number of instruments" +di in r " including constant." + error 2001 + } + +* Check if robust VCV matrix is of full rank + if "`gmm2s'`robust'`cluster'`kernel'" != "" { +* Robust covariance matrix not of full rank means either a singleton dummy or too few +* clusters (in which case the indiv SEs are OK but no F stat or 2-step GMM is possible), +* or there are too many AC/HAC-lags, or the HAC covariance estimator +* isn't positive definite (possible with truncated and Tukey-Hanning kernels) + if `rankS' < `iv_ct' { +* If two-step GMM then exit with error ... 
+ if "`gmm2s'" != "" { +di in r "Error: estimated covariance matrix of moment conditions not of full rank;" +di in r " cannot calculate optimal weighting matrix for GMM estimation." +di in r "Possible causes:" + if "`cluster'" != "" { +di in r " number of clusters insufficient to calculate optimal weighting matrix" + } + if "`kernel'" != "" { +di in r " covariance matrix of moment conditions not positive definite" +di in r " covariance matrix uses too many lags" + } +di in r " singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)" +di in r "-partial- option may address problem. See help " _c +di in smcl "{help ivreg2}". + error 498 + } +* Estimation isn't two-step GMM so continue but J, F, and C stat (if present) all meaningless +* Must set Sargan-Hansen j = missing so that problem can be reported in output + else { + scalar `j' = . + if "`orthog'"!="" { + scalar `cstat' = . + } + if "`endogtest'"!="" { + scalar `estat' = . + } + } + } + } + +* End of error-checking block +******************************************************************************************** +* Reduced form and first stage regression options +******************************************************************************************* +* Relies on proper count of (non-collinear) IVs generated earlier. +* Note that nocons option + constant in instrument list means first-stage +* regressions are reported with nocons option. First-stage F-stat therefore +* correctly includes the constant as an explanatory variable. + + if "`rf'`saverf'`first'`ffirst'`savefirst'" != "" & (`endo1_ct' > 0) & "`noid'"=="" { +* Reduced form needed for AR first-stage test stat. Also estimated if requested. 
+ tempname archi2 archi2p arf arfp ardf ardf_r sstat sstatp sstatdf + doRF "`lhs'" "`inexog1'" "`exexog1'" /* + */ `touse' `"`wtexp'"' `"`noconstant'"' `"`robust'"' /* + */ `"`clopt'"' `"`bwopt'"' `"`kernopt'"' /* + */ `"`saverfprefix'"' /* + */ "`dofminus'" "`sdofminus'" "`ivreg2_cmd'" + scalar `archi2'=r(archi2) + scalar `archi2p'=r(archi2p) + scalar `arf'=r(arf) + scalar `arfp'=r(arfp) + scalar `ardf'=r(ardf) + scalar `ardf_r'=r(ardf_r) + local rfeq "`r(rfeq)'" +* Drop saved rf results if needed only for first-stage estimations + if "`rf'`saverf'" == "" { + capture estimates drop `rfeq' + } +* Stock-Wright S statistic. Evaluate CUE objective function at b=0. +* Included exogenous, if any, are partialled out. + tempname b1 + mat `b1'=J(1,`endo1_ct',0) + matrix colnames `b1' = `endo1' + if `inexog1_ct' > 0 { + qui `ivreg2_cmd' `lhs' `inexog1' (`endo1'=`exexog1') `wtexp' if `touse', /* + */ b0(`b1') `noconstant' dofminus(`dofminus') /* + */ `robust' `clopt' `bwopt' `kernopt' nocollin partial(`inexog1') + } + else if `cons' > 0 { + qui `ivreg2_cmd' `lhs' (`endo1'=`exexog1') `wtexp' if `touse', /* + */ b0(`b1') `noconstant' dofminus(`dofminus') /* + */ `robust' `clopt' `bwopt' `kernopt' nocollin partial(_cons) + } + else { + qui `ivreg2_cmd' `lhs' (`endo1'=`exexog1') `wtexp' if `touse', /* + */ b0(`b1') `noconstant' dofminus(`dofminus') /* + */ `robust' `clopt' `bwopt' `kernopt' nocollin + } + + scalar `sstat'=e(j) + scalar `sstatdf'=`ardf' + scalar `sstatp'=chiprob(`sstatdf',`sstat') + } + + if "`first'`ffirst'`savefirst'" != "" & (`endo1_ct' > 0) { + + if `iv1_ct' > `iv_ct' { +di +di in gr "Warning: collinearities detected among instruments" +di in gr "1st stage tests of excluded exogenous variables may be incorrect" + } + + tempname firstmat + local sdofmopt = "sdofminus(`sdofminus')" + doFirst "`endo1'" "`inexog1'" "`exexog1'" "`XXinv'" "`XPZXinv'" /* + */ `touse' `"`wtexp'"' `"`noconstant'"' `"`robust'"' /* + */ `"`clopt'"' `"`bwopt'"' `"`kernopt'"' /* + */ 
`"`savefprefix'"' `"`dofmopt'"' `"`sdofmopt'"' /* + */ "`ivreg2_cmd'" + + local firsteqs "`r(firsteqs)'" + capture mat `firstmat' = r(firstmat) + if _rc != 0 { +di in ye "Warning: missing values encountered; first stage regression results not saved" + } + } +* End of first-stage regression code +********************************************************************************************** +* Post and display results. +******************************************************************************************* + +* restore data if preserved for partial option + if "`partial'" != "" { + restore + } + + if "`small'"!="" { + local NminusK = `N'-`rhs_ct'-`sdofminus' + capture ereturn post `B' `V', dep(`depname') obs(`N') esample(`touse') /* + */ dof(`NminusK') + } + else { + capture ereturn post `B' `V', dep(`depname') obs(`N') esample(`touse') + } + local rc = _rc + if `rc' == 504 { +di in red "Error: estimated variance-covariance matrix has missing values" + exit 504 + } + if `rc' == 506 { +di in red "Error: estimated variance-covariance matrix not positive-definite" + exit 506 + } + if `rc' > 0 { +di in red "Error: estimation failed - could not post estimation results" + exit `rc' + } + + ereturn local instd `endo' + local insts : colnames `S' +* Stata convention is to exclude constant from instrument list +* Need word option so that varnames with "_cons" in them aren't zapped + local insts : subinstr local insts "_cons" "", word + ereturn local insts `insts' + ereturn local inexog `inexog' + ereturn local exexog `exexog' + ereturn local partial `partial' + ereturn scalar inexog_ct=`inexog1_ct' + ereturn scalar exexog_ct=`exex1_ct' + ereturn scalar endog_ct =`endo1_ct' + ereturn scalar partial_ct =`partial_ct' + if "`collin'`ecollin'`dups'" != "" | `partial_ct' > 0 { + ereturn local collin `collin' + ereturn local ecollin `ecollin' + ereturn local dups `dups' + ereturn local instd1 `endo1' + ereturn local inexog1 `inexog1' + ereturn local exexog1 `exexog1' + ereturn 
local partial1 `partial1' + } + + if "`smatrix'" == "" { + ereturn matrix S `S' + } + else { +* Create a copy so posting doesn't zap the original + tempname Scopy + mat `Scopy'=`smatrix' + ereturn matrix S `Scopy' + } + +* No weighting matrix defined for LIML and kclass + if "`wmatrix'"=="" & "`liml'`kclass'"=="" { + ereturn matrix W `W' + } + else if "`liml'`kclass'"=="" { +* Create a copy so posting doesn't zap the original + tempname Wcopy + mat `Wcopy'=`wmatrix' + ereturn matrix W `Wcopy' + } + + if "`kernel'"!="" { + ereturn local kernel "`kernel'" + ereturn scalar bw=`bw' + ereturn local tvar "`tvar'" + if "`ivar'" ~= "" { + ereturn local ivar "`ivar'" + } + if "`bwchoice'" ~= "" { + ereturn local bwchoice "`bwchoice'" + } + } + + if "`small'"!="" { + ereturn scalar df_r=`df_r' + ereturn local small "small" + } + if "`nopartialsmall'"=="" { + ereturn local partialsmall "small" + } + + if "`cluster'"!="" { + ereturn scalar N_clust=`N_clust' + ereturn local clustvar `cluster' + } + + if "`robust'`cluster'" != "" { + ereturn local vcetype "Robust" + } + + ereturn scalar df_m=`df_m' + ereturn scalar sdofminus =`sdofminus' + ereturn scalar dofminus=`dofminus' + ereturn scalar r2=`r2' + ereturn scalar rmse=`rmse' + ereturn scalar rss=`rss' + ereturn scalar mss=`mss' + ereturn scalar r2_a=`r2_a' + ereturn scalar F=`F' + ereturn scalar Fp=`Fp' + ereturn scalar Fdf1=`Fdf1' + ereturn scalar Fdf2=`Fdf2' + ereturn scalar yy=`yy' + ereturn scalar yyc=`yyc' + ereturn scalar r2u=`r2u' + ereturn scalar r2c=`r2c' + ereturn scalar rankzz=`iv_ct' + ereturn scalar rankxx=`rhs_ct' + if "`gmm2s'`robust'`cluster'`kernel'" != "" { + ereturn scalar rankS=`rankS' + } + ereturn scalar rankV=`rankV' + ereturn scalar ll = -0.5 * (`N'*ln(2*_pi) + `N'*ln(`rss'/`N') + `N') + +* Always save J. Also save as Sargan if homoskedastic; save A-R if LIML. + ereturn scalar j=`j' + ereturn scalar jdf=`jdf' + if `j' != 0 & `j' != . 
{ + ereturn scalar jp=`jp' + } + if ("`robust'`cluster'"=="") { + ereturn scalar sargan=`j' + ereturn scalar sargandf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar sarganp=`jp' + } + } + if "`liml'"!="" { + ereturn scalar arubin=`arubin' + ereturn scalar arubin_lin=`arubin_lin' + if `j' != 0 & `j' != . { + ereturn scalar arubinp=`arubinp' + ereturn scalar arubin_linp=`arubin_linp' + } + ereturn scalar arubindf=`jdf' + } + + if "`orthog'"!="" { + ereturn scalar cstat=`cstat' + if `cstat'!=0 & `cstat' != . { + ereturn scalar cstatp=`cstatp' + ereturn scalar cstatdf=`cstatdf' + ereturn local clist `clist1' + } + } + + if "`endogtest'"!="" { + ereturn scalar estat=`estat' + if `estat'!=0 & `estat' != . { + ereturn scalar estatp=`estatp' + ereturn scalar estatdf=`estatdf' + ereturn local elist `elist1' + } + } + + if `endo1_ct' > 0 & "`noid'"=="" { + ereturn scalar idstat=`idstat' + ereturn scalar iddf=`iddf' + ereturn scalar idp=`idp' + ereturn scalar cd=`cd' + ereturn scalar widstat=`widstat' + ereturn scalar cdf=`cdf' + capture ereturn matrix ccev=`cceval' + capture ereturn matrix cdev `cdeval' + capture ereturn scalar rkf=`rkf' + } + + if "`redundant'"!="" & "`noid'"=="" { + ereturn scalar redstat=`redstat' + ereturn scalar redp=`redp' + ereturn scalar reddf=`reddf' + ereturn local redlist `redlist1' + } + + if "`first'`ffirst'`savefirst'" != "" & `endo1_ct'>0 & "`noid'"=="" { +* Capture here because firstmat empty if mvs encountered in 1st stage regressions + capture ereturn matrix first `firstmat' + ereturn scalar arf=`arf' + ereturn scalar arfp=`arfp' + ereturn scalar archi2=`archi2' + ereturn scalar archi2p=`archi2p' + ereturn scalar ardf=`ardf' + ereturn scalar ardf_r=`ardf_r' + ereturn scalar sstat=`sstat' + ereturn scalar sstatp=`sstatp' + ereturn scalar sstatdf=`sstatdf' + ereturn local firsteqs `firsteqs' + } + if "`rf'`saverf'" != "" & `endo1_ct'>0 { + ereturn local rfeq `rfeq' + } + + ereturn local depvar `lhs' + + if "`liml'"!="" { + ereturn local 
model "liml" + ereturn scalar kclass=`kclass2' + ereturn scalar lambda=`lambda' + if `fuller' > 0 & `fuller' < . { + ereturn scalar fuller=`fuller' + } + } + else if "`kclass'" != "" { + ereturn local model "kclass" + ereturn scalar kclass=`kclass2' + } + else if "`gmm2s'`cue'`b0'`wmatrix'"=="" { + if "`endo1'" == "" { + ereturn local model "ols" + } + else { + ereturn local model "iv" + } + } + else if "`cue'`b0'"~="" { + ereturn local model "cue" + } + else if "`gmm2s'"~="" { + ereturn local model "gmm2s" + } + else if "`wmatrix'"~="" { + ereturn local model "gmmw" + } + else { +* Should never enter here + ereturn local model "unknown" + } + + if "`weight'" != "" { + ereturn local wexp "=`exp'" + ereturn local wtype `weight' + } + ereturn local cmd `ivreg2_cmd' + ereturn local cmdline `cmdline' + ereturn local version `lversion' + ereturn scalar cons=`cons' + ereturn scalar partialcons=`partialcons' + + ereturn local predict "`ivreg2_cmd'_p" + + if "`e(model)'"=="gmm2s" & "`wmatrix'"=="" { + local title2 "2-Step GMM estimation" + } + else if "`e(model)'"=="gmm2s" & "`wmatrix'"~="" { + local title2 "2-Step GMM estimation with user-supplied first-step weighting matrix" + } + else if "`e(model)'"=="gmmw" { + local title2 "GMM estimation with user-supplied weighting matrix" + } + else if "`e(model)'"=="cue" & "`b0'"=="" { + local title2 "CUE estimation" + } + else if "`e(model)'"=="cue" & "`b0'"~="" { + local title2 "CUE evaluated at user-supplied parameter vector" + } + else if "`e(model)'"=="ols" { + local title2 "OLS estimation" + } + else if "`e(model)'"=="iv" { + local title2 "IV (2SLS) estimation" + } + else if "`e(model)'"=="liml" { + local title2 "LIML estimation" + } + else if "`e(model)'"=="kclass" { + local title2 "k-class estimation" + } + else { +* Should never reach here + local title2 "unknown estimation" + } + if "`e(vcetype)'" == "Robust" { + local hacsubtitle1 "heteroskedasticity" + } + if "`e(kernel)'"!="" { + local hacsubtitle3 "autocorrelation" + 
} + if "`e(clustvar)'"!="" { + local hacsubtitle3 "clustering on `e(clustvar)'" + } + if "`hacsubtitle1'"~="" & "`hacsubtitle3'" ~= "" { + local hacsubtitle2 " and " + } + if "`title'"=="" { + ereturn local title "`title1'`title2'" + } + else { + ereturn local title "`title'" + } + if "`subtitle'"~="" { + ereturn local subtitle "`subtitle'" + } + local hacsubtitle "`hacsubtitle1'`hacsubtitle2'`hacsubtitle3'" + if "`b0'"~="" { + ereturn local hacsubtitleB "Estimates based on supplied parameter vector" + } + else if "`hacsubtitle'"~="" & "`gmm2s'`cue'"~="" { + ereturn local hacsubtitleB "Estimates efficient for arbitrary `hacsubtitle'" + } + else if "`wmatrix'"~="" { + ereturn local hacsubtitleB "Efficiency of estimates dependent on weighting matrix" + } + else { + ereturn local hacsubtitleB "Estimates efficient for homoskedasticity only" + } + if "`hacsubtitle'"~="" { + ereturn local hacsubtitleV "Statistics robust to `hacsubtitle'" + } + else { + ereturn local hacsubtitleV "Statistics consistent for homoskedasticity only" + } + } + +******************************************************************************************* +* Display results unless ivreg2 called just to generate stats or nooutput option + + if "`nooutput'" == "" { + if "`savefirst'`saverf'" != "" { + DispStored `"`saverf'"' `"`savefirst'"' `"`ivreg2_cmd'"' + } + if "`rf'" != "" { + DispRF + } + if "`first'" != "" { + DispFirst `"`ivreg2_cmd'"' + } + if "`first'`ffirst'" != "" { + DispFFirst `"`ivreg2_cmd'"' + } + if "`eform'"!="" { + local efopt "eform(`eform')" + } + DispMain `"`noheader'"' `"`plus'"' `"`efopt'"' `"`level'"' `"`nofooter'"' `"`ivreg2_cmd'"' + } + +* Drop first stage estimations unless explicitly saved or if replay + if "`savefirst'" == "" { + local firsteqs "`e(firsteqs)'" + foreach eqname of local firsteqs { + capture estimates drop `eqname' + } + ereturn local firsteqs + } + +* Drop reduced form estimation unless explicitly saved or if replay + if "`saverf'" == "" { + local 
eqname "`e(rfeq)'" + capture estimates drop `eqname' + ereturn local rfeq + } + +end + +******************************************************************************************* +* SUBROUTINES +******************************************************************************************* + +program define DispMain, eclass + args noheader plus efopt level nofooter helpfile + version 8.2 +* Prepare for problem resulting from rank(S) being insufficient +* Results from insuff number of clusters, too many lags in HAC, +* to calculate robust S matrix, HAC matrix not PD, singleton dummy, +* and indicated by missing value for j stat +* Macro `rprob' is either 1 (problem) or 0 (no problem) + capture local rprob ("`e(j)'"==".") + + if "`noheader'"=="" { + if "`e(title)'" ~= "" { +di in gr _n "`e(title)'" + local tlen=length("`e(title)'") +di in gr "{hline `tlen'}" + } + if "`e(subtitle)'" ~= "" { +di in gr "`e(subtitle)'" + } + if "`e(model)'"=="liml" | "`e(model)'"=="kclass" { +di in gr "k =" %7.5f `e(kclass)' + } + if "`e(model)'"=="liml" { +di in gr "lambda =" %7.5f `e(lambda)' + } + if e(fuller) > 0 & e(fuller) < . 
{ +di in gr "Fuller parameter=" %-5.0f `e(fuller)' + } + if "`e(hacsubtitleB)'" ~= "" { +di in gr _n "`e(hacsubtitleB)'" _c + } + if "`e(hacsubtitleV)'" ~= "" { +di in gr _n "`e(hacsubtitleV)'" + } + if "`e(kernel)'"!="" { +di in gr " kernel=`e(kernel)'; bandwidth=" %6.2g `e(bw)' + if "`e(bwchoice)'"!="" { +di in gr " `e(bwchoice)'" + } +di in gr " time variable (t): " in ye e(tvar) + if "`e(ivar)'" != "" { +di in gr " group variable (i): " in ye e(ivar) + } + } + di + if "`e(clustvar)'"!="" { +di in gr "Number of clusters (" "`e(clustvar)'" ") = " in ye %-4.0f e(N_clust) _continue + } + else { +di in gr " " _continue + } +di in gr _col(55) "Number of obs = " in ye %8.0f e(N) +di in gr _c _col(55) "F(" %3.0f e(Fdf1) "," %6.0f e(Fdf2) ") = " + if e(F) < 99999 { +di in ye %8.2f e(F) + } + else { +di in ye %8.2e e(F) + } +di in gr _col(55) "Prob > F = " in ye %8.4f e(Fp) + +di in gr "Total (centered) SS = " in ye %12.0g e(yyc) _continue +di in gr _col(55) "Centered R2 = " in ye %8.4f e(r2c) +di in gr "Total (uncentered) SS = " in ye %12.0g e(yy) _continue +di in gr _col(55) "Uncentered R2 = " in ye %8.4f e(r2u) +di in gr "Residual SS = " in ye %12.0g e(rss) _continue +di in gr _col(55) "Root MSE = " in ye %8.4g e(rmse) +di + } + +* Display coefficients etc. +* Unfortunate but necessary hack here: to suppress message about cluster adjustment of +* standard error, clear e(clustvar) and then reset it after display + local cluster `e(clustvar)' + ereturn local clustvar + ereturn display, `plus' `efopt' level(`level') + ereturn local clustvar `cluster' + +* Display 1st footer with identification stats +* Footer not displayed if -nofooter- option or if pure OLS, i.e., model="ols" and Sargan-Hansen=0 + if ~("`nofooter'"~="" | (e(model)=="ols" & (e(sargan)==0 | e(j)==0))) { + +* Under ID test + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##idtest:Underidentification test}" + if "`e(vcetype)'`e(kernel)'"=="" { +di in gr _c " (Anderson canon. corr. 
LM statistic):" + } + else { +di in gr _c " (Kleibergen-Paap rk LM statistic):" + } +di in ye _col(71) %8.3f e(idstat) +di in gr _col(52) "Chi-sq(" in ye e(iddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(idp) +* IV redundancy statistic + if "`e(redlist)'"!="" { +di in gr "-redundant- option:" +di in smcl _c "{help `helpfile'##redtest:IV redundancy test}" +di in gr _c " (LM test of redundancy of specified instruments):" +di in ye _col(71) %8.3f e(redstat) +di in gr _col(52) "Chi-sq(" in ye e(reddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(redp) +di in gr "Instruments tested: " _c + Disp `e(redlist)', _col(23) + } +di in smcl in gr "{hline 78}" + } +* Report Cragg-Donald statistic + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##widtest:Weak identification test}" +di in gr " (Cragg-Donald Wald F statistic):" in ye _col(71) %8.3f e(cdf) + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr " (Kleibergen-Paap rk Wald F statistic):" in ye _col(71) %8.3f e(widstat) + } +di in gr _c "Stock-Yogo weak ID test critical values:" + local cdmissing=1 + if "`e(model)'"=="iv" | "`e(model)'"=="gmm2s" | "`e(model)'"=="gmmw" { + cdsy, type(ivbias5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "30% maximal IV relative bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(42) "10% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "15% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "25% maximal IV size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)==.) | "`e(model)'"=="cue" { + cdsy, type(limlsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "15% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "25% maximal LIML size" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)<.) { + cdsy, type(fullrel5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(42) "20% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "30% maximal Fuller rel. bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "10% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "20% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(42) "30% Fuller maximum bias" in ye _col(73) %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + if `cdmissing' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." 
+ } + } + di in smcl in gr "{hline 78}" + } + +* Report either (a) Sargan-Hansen-C stats, or (b) robust covariance matrix problem +* e(model)="gmmw" means user-supplied weighting matrix and Hansen J using 2nd-step resids reported + if `rprob' == 0 { +* Display overid statistic + if "`e(vcetype)'" == "Robust" | "`e(model)'" == "gmmw" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } + else { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } +di in ye _col(71) %8.3f e(j) + if e(rankxx) < e(rankzz) { +di in gr _col(52) "Chi-sq(" in ye e(jdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(jp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + +* Display orthog option: C statistic (difference of Sargan statistics) + if e(cstat) != . { +* If C-stat = 0 then warn, otherwise output + if e(cstat) > 0 { +di in gr "-orthog- option:" + if "`e(vcetype)'" == "Robust" { +di in gr _c "Hansen J statistic (eqn. excluding suspect orthog. conditions): " + } + else { +di in gr _c "Sargan statistic (eqn. 
excluding suspect orthogonality conditions):" + } +di in ye _col(71) %8.3f e(j)-e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(jdf)-e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f chiprob(e(jdf)-e(cstatdf),e(j)-e(cstat)) +di in smcl _c "{help `helpfile'##ctest:C statistic}" +di in gr _c " (exogeneity/orthogonality of suspect instruments): " +di in ye _col(71) %8.3f e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f e(cstatp) +di in gr "Instruments tested: " _c + Disp `e(clist)', _col(23) + } + if e(cstat) == 0 { +di in gr _n "Collinearity/identification problems in eqn. excl. suspect orthog. conditions:" +di in gr " C statistic not calculated for -orthog- option" + } + } + } + else { +* Problem exists with robust VCV - notify and list possible causes +di in r "Warning: estimated covariance matrix of moment conditions not of full rank." + if e(rankxx) < e(rankzz) { +di in r " overidentification statistic not reported, and" + } +di in r " standard errors and model tests should be interpreted with caution." +di in r "Possible causes:" + if "`e(N_clust)'" != "" { +di in r " number of clusters insufficient to calculate robust covariance matrix" + } + if "`e(kernel)'" != "" { +di in r " covariance matrix of moment conditions not positive definite" +di in r " covariance matrix uses too many lags" + } +di in r " singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)" +di in r in smcl _c "{help `helpfile'##partial:partial}" +di in r " option may address problem." + } + +* Display endog option: endogeneity test statistic + if e(estat) != . 
{ +* If stat = 0 then warn, otherwise output + if e(estat) > 0 { +di in gr "-endog- option:" +di in smcl _c "{help `helpfile'##endogtest:Endogeneity test}" +di in gr _c " of endogenous regressors: " +di in ye _col(71) %8.3f e(estat) +di in gr _col(52) "Chi-sq(" in ye e(estatdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(estatp) +di in gr "Regressors tested: " _c + Disp `e(elist)', _col(23) + } + if e(estat) == 0 { +di in gr _n "Collinearity/identification problems in restricted equation:" +di in gr " Endogeneity test statistic not calculated for -endog- option" + } + } + + di in smcl in gr "{hline 78}" +* Display AR overid statistic if LIML and not robust + if "`e(model)'" == "liml" & "`e(vcetype)'" ~= "Robust" & "`e(kernel)'" == "" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (LR test of excluded instruments):" + } +di in ye _col(72) %7.3f e(arubin) + if e(rankxx) < e(rankzz) { +di in gr _col(52) "Chi-sq(" in ye e(arubindf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(arubinp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + di in smcl in gr "{hline 78}" + } + } + +* Display 2nd footer with variable lists + if "`nofooter'"=="" { + +* Warn about dropped instruments if any +* (Re-)calculate number of user-supplied instruments + local iv1_ct : word count `e(insts)' + local iv1_ct = `iv1_ct' + `e(cons)' + + if `iv1_ct' > e(rankzz) { +di in gr "Collinearities detected among instruments: " _c +di in gr `iv1_ct'-e(rankzz) " instrument(s) dropped" + } + + if "`e(collin)'`e(dups)'" != "" | `e(partial_ct)'>0 { +* If collinearities, duplicates or partial, abbreviated varlists saved with a 1 at the end + local one "1" + } + if "`e(instd)'" != "" { + di in gr "Instrumented:" _c + Disp `e(instd`one')', _col(23) + } + if "`e(inexog)'" != "" { + 
di in gr "Included instruments:" _c + Disp `e(inexog`one')', _col(23) + } + if "`e(exexog)'" != "" { + di in gr "Excluded instruments:" _c + Disp `e(exexog`one')', _col(23) + } + if `e(partial_ct)' > 0 { + if e(partialcons) { + local partial "`e(partial`one')' _cons" + } + else { + local partial "`e(partial`one')'" + } +di in smcl _c "{help `helpfile'##partial:Partialled-out}" + di in gr ":" _c + Disp `partial', _col(23) + if "`e(partialsmall)'"=="" { +di in gr _col(23) "nb: small-sample adjustments do not account for" +di in gr _col(23) " partialled-out variables" + } + else { +di in gr _col(23) "nb: small-sample adjustments account for" +di in gr _col(23) " partialled-out variables" + } + } + if "`e(dups)'" != "" { + di in gr "Duplicates:" _c + Disp `e(dups)', _col(23) + } + if "`e(collin)'" != "" { + di in gr "Dropped collinear:" _c + Disp `e(collin)', _col(23) + } + if "`e(ecollin)'" != "" { + di in gr "Reclassified as exog:" _c + Disp `e(ecollin)', _col(23) + } + di in smcl in gr "{hline 78}" + } +end + +************************************************************************************** + +program define DispRF + version 8.2 + local eqname "`e(rfeq)'" + local depvar "`e(depvar)'" + local strlen : length local depvar + local strlen = `strlen'+25 +di +di in gr "Reduced-form regression: `e(depvar)'" +di in smcl in gr "{hline `strlen'}" + capture estimates replay `eqname' + if "`eqname'"=="" | _rc != 0 { +di in ye "Unable to display reduced-form regression of `e(depvar)'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + else { + estimates replay `eqname', noheader +di + } +end + +program define DispFirst + version 8.2 + args helpfile + tempname firstmat ivest sheapr2 pr2 F df df_r pvalue APF APFdf1 APFdf2 APFp APr2 + + mat `firstmat'=e(first) + if `firstmat'[1,1] == . 
{ +di +di in ye "Unable to display first-stage estimates; macro e(first) is missing" + exit + } +di in gr _newline "First-stage regressions" +di in smcl in gr "{hline 23}" +di + local endo1 : colnames(`firstmat') + local nrvars : word count `endo1' + local firsteqs "`e(firsteqs)'" + local nreqs : word count `firsteqs' + if `nreqs' < `nrvars' { +di in ye "Unable to display all first-stage regressions." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + foreach eqname of local firsteqs { + _estimates hold `ivest' + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to list stored estimation `eqname'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." 
+di + } + else { + local vn "`e(depvar)'" +di in gr "First-stage regression of `vn':" + estimates replay `eqname', noheader + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] + mat `APF' =`firstmat'["APF","`vn'"] + mat `APFdf1' =`firstmat'["APFdf1","`vn'"] + mat `APFdf2' =`firstmat'["APFdf2","`vn'"] + mat `APFp' =`firstmat'["APFp","`vn'"] + mat `APr2' =`firstmat'["APr2","`vn'"] + +di in gr "F test of excluded instruments:" +di in gr " F(" %3.0f `df'[1,1] "," %6.0f `df_r'[1,1] ") = " in ye %8.2f `F'[1,1] +di in gr " Prob > F = " in ye %8.4f `pvalue'[1,1] + +di in smcl "{help `helpfile'##apstats:Angrist-Pischke multivariate F test of excluded instruments:}" +di in gr " F(" %3.0f `APFdf1'[1,1] "," %6.0f `APFdf2'[1,1] ") = " in ye %8.2f `APF'[1,1] +di in gr " Prob > F = " in ye %8.4f `APFp'[1,1] + +di + } + _estimates unhold `ivest' + } +end + +program define DispStored + args saverf savefirst helpfile + version 8.2 + if "`saverf'" != "" { + local eqlist "`e(rfeq)'" + } + if "`savefirst'" != "" { + local eqlist "`eqlist' `e(firsteqs)'" + } + local eqlist : list retokenize eqlist +di in gr _newline "Stored estimation results" +di in smcl in gr "{hline 25}" _c + capture estimates dir `eqlist' + if "`eqlist'" != "" & _rc == 0 { +* Estimates exist and can be listed + estimates dir `eqlist' + } + else if "`eqlist'" != "" & _rc != 0 { +di +di in ye "Unable to list stored estimations." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } +end + +program define DispFFirst + version 8.2 + args helpfile + tempname firstmat + tempname sheapr2 pr2 F df df_r pvalue APF APFdf1 APFdf2 APFp APchi2 APchi2p APr2 + mat `firstmat'=e(first) + if `firstmat'[1,1] == . 
{ +di +di in ye "Unable to display summary of first-stage estimates; macro e(first) is missing" + exit + } + local endo : colnames(`firstmat') + local nrvars : word count `endo' + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + local efirsteqs "`e(firsteqs)'" + + mat `df' =`firstmat'["df",1] + mat `df_r' =`firstmat'["df_r",1] + mat `APFdf1' =`firstmat'["APFdf1",1] + mat `APFdf2' =`firstmat'["APFdf2",1] + +di +di in gr _newline "Summary results for first-stage regressions" +di in smcl in gr "{hline 43}" +di + +di _c in smcl _col(44) "{help `helpfile'##apstats:(Underid)}" +di in smcl _col(65) "{help `helpfile'##apstats:(Weak id)}" + +di _c in gr "Variable |" +di _c in smcl _col(16) "{help `helpfile'##apstats:F}" in gr "(" +di _c in ye _col(17) %3.0f `df'[1,1] in gr "," in ye %6.0f `df_r'[1,1] in gr ") P-val" +di _c in gr _col(37) "|" +di _c in smcl _col(39) "{help `helpfile'##apstats:AP Chi-sq}" in gr "(" +di _c in ye %3.0f `APFdf1'[1,1] in gr ") P-val" +di _c in gr _col(60) "|" +di _c in smcl _col(62) "{help `helpfile'##apstats:AP F}" in gr "(" +di in ye _col(67) %3.0f `APFdf1'[1,1] in gr "," in ye %6.0f `APFdf2'[1,1] in gr ")" + + local i = 1 + foreach vn of local endo { + + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] + mat `APF' =`firstmat'["APF","`vn'"] + mat `APFdf1' =`firstmat'["APFdf1","`vn'"] + mat `APFdf2' =`firstmat'["APFdf2","`vn'"] + mat `APFp' =`firstmat'["APFp","`vn'"] + mat `APchi2' =`firstmat'["APchi2","`vn'"] + mat `APchi2p' =`firstmat'["APchi2p","`vn'"] + mat `APr2' =`firstmat'["APr2","`vn'"] + + local vnlen : length local vn + if `vnlen' > 12 { + local vn : piece 1 12 of "`vn'" + } +di _c in y %-12s "`vn'" _col(14) in gr "|" _col(18) in y %8.2f `F'[1,1] +di _c _col(28) in y %8.4f `pvalue'[1,1] +di _c _col(37) in g "|" 
_col(42) in y %8.2f `APchi2'[1,1] _col(51) in y %8.4f `APchi2p'[1,1] +di _col(60) in g "|" _col(65) in y %8.2f `APF'[1,1] + local i = `i' + 1 + } +di + + if "`robust'`cluster'" != "" { + if "`cluster'" != "" { + local rtype "cluster-robust" + } + else if "`kernel'" != "" { + local rtype "heteroskedasticity and autocorrelation-robust" + } + else { + local rtype "heteroskedasticity-robust" + } + } + else if "`kernel'" != "" { + local rtype "autocorrelation-robust" + } + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: first-stage test statistics `rtype'" +di + } + + local k2 = `APFdf1'[1,1] +di in gr "Stock-Yogo weak ID test critical values for single endogenous regressor:" + local cdmissing=1 + if "`e(model)'"=="iv" | "`e(model)'"=="gmm2s" | "`e(model)'"=="gmmw" { + cdsy, type(ivbias5) k2(`e(exexog_ct)') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(37) "5% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "30% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "15% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`k2') nendog(1) + if "`r(cv)'"~="." 
{ + di in gr _col(36) "25% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)==.) | "`e(model)'"=="cue" { + cdsy, type(limlsize10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "15% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "25% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)<.) { + cdsy, type(fullrel5) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(43) "5% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "30% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(43) "5% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "10% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`k2') nendog(1) + if "`r(cv)'"~="." 
{ + di in gr _col(36) "20% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`k2') nendog(1) + if "`r(cv)'"~="." { + di in gr _col(36) "30% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." +di + } + else { +di + } + +* Check that AP chi-sq and F denominator are correct and = underid test dof + if e(iddf)~=`APFdf1'[1,1] { +di in red "Warning: Error in calculating first-stage id statistics above;" +di in red " dof of AP statistics is " `APFdf1'[1,1] ", should be L-(K-1)=`e(iddf)'." + } + + tempname iddf idstat idp widstat cdf rkf + scalar `iddf'=e(iddf) + scalar `idstat'=e(idstat) + scalar `idp'=e(idp) + scalar `widstat'=e(widstat) + scalar `cdf'=e(cdf) + capture scalar `rkf'=e(rkf) +di in smcl "{help `helpfile'##idtest:Underidentification test}" +di in gr "Ho: matrix of reduced form coefficients has rank=K1-1 (underidentified)" +di in gr "Ha: matrix has rank=K1 (identified)" + if "`robust'`kernel'"=="" { +di in ye "Anderson canon. corr. 
LM statistic" _c + } + else { +di in ye "Kleibergen-Paap rk LM statistic" _c + } +di in gr _col(42) "Chi-sq(" in ye `iddf' in gr ")=" %-7.2f in ye `idstat' /* + */ _col(61) in gr "P-val=" %6.4f in ye `idp' + +di +di in smcl "{help `helpfile'##widtest:Weak identification test}" +di in gr "Ho: equation is weakly identified" +di in ye "Cragg-Donald Wald F statistic" _col(65) %8.2f `cdf' + if "`robust'`kernel'"~="" { +di in ye "Kleibergen-Paap Wald rk F statistic" _col(65) %8.2f `rkf' + } +di + +di in gr "Stock-Yogo weak ID test critical values for K1=`e(endog_ct)' and L1=`e(exexog_ct)':" + local cdmissing=1 + if "`e(model)'"=="iv" | "`e(model)'"=="gmm2s" | "`e(model)'"=="gmmw" { + cdsy, type(ivbias5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(37) "5% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "30% maximal IV relative bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "15% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(36) "20% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "25% maximal IV size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)==.) | "`e(model)'"=="cue" { + cdsy, type(limlsize10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "15% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "25% maximal LIML size" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`e(model)'"=="liml" & e(fuller)<.) { + cdsy, type(fullrel5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(43) "5% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "20% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "30% maximal Fuller rel. bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." 
{ + di in gr _col(43) "5% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "10% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "20% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`e(exexog_ct)') nendog(`e(endog_ct)') + if "`r(cv)'"~="." { + di in gr _col(36) "30% Fuller maximum bias" in ye _col(67) %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + if `cdmissing' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." + } + di + } + + tempname arf arfp archi2 archi2p ardf ardf_r + tempname sstat sstatp sstatdf +di in smcl "{help `helpfile'##wirobust:Weak-instrument-robust inference}" +di in gr "Tests of joint significance of endogenous regressors B1 in main equation" +di in gr "Ho: B1=0 and orthogonality conditions are valid" +* Needs to be small so that adjusted dof is reflected in F stat + scalar `arf'=e(arf) + scalar `arfp'=e(arfp) + scalar `archi2'=e(archi2) + scalar `archi2p'=e(archi2p) + scalar `ardf'=e(ardf) + scalar `ardf_r'=e(ardf_r) + scalar `sstat'=e(sstat) + scalar `sstatp'=e(sstatp) + scalar `sstatdf'=e(sstatdf) +di in ye _c "Anderson-Rubin Wald test" +di in gr _col(36) "F(" in ye `ardf' in gr "," in ye `ardf_r' in gr ")=" /* + */ _col(49) in ye %7.2f `arf' _col(61) in gr "P-val=" in ye %6.4f `arfp' +di in ye _c "Anderson-Rubin Wald test" +di in gr _col(36) "Chi-sq(" in ye `ardf' in gr ")=" /* + */ _col(49) in ye %7.2f `archi2' _col(61) in gr "P-val=" in ye %6.4f `archi2p' +di in ye _c "Stock-Wright LM S statistic" +di in gr 
_col(36) "Chi-sq(" in ye `sstatdf' in gr ")=" /* + */ _col(49) in ye %7.2f `sstat' _col(61) in gr "P-val=" in ye %6.4f `sstatp' +di + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: Underidentification, weak identification and weak-identification-robust" +di in gr " test statistics `rtype'" +di + } + + if "`cluster'" != "" { +di in gr "Number of clusters N_clust = " in ye %10.0f e(N_clust) + } +di in gr "Number of observations N = " in ye %10.0f e(N) +di in gr "Number of regressors K = " in ye %10.0f e(rankxx) +di in gr "Number of endogenous regressors K1 = " in ye %10.0f e(endog_ct) +di in gr "Number of instruments L = " in ye %10.0f e(rankzz) +di in gr "Number of excluded instruments L1 = " in ye %10.0f e(ardf) + if "`e(partial)'" != "" { +di in gr "Number of partialled-out regressors/IVs = " in ye %10.0f e(partial_ct) +di in gr "NB: K & L do not included partialled-out variables" + } + +end + +* Performs first-stage regressions + +program define doFirst, rclass + version 8.2 + args endog /* variable list (including depvar) + */ inexog /* list of included exogenous + */ exexog /* list of excluded exogenous + */ XXinv /* + */ XPZXinv /* + */ touse /* touse sample + */ weight /* full weight expression w/ [] + */ nocons /* + */ robust /* + */ clopt /* + */ bwopt /* + */ kernopt /* + */ savefprefix /* + */ dofmopt /* + */ sdofmopt /* + */ ivreg2_cmd + +* Create fitted values of endogenous regressors + local endog_hat "" + tsrevar `endog' + local ts_endog "`r(varlist)'" + foreach x of local ts_endog { + qui regress `x' `inexog' `exexog' `weight' if `touse', `nocons' + tempname `x'_hat + qui predict double ``x'_hat', xb + local endog_hat "`endog_hat' ``x'_hat'" + } +* inexog and exexog are used with partial() option so must tsrevar them + tsrevar `inexog' + local ts_inexog "`r(varlist)'" + tsrevar `exexog' + local ts_exexog "`r(varlist)'" + + tempname statmat statmat1 + local i 1 + foreach x of local endog { + capture `ivreg2_cmd' `x' `inexog' `exexog' `weight' 
/* + */ if `touse', `nocons' `robust' `clopt' `bwopt' `kernopt' /* + */ `dofmopt' `sdofmopt' small nocollin + if _rc ~= 0 { +* First-stage regression failed +di in ye "Unable to estimate first-stage regression of `x'" + if _rc == 506 { +di in ye " var-cov matrix of first-stage regression of `x' not positive-definite" + } + } + else { +* First-stage regression successful +* Check if there is enough room to save results; leave one free. Allow for overwriting. +* Max is 20-1=19 for Stata 9.0 and earlier, 300-1=299 for Stata 9.1+ + local maxest=299 + local vn "`x'" + local plen : length local savefprefix + local vlen : length local vn + if `plen'+`vlen' > 27 { + local vlen=27-`plen' + local vn : permname `vn', length(`vlen') +* Must create a variable so that permname doesn't reuse it + gen `vn'=0 + local dropvn "`dropvn' `vn'" + } + local eqname "`savefprefix'`vn'" + local eqname : subinstr local eqname "." "_" + qui estimates dir + local est_list "`r(names)'" + Subtract est_list : "`est_list'" "`eqname'" + local est_ct : word count `est_list' + if `est_ct' < `maxest' { + capture est store `eqname', title("First-stage regression: `x'") + if _rc == 0 { + local firsteqs "`firsteqs' `eqname'" + } + } + else { +di +di in ye "Unable to store first-stage regression of `x'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." +di + } + tempname rssall rssinc pr2 F p + scalar `rssall'=e(rss) + quietly test `exexog' + scalar `F'=r(F) + scalar `p'=r(p) + local df=r(df) + local df_r=r(df_r) +* 1st stage regression without excluded exogenous +* Use regress since need only RSS and handles all cases, including perverse ones (e.g. 
no regressors) + qui regress `x' `inexog' `weight' if `touse', `nocons' + scalar `rssinc'=e(rss) +* NB: uncentered R2 for main regression is 1-rssall/yy; for restricted is 1-rssinc/yy; +* squared semipartial correlation=(rssinc-rssall)/yy=diff of 2 R2s +* Squared partial correlation (="partialled-out R2") + scalar `pr2'=(`rssinc'-`rssall')/`rssinc' + +* A-P multivariate F and corresponding r-sq + tempname APF APFdf1 APFdf2 APFp APchi2 APchi2p APr2 + local x_hat "`endog_hat'" + tokenize `endog_hat' + Subtract x_hat : "`x_hat'" "``i''" + tokenize `ts_endog' + local ts_x "``i''" + qui `ivreg2_cmd' `ts_x' `x_hat' `ts_inexog' `ts_exexog' `weight' /* + */ if `touse', `nocons' `robust' `clopt' `bwopt' `kernopt' /* + */ `dofmopt' `sdofmopt' small nocollin partial(`x_hat' `ts_inexog') + scalar `APF' = e(F) + scalar `APFdf1' = e(Fdf1) + scalar `APFdf2' = e(Fdf2) + scalar `APFp' = e(Fp) + if "`clopt'"=="" { + scalar `APchi2' = e(F)*e(Fdf1)/e(Fdf2)*(e(N)-e(dofminus)) + } + else { + scalar `APchi2' = e(F)*e(Fdf1)/e(Fdf2)*e(N_clust)/(e(N)-e(rankxx)-e(sdofminus))*(e(N)-1) + } + scalar `APchi2p'= chi2tail(e(Fdf1),`APchi2') + scalar `APr2' = e(r2) + +* End of first-stage successful block + } + +* Godfrey method of Shea partial R2 uses IV and OLS estimates without robust vcvs: +* Partial R2 = OLS V[d,d] / IV V[d,d] * IV s2 / OLS s2 +* where d,d is the diagonal element corresponding to the endog regressor +* ... 
but this simplifies to matrices that have already been calculated: +* = XXinv[d,d] / XPZXinv[d,d] + tempname sols siv + tempname sheapr2 + mat `sols'=`XXinv'["`x'","`x'"] + mat `siv'=`XPZXinv'["`x'","`x'"] + scalar `sheapr2' = `sols'[1,1]/`siv'[1,1] + + capture { + mat `statmat1' = (`sheapr2' \ `pr2' \ `F' \ `df' \ `df_r' \ `p' /* + */ \ `APF' \ `APFdf1' \ `APFdf2' \ `APFp' \ `APchi2' \ `APchi2p' \ `APr2') + mat colname `statmat1' = `x' + mat `statmat' = nullmat(`statmat') , `statmat1' + } + local i = `i' + 1 + } +* Drop any temporarily-created permname variables + if trim("`dropvn'")~="" { + foreach vn of varlist `dropvn' { + capture drop `vn' + } + } + capture mat rowname `statmat' = sheapr2 pr2 F df df_r pvalue APF APFdf1 APFdf2 APFp APchi2 APchi2p APr2 + if _rc==0 { + return matrix firstmat `statmat' + } + return local firsteqs "`firsteqs'" +end + +program define doRF, rclass + version 8.2 + args lhs /* + */ inexog /* list of included exogenous + */ exexog /* list of excluded exogenous + */ touse /* touse sample + */ weight /* full weight expression w/ [] + */ nocons /* + */ robust /* + */ clopt /* + */ bwopt /* + */ kernopt /* + */ saverfprefix /* + */ dofminus /* + */ sdofminus /* + */ ivreg2_cmd + +* Anderson-Rubin test of signif of endog regressors (Bo=0) +* In case ivreg2 called with adjusted dof, first stage should adjust dof as well + tempname arf arfp archi2 archi2p ardf ardf_r tempest + capture _estimates hold `tempest' + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } +* Needs to be small so that adjusted dof is reflected in F stat +* capture to prevent not-full-rank error warning + capture `ivreg2_cmd' `lhs' `inexog' `exexog' `weight' if `touse', /* + */ small `nocons' dofminus(`dofminus') sdofminus(`sdofminus') /* + */ `robust' `clopt' `bwopt' `kernopt' nocollin + if _rc != 0 { +di as err "Error: reduced 
form estimation failed" + exit 498 + } + qui test `exexog' + scalar `arf'=r(F) + scalar `arfp'=r(p) + scalar `ardf'=r(df) + scalar `ardf_r'=r(df_r) + if "`clopt'"=="" { + scalar `archi2'=`arf'*`ardf'*(e(N)-`dofminus')/(e(N)-e(rankxx)-`dofminus'-`sdofminus') + } + else { + scalar `archi2'=`arf'*`ardf'*e(N_clust)/r(df_r)*(e(N)-1)/(e(N)-e(rankxx)-`sdofminus') + } + scalar `archi2p'=chiprob(`ardf',`archi2') + +* Check if there is enough room to save results; leave one free. Allow for overwriting. +* Max is 20-1=19 for Stata 9.0 and earlier, 300-1=299 for Stata 9.1+ + local maxest=299 + local vn "`lhs'" + local plen : length local saverfprefix + local vlen : length local lhs + if `plen'+`vlen' > 27 { + local vlen=27-`plen' + local vn : permname `vn', length(`vlen') + } + local eqname "`saverfprefix'`vn'" + local eqname : subinstr local eqname "." "_" + qui estimates dir + local est_list "`r(names)'" + Subtract est_list : "`est_list'" "`eqname'" + local est_ct : word count `est_list' + if `est_ct' < `maxest' { + capture est store `eqname', title("Reduced-form regression: `lhs'") + return local rfeq "`eqname'" + } + else { +di +di in ye "Unable to store reduced-form regression of `lhs'." +di in ye "There may be insufficient room to store results using -estimates store-." +di in ye "Try dropping one or more estimation results using -estimates drop-." 
+di + } + _estimates unhold `tempest' + return scalar arf=`arf' + return scalar arfp=`arfp' + return scalar ardf=`ardf' + return scalar ardf_r=`ardf_r' + return scalar archi2=`archi2' + return scalar archi2p=`archi2p' +end + +************************************************************************************** +program define IsStop, sclass + /* sic, must do tests one-at-a-time, + * 0, may be very large */ + version 8.2 + if `"`0'"' == "[" { + sret local stop 1 + exit + } + if `"`0'"' == "," { + sret local stop 1 + exit + } + if `"`0'"' == "if" { + sret local stop 1 + exit + } +* per official ivreg 5.1.3 + if substr(`"`0'"',1,3) == "if(" { + sret local stop 1 + exit + } + if `"`0'"' == "in" { + sret local stop 1 + exit + } + if `"`0'"' == "" { + sret local stop 1 + exit + } + else sret local stop 0 +end + +program define Disp + version 8.2 + syntax [anything] [, _col(integer 15) ] + local len = 80-`_col'+1 + local piece : piece 1 `len' of `"`anything'"' + local i 1 + while "`piece'" != "" { + di in gr _col(`_col') "`first'`piece'" + local i = `i' + 1 + local piece : piece `i' `len' of `"`anything'"' + } + if `i'==1 { + di + } +end + + + +* Remove all tokens in dirt from full +* Returns "cleaned" full list in cleaned + +program define Subtract /* : */ + version 8.2 + args cleaned /* macro name to hold cleaned list + */ colon /* ":" + */ full /* list to be cleaned + */ dirt /* tokens to be cleaned from full */ + + tokenize `dirt' + local i 1 + while "``i''" != "" { + local full : subinstr local full "``i''" "", word all + local i = `i' + 1 + } + + tokenize `full' /* cleans up extra spaces */ + c_local `cleaned' `*' +end + +program define vecsort /* Also clears col/row names */ + version 8.2 + args vmat + tempname hold + mat `vmat'=`vmat'+J(rowsof(`vmat'),colsof(`vmat'),0) + local lastcol = colsof(`vmat') + local i 1 + while `i' < `lastcol' { + if `vmat'[1,`i'] > `vmat'[1,`i'+1] { + scalar `hold' = `vmat'[1,`i'] + mat `vmat'[1,`i'] = `vmat'[1,`i'+1] + mat 
`vmat'[1,`i'+1] = `hold' + local i = 1 + } + else { + local i = `i' + 1 + } + } +end + +program define matsort + version 8.2 + args vmat names + tempname hold + foreach vn in `names' { + mat `hold'=nullmat(`hold'), `vmat'[1...,"`vn'"] + } + mat `vmat'=`hold' + mat drop `hold' + foreach vn in `names' { + mat `hold'=nullmat(`hold') \ `vmat'["`vn'",1...] + } + mat `vmat'=`hold' +end + +program define cdsy, rclass + version 8.2 + syntax , type(string) k2(integer) nendog(integer) + +* type() can be ivbias5 (k2<=100, nendog<=3) +* ivbias10 (ditto) +* ivbias20 (ditto) +* ivbias30 (ditto) +* ivsize10 (k2<=100, nendog<=2) +* ivsize15 (ditto) +* ivsize20 (ditto) +* ivsize25 (ditto) +* fullrel5 (ditto) +* fullrel10 (ditto) +* fullrel20 (ditto) +* fullrel30 (ditto) +* fullmax5 (ditto) +* fullmax10 (ditto) +* fullmax20 (ditto) +* fullmax30 (ditto) +* limlsize10 (ditto) +* limlsize15 (ditto) +* limlsize20 (ditto) +* limlsize25 (ditto) + + tempname temp cv + +* Initialize critical value as MV + scalar `cv'=. + + if "`type'"=="ivbias5" { + matrix input `temp' = ( /* + */ . , . , . \ /* + */ . , . , . \ /* + */ 13.91 , . , . \ /* + */ 16.85 , 11.04 , . 
\ /* + */ 18.37 , 13.97 , 9.53 \ /* + */ 19.28 , 15.72 , 12.20 \ /* + */ 19.86 , 16.88 , 13.95 \ /* + */ 20.25 , 17.70 , 15.18 \ /* + */ 20.53 , 18.30 , 16.10 \ /* + */ 20.74 , 18.76 , 16.80 \ /* + */ 20.90 , 19.12 , 17.35 \ /* + */ 21.01 , 19.40 , 17.80 \ /* + */ 21.10 , 19.64 , 18.17 \ /* + */ 21.18 , 19.83 , 18.47 \ /* + */ 21.23 , 19.98 , 18.73 \ /* + */ 21.28 , 20.12 , 18.94 \ /* + */ 21.31 , 20.23 , 19.13 \ /* + */ 21.34 , 20.33 , 19.29 \ /* + */ 21.36 , 20.41 , 19.44 \ /* + */ 21.38 , 20.48 , 19.56 \ /* + */ 21.39 , 20.54 , 19.67 \ /* + */ 21.40 , 20.60 , 19.77 \ /* + */ 21.41 , 20.65 , 19.86 \ /* + */ 21.41 , 20.69 , 19.94 \ /* + */ 21.42 , 20.73 , 20.01 \ /* + */ 21.42 , 20.76 , 20.07 \ /* + */ 21.42 , 20.79 , 20.13 \ /* + */ 21.42 , 20.82 , 20.18 \ /* + */ 21.42 , 20.84 , 20.23 \ /* + */ 21.42 , 20.86 , 20.27 \ /* + */ 21.41 , 20.88 , 20.31 \ /* + */ 21.41 , 20.90 , 20.35 \ /* + */ 21.41 , 20.91 , 20.38 \ /* + */ 21.40 , 20.93 , 20.41 \ /* + */ 21.40 , 20.94 , 20.44 \ /* + */ 21.39 , 20.95 , 20.47 \ /* + */ 21.39 , 20.96 , 20.49 \ /* + */ 21.38 , 20.97 , 20.51 \ /* + */ 21.38 , 20.98 , 20.54 \ /* + */ 21.37 , 20.99 , 20.56 \ /* + */ 21.37 , 20.99 , 20.57 \ /* + */ 21.36 , 21.00 , 20.59 \ /* + */ 21.35 , 21.00 , 20.61 \ /* + */ 21.35 , 21.01 , 20.62 \ /* + */ 21.34 , 21.01 , 20.64 \ /* + */ 21.34 , 21.02 , 20.65 \ /* + */ 21.33 , 21.02 , 20.66 \ /* + */ 21.32 , 21.02 , 20.67 \ /* + */ 21.32 , 21.03 , 20.68 \ /* + */ 21.31 , 21.03 , 20.69 \ /* + */ 21.31 , 21.03 , 20.70 \ /* + */ 21.30 , 21.03 , 20.71 \ /* + */ 21.30 , 21.03 , 20.72 \ /* + */ 21.29 , 21.03 , 20.73 \ /* + */ 21.28 , 21.03 , 20.73 \ /* + */ 21.28 , 21.04 , 20.74 \ /* + */ 21.27 , 21.04 , 20.75 \ /* + */ 21.27 , 21.04 , 20.75 \ /* + */ 21.26 , 21.04 , 20.76 \ /* + */ 21.26 , 21.04 , 20.76 \ /* + */ 21.25 , 21.04 , 20.77 \ /* + */ 21.24 , 21.04 , 20.77 \ /* + */ 21.24 , 21.04 , 20.78 \ /* + */ 21.23 , 21.04 , 20.78 \ /* + */ 21.23 , 21.03 , 20.79 \ /* + */ 21.22 , 21.03 , 20.79 \ /* + */ 21.22 
, 21.03 , 20.79 \ /* + */ 21.21 , 21.03 , 20.80 \ /* + */ 21.21 , 21.03 , 20.80 \ /* + */ 21.20 , 21.03 , 20.80 \ /* + */ 21.20 , 21.03 , 20.80 \ /* + */ 21.19 , 21.03 , 20.81 \ /* + */ 21.19 , 21.03 , 20.81 \ /* + */ 21.18 , 21.03 , 20.81 \ /* + */ 21.18 , 21.02 , 20.81 \ /* + */ 21.17 , 21.02 , 20.82 \ /* + */ 21.17 , 21.02 , 20.82 \ /* + */ 21.16 , 21.02 , 20.82 \ /* + */ 21.16 , 21.02 , 20.82 \ /* + */ 21.15 , 21.02 , 20.82 \ /* + */ 21.15 , 21.02 , 20.82 \ /* + */ 21.15 , 21.02 , 20.83 \ /* + */ 21.14 , 21.01 , 20.83 \ /* + */ 21.14 , 21.01 , 20.83 \ /* + */ 21.13 , 21.01 , 20.83 \ /* + */ 21.13 , 21.01 , 20.83 \ /* + */ 21.12 , 21.01 , 20.84 \ /* + */ 21.12 , 21.01 , 20.84 \ /* + */ 21.11 , 21.01 , 20.84 \ /* + */ 21.11 , 21.01 , 20.84 \ /* + */ 21.10 , 21.00 , 20.84 \ /* + */ 21.10 , 21.00 , 20.84 \ /* + */ 21.09 , 21.00 , 20.85 \ /* + */ 21.09 , 21.00 , 20.85 \ /* + */ 21.08 , 21.00 , 20.85 \ /* + */ 21.08 , 21.00 , 20.85 \ /* + */ 21.07 , 21.00 , 20.85 \ /* + */ 21.07 , 20.99 , 20.86 \ /* + */ 21.06 , 20.99 , 20.86 \ /* + */ 21.06 , 20.99 , 20.86 ) + + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + + + if "`type'"=="ivbias10" { + matrix input `temp' = /* + */ (.,.,. \ /* + */ .,.,. \ /* + */ 9.08,.,. \ /* + */ 10.27,7.56,. 
\ /* + */ 10.83,8.78,6.61 \ /* + */ 11.12,9.48,7.77 \ /* + */ 11.29,9.92,8.5 \ /* + */ 11.39,10.22,9.01 \ /* + */ 11.46,10.43,9.37 \ /* + */ 11.49,10.58,9.64 \ /* + */ 11.51,10.69,9.85 \ /* + */ 11.52,10.78,10.01 \ /* + */ 11.52,10.84,10.14 \ /* + */ 11.52,10.89,10.25 \ /* + */ 11.51,10.93,10.33 \ /* + */ 11.5,10.96,10.41 \ /* + */ 11.49,10.99,10.47 \ /* + */ 11.48,11,10.52 \ /* + */ 11.46,11.02,10.56 \ /* + */ 11.45,11.03,10.6 \ /* + */ 11.44,11.04,10.63 \ /* + */ 11.42,11.05,10.65 \ /* + */ 11.41,11.05,10.68 \ /* + */ 11.4,11.05,10.7 \ /* + */ 11.38,11.06,10.71 \ /* + */ 11.37,11.06,10.73 \ /* + */ 11.36,11.06,10.74 \ /* + */ 11.34,11.05,10.75 \ /* + */ 11.33,11.05,10.76 \ /* + */ 11.32,11.05,10.77 \ /* + */ 11.3,11.05,10.78 \ /* + */ 11.29,11.05,10.79 \ /* + */ 11.28,11.04,10.79 \ /* + */ 11.27,11.04,10.8 \ /* + */ 11.26,11.04,10.8 \ /* + */ 11.25,11.03,10.8 \ /* + */ 11.24,11.03,10.81 \ /* + */ 11.23,11.02,10.81 \ /* + */ 11.22,11.02,10.81 \ /* + */ 11.21,11.02,10.81 \ /* + */ 11.2,11.01,10.81 \ /* + */ 11.19,11.01,10.81 \ /* + */ 11.18,11,10.81 \ /* + */ 11.17,11,10.81 \ /* + */ 11.16,10.99,10.81 \ /* + */ 11.15,10.99,10.81 \ /* + */ 11.14,10.98,10.81 \ /* + */ 11.13,10.98,10.81 \ /* + */ 11.13,10.98,10.81 \ /* + */ 11.12,10.97,10.81 \ /* + */ 11.11,10.97,10.81 \ /* + */ 11.1,10.96,10.81 \ /* + */ 11.1,10.96,10.81 \ /* + */ 11.09,10.95,10.81 \ /* + */ 11.08,10.95,10.81 \ /* + */ 11.07,10.94,10.8 \ /* + */ 11.07,10.94,10.8 \ /* + */ 11.06,10.94,10.8 \ /* + */ 11.05,10.93,10.8 \ /* + */ 11.05,10.93,10.8 \ /* + */ 11.04,10.92,10.8 \ /* + */ 11.03,10.92,10.79 \ /* + */ 11.03,10.92,10.79 \ /* + */ 11.02,10.91,10.79 \ /* + */ 11.02,10.91,10.79 \ /* + */ 11.01,10.9,10.79 \ /* + */ 11,10.9,10.79 \ /* + */ 11,10.9,10.78 \ /* + */ 10.99,10.89,10.78 \ /* + */ 10.99,10.89,10.78 \ /* + */ 10.98,10.89,10.78 \ /* + */ 10.98,10.88,10.78 \ /* + */ 10.97,10.88,10.77 \ /* + */ 10.97,10.88,10.77 \ /* + */ 10.96,10.87,10.77 \ /* + */ 10.96,10.87,10.77 \ /* + */ 10.95,10.86,10.77 \ 
/* + */ 10.95,10.86,10.76 \ /* + */ 10.94,10.86,10.76 \ /* + */ 10.94,10.85,10.76 \ /* + */ 10.93,10.85,10.76 \ /* + */ 10.93,10.85,10.76 \ /* + */ 10.92,10.84,10.75 \ /* + */ 10.92,10.84,10.75 \ /* + */ 10.91,10.84,10.75 \ /* + */ 10.91,10.84,10.75 \ /* + */ 10.91,10.83,10.75 \ /* + */ 10.9,10.83,10.74 \ /* + */ 10.9,10.83,10.74 \ /* + */ 10.89,10.82,10.74 \ /* + */ 10.89,10.82,10.74 \ /* + */ 10.89,10.82,10.74 \ /* + */ 10.88,10.81,10.74 \ /* + */ 10.88,10.81,10.73 \ /* + */ 10.87,10.81,10.73 \ /* + */ 10.87,10.81,10.73 \ /* + */ 10.87,10.8,10.73 \ /* + */ 10.86,10.8,10.73 \ /* + */ 10.86,10.8,10.72 \ /* + */ 10.86,10.8,10.72) + + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + if "`type'"=="ivbias20" { + matrix input `temp' = ( /* + */ . , . , . \ /* + */ . , . , . \ /* + */ 6.46 , . , . \ /* + */ 6.71 , 5.57 , . \ /* + */ 6.77 , 5.91 , 4.99 \ /* + */ 6.76 , 6.08 , 5.35 \ /* + */ 6.73 , 6.16 , 5.56 \ /* + */ 6.69 , 6.20 , 5.69 \ /* + */ 6.65 , 6.22 , 5.78 \ /* + */ 6.61 , 6.23 , 5.83 \ /* + */ 6.56 , 6.23 , 5.87 \ /* + */ 6.53 , 6.22 , 5.90 \ /* + */ 6.49 , 6.21 , 5.92 \ /* + */ 6.45 , 6.20 , 5.93 \ /* + */ 6.42 , 6.19 , 5.94 \ /* + */ 6.39 , 6.17 , 5.94 \ /* + */ 6.36 , 6.16 , 5.94 \ /* + */ 6.33 , 6.14 , 5.94 \ /* + */ 6.31 , 6.13 , 5.94 \ /* + */ 6.28 , 6.11 , 5.93 \ /* + */ 6.26 , 6.10 , 5.93 \ /* + */ 6.24 , 6.08 , 5.92 \ /* + */ 6.22 , 6.07 , 5.92 \ /* + */ 6.20 , 6.06 , 5.91 \ /* + */ 6.18 , 6.05 , 5.90 \ /* + */ 6.16 , 6.03 , 5.90 \ /* + */ 6.14 , 6.02 , 5.89 \ /* + */ 6.13 , 6.01 , 5.88 \ /* + */ 6.11 , 6.00 , 5.88 \ /* + */ 6.09 , 5.99 , 5.87 \ /* + */ 6.08 , 5.98 , 5.87 \ /* + */ 6.07 , 5.97 , 5.86 \ /* + */ 6.05 , 5.96 , 5.85 \ /* + */ 6.04 , 5.95 , 5.85 \ /* + */ 6.03 , 5.94 , 5.84 \ /* + */ 6.01 , 5.93 , 5.83 \ /* + */ 6.00 , 5.92 , 5.83 \ /* + */ 5.99 , 5.91 , 5.82 \ /* + */ 5.98 , 5.90 , 5.82 \ /* + */ 5.97 , 5.89 , 5.81 \ /* + */ 5.96 , 5.89 , 5.80 \ /* + */ 5.95 , 5.88 , 5.80 \ /* + */ 5.94 , 5.87 , 5.79 \ /* + */ 
5.93 , 5.86 , 5.79 \ /* + */ 5.92 , 5.86 , 5.78 \ /* + */ 5.91 , 5.85 , 5.78 \ /* + */ 5.91 , 5.84 , 5.77 \ /* + */ 5.90 , 5.83 , 5.77 \ /* + */ 5.89 , 5.83 , 5.76 \ /* + */ 5.88 , 5.82 , 5.76 \ /* + */ 5.87 , 5.82 , 5.75 \ /* + */ 5.87 , 5.81 , 5.75 \ /* + */ 5.86 , 5.80 , 5.74 \ /* + */ 5.85 , 5.80 , 5.74 \ /* + */ 5.85 , 5.79 , 5.73 \ /* + */ 5.84 , 5.79 , 5.73 \ /* + */ 5.83 , 5.78 , 5.72 \ /* + */ 5.83 , 5.78 , 5.72 \ /* + */ 5.82 , 5.77 , 5.72 \ /* + */ 5.81 , 5.77 , 5.71 \ /* + */ 5.81 , 5.76 , 5.71 \ /* + */ 5.80 , 5.76 , 5.70 \ /* + */ 5.80 , 5.75 , 5.70 \ /* + */ 5.79 , 5.75 , 5.70 \ /* + */ 5.78 , 5.74 , 5.69 \ /* + */ 5.78 , 5.74 , 5.69 \ /* + */ 5.77 , 5.73 , 5.68 \ /* + */ 5.77 , 5.73 , 5.68 \ /* + */ 5.76 , 5.72 , 5.68 \ /* + */ 5.76 , 5.72 , 5.67 \ /* + */ 5.75 , 5.72 , 5.67 \ /* + */ 5.75 , 5.71 , 5.67 \ /* + */ 5.75 , 5.71 , 5.66 \ /* + */ 5.74 , 5.70 , 5.66 \ /* + */ 5.74 , 5.70 , 5.66 \ /* + */ 5.73 , 5.70 , 5.65 \ /* + */ 5.73 , 5.69 , 5.65 \ /* + */ 5.72 , 5.69 , 5.65 \ /* + */ 5.72 , 5.68 , 5.65 \ /* + */ 5.71 , 5.68 , 5.64 \ /* + */ 5.71 , 5.68 , 5.64 \ /* + */ 5.71 , 5.67 , 5.64 \ /* + */ 5.70 , 5.67 , 5.63 \ /* + */ 5.70 , 5.67 , 5.63 \ /* + */ 5.70 , 5.66 , 5.63 \ /* + */ 5.69 , 5.66 , 5.62 \ /* + */ 5.69 , 5.66 , 5.62 \ /* + */ 5.68 , 5.65 , 5.62 \ /* + */ 5.68 , 5.65 , 5.62 \ /* + */ 5.68 , 5.65 , 5.61 \ /* + */ 5.67 , 5.65 , 5.61 \ /* + */ 5.67 , 5.64 , 5.61 \ /* + */ 5.67 , 5.64 , 5.61 \ /* + */ 5.66 , 5.64 , 5.60 \ /* + */ 5.66 , 5.63 , 5.60 \ /* + */ 5.66 , 5.63 , 5.60 \ /* + */ 5.65 , 5.63 , 5.60 \ /* + */ 5.65 , 5.63 , 5.59 \ /* + */ 5.65 , 5.62 , 5.59 \ /* + */ 5.65 , 5.62 , 5.59 ) + + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias30" { + matrix input `temp' = ( /* + */ . , . , . \ /* + */ . , . , . \ /* + */ 5.39 , . , . \ /* + */ 5.34 , 4.73 , . 
\ /* + */ 5.25 , 4.79 , 4.30 \ /* + */ 5.15 , 4.78 , 4.40 \ /* + */ 5.07 , 4.76 , 4.44 \ /* + */ 4.99 , 4.73 , 4.46 \ /* + */ 4.92 , 4.69 , 4.46 \ /* + */ 4.86 , 4.66 , 4.45 \ /* + */ 4.80 , 4.62 , 4.44 \ /* + */ 4.75 , 4.59 , 4.42 \ /* + */ 4.71 , 4.56 , 4.41 \ /* + */ 4.67 , 4.53 , 4.39 \ /* + */ 4.63 , 4.50 , 4.37 \ /* + */ 4.59 , 4.48 , 4.36 \ /* + */ 4.56 , 4.45 , 4.34 \ /* + */ 4.53 , 4.43 , 4.32 \ /* + */ 4.51 , 4.41 , 4.31 \ /* + */ 4.48 , 4.39 , 4.29 \ /* + */ 4.46 , 4.37 , 4.28 \ /* + */ 4.43 , 4.35 , 4.27 \ /* + */ 4.41 , 4.33 , 4.25 \ /* + */ 4.39 , 4.32 , 4.24 \ /* + */ 4.37 , 4.30 , 4.23 \ /* + */ 4.35 , 4.29 , 4.21 \ /* + */ 4.34 , 4.27 , 4.20 \ /* + */ 4.32 , 4.26 , 4.19 \ /* + */ 4.31 , 4.24 , 4.18 \ /* + */ 4.29 , 4.23 , 4.17 \ /* + */ 4.28 , 4.22 , 4.16 \ /* + */ 4.26 , 4.21 , 4.15 \ /* + */ 4.25 , 4.20 , 4.14 \ /* + */ 4.24 , 4.19 , 4.13 \ /* + */ 4.23 , 4.18 , 4.13 \ /* + */ 4.22 , 4.17 , 4.12 \ /* + */ 4.20 , 4.16 , 4.11 \ /* + */ 4.19 , 4.15 , 4.10 \ /* + */ 4.18 , 4.14 , 4.09 \ /* + */ 4.17 , 4.13 , 4.09 \ /* + */ 4.16 , 4.12 , 4.08 \ /* + */ 4.15 , 4.11 , 4.07 \ /* + */ 4.15 , 4.11 , 4.07 \ /* + */ 4.14 , 4.10 , 4.06 \ /* + */ 4.13 , 4.09 , 4.05 \ /* + */ 4.12 , 4.08 , 4.05 \ /* + */ 4.11 , 4.08 , 4.04 \ /* + */ 4.11 , 4.07 , 4.03 \ /* + */ 4.10 , 4.06 , 4.03 \ /* + */ 4.09 , 4.06 , 4.02 \ /* + */ 4.08 , 4.05 , 4.02 \ /* + */ 4.08 , 4.05 , 4.01 \ /* + */ 4.07 , 4.04 , 4.01 \ /* + */ 4.06 , 4.03 , 4.00 \ /* + */ 4.06 , 4.03 , 4.00 \ /* + */ 4.05 , 4.02 , 3.99 \ /* + */ 4.05 , 4.02 , 3.99 \ /* + */ 4.04 , 4.01 , 3.98 \ /* + */ 4.04 , 4.01 , 3.98 \ /* + */ 4.03 , 4.00 , 3.97 \ /* + */ 4.02 , 4.00 , 3.97 \ /* + */ 4.02 , 3.99 , 3.96 \ /* + */ 4.01 , 3.99 , 3.96 \ /* + */ 4.01 , 3.98 , 3.96 \ /* + */ 4.00 , 3.98 , 3.95 \ /* + */ 4.00 , 3.97 , 3.95 \ /* + */ 3.99 , 3.97 , 3.94 \ /* + */ 3.99 , 3.97 , 3.94 \ /* + */ 3.99 , 3.96 , 3.94 \ /* + */ 3.98 , 3.96 , 3.93 \ /* + */ 3.98 , 3.95 , 3.93 \ /* + */ 3.97 , 3.95 , 3.93 \ /* + */ 3.97 , 3.95 , 
3.92 \ /* + */ 3.96 , 3.94 , 3.92 \ /* + */ 3.96 , 3.94 , 3.92 \ /* + */ 3.96 , 3.93 , 3.91 \ /* + */ 3.95 , 3.93 , 3.91 \ /* + */ 3.95 , 3.93 , 3.91 \ /* + */ 3.95 , 3.92 , 3.90 \ /* + */ 3.94 , 3.92 , 3.90 \ /* + */ 3.94 , 3.92 , 3.90 \ /* + */ 3.93 , 3.91 , 3.89 \ /* + */ 3.93 , 3.91 , 3.89 \ /* + */ 3.93 , 3.91 , 3.89 \ /* + */ 3.92 , 3.91 , 3.89 \ /* + */ 3.92 , 3.90 , 3.88 \ /* + */ 3.92 , 3.90 , 3.88 \ /* + */ 3.91 , 3.90 , 3.88 \ /* + */ 3.91 , 3.89 , 3.87 \ /* + */ 3.91 , 3.89 , 3.87 \ /* + */ 3.91 , 3.89 , 3.87 \ /* + */ 3.90 , 3.89 , 3.87 \ /* + */ 3.90 , 3.88 , 3.86 \ /* + */ 3.90 , 3.88 , 3.86 \ /* + */ 3.89 , 3.88 , 3.86 \ /* + */ 3.89 , 3.87 , 3.86 \ /* + */ 3.89 , 3.87 , 3.85 \ /* + */ 3.89 , 3.87 , 3.85 \ /* + */ 3.88 , 3.87 , 3.85 \ /* + */ 3.88 , 3.86 , 3.85 ) + + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + if "`type'"=="ivsize10" { + matrix input `temp' = /* + */ (16.38,. \ /* + */ 19.93,7.03 \ /* + */ 22.3,13.43 \ /* + */ 24.58,16.87 \ /* + */ 26.87,19.45 \ /* + */ 29.18,21.68 \ /* + */ 31.5,23.72 \ /* + */ 33.84,25.64 \ /* + */ 36.19,27.51 \ /* + */ 38.54,29.32 \ /* + */ 40.9,31.11 \ /* + */ 43.27,32.88 \ /* + */ 45.64,34.62 \ /* + */ 48.01,36.36 \ /* + */ 50.39,38.08 \ /* + */ 52.77,39.8 \ /* + */ 55.15,41.51 \ /* + */ 57.53,43.22 \ /* + */ 59.92,44.92 \ /* + */ 62.3,46.62 \ /* + */ 64.69,48.31 \ /* + */ 67.07,50.01 \ /* + */ 69.46,51.7 \ /* + */ 71.85,53.39 \ /* + */ 74.24,55.07 \ /* + */ 76.62,56.76 \ /* + */ 79.01,58.45 \ /* + */ 81.4,60.13 \ /* + */ 83.79,61.82 \ /* + */ 86.17,63.51 \ /* + */ 88.56,65.19 \ /* + */ 90.95,66.88 \ /* + */ 93.33,68.56 \ /* + */ 95.72,70.25 \ /* + */ 98.11,71.94 \ /* + */ 100.5,73.62 \ /* + */ 102.88,75.31 \ /* + */ 105.27,76.99 \ /* + */ 107.66,78.68 \ /* + */ 110.04,80.37 \ /* + */ 112.43,82.05 \ /* + */ 114.82,83.74 \ /* + */ 117.21,85.42 \ /* + */ 119.59,87.11 \ /* + */ 121.98,88.8 \ /* + */ 124.37,90.48 \ /* + */ 126.75,92.17 \ /* + */ 129.14,93.85 \ /* + */ 131.53,95.54 
\ /* + */ 133.92,97.23 \ /* + */ 136.3,98.91 \ /* + */ 138.69,100.6 \ /* + */ 141.08,102.29 \ /* + */ 143.47,103.97 \ /* + */ 145.85,105.66 \ /* + */ 148.24,107.34 \ /* + */ 150.63,109.03 \ /* + */ 153.01,110.72 \ /* + */ 155.4,112.4 \ /* + */ 157.79,114.09 \ /* + */ 160.18,115.77 \ /* + */ 162.56,117.46 \ /* + */ 164.95,119.15 \ /* + */ 167.34,120.83 \ /* + */ 169.72,122.52 \ /* + */ 172.11,124.2 \ /* + */ 174.5,125.89 \ /* + */ 176.89,127.58 \ /* + */ 179.27,129.26 \ /* + */ 181.66,130.95 \ /* + */ 184.05,132.63 \ /* + */ 186.44,134.32 \ /* + */ 188.82,136.01 \ /* + */ 191.21,137.69 \ /* + */ 193.6,139.38 \ /* + */ 195.98,141.07 \ /* + */ 198.37,142.75 \ /* + */ 200.76,144.44 \ /* + */ 203.15,146.12 \ /* + */ 205.53,147.81 \ /* + */ 207.92,149.5 \ /* + */ 210.31,151.18 \ /* + */ 212.69,152.87 \ /* + */ 215.08,154.55 \ /* + */ 217.47,156.24 \ /* + */ 219.86,157.93 \ /* + */ 222.24,159.61 \ /* + */ 224.63,161.3 \ /* + */ 227.02,162.98 \ /* + */ 229.41,164.67 \ /* + */ 231.79,166.36 \ /* + */ 234.18,168.04 \ /* + */ 236.57,169.73 \ /* + */ 238.95,171.41 \ /* + */ 241.34,173.1 \ /* + */ 243.73,174.79 \ /* + */ 246.12,176.47 \ /* + */ 248.5,178.16 \ /* + */ 250.89,179.84 \ /* + */ 253.28,181.53) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize15" { + matrix input `temp' = ( /* + */ 8.96 , . 
\ /* + */ 11.59 , 4.58 \ /* + */ 12.83 , 8.18 \ /* + */ 13.96 , 9.93 \ /* + */ 15.09 , 11.22 \ /* + */ 16.23 , 12.33 \ /* + */ 17.38 , 13.34 \ /* + */ 18.54 , 14.31 \ /* + */ 19.71 , 15.24 \ /* + */ 20.88 , 16.16 \ /* + */ 22.06 , 17.06 \ /* + */ 23.24 , 17.95 \ /* + */ 24.42 , 18.84 \ /* + */ 25.61 , 19.72 \ /* + */ 26.80 , 20.60 \ /* + */ 27.99 , 21.48 \ /* + */ 29.19 , 22.35 \ /* + */ 30.38 , 23.22 \ /* + */ 31.58 , 24.09 \ /* + */ 32.77 , 24.96 \ /* + */ 33.97 , 25.82 \ /* + */ 35.17 , 26.69 \ /* + */ 36.37 , 27.56 \ /* + */ 37.57 , 28.42 \ /* + */ 38.77 , 29.29 \ /* + */ 39.97 , 30.15 \ /* + */ 41.17 , 31.02 \ /* + */ 42.37 , 31.88 \ /* + */ 43.57 , 32.74 \ /* + */ 44.78 , 33.61 \ /* + */ 45.98 , 34.47 \ /* + */ 47.18 , 35.33 \ /* + */ 48.38 , 36.19 \ /* + */ 49.59 , 37.06 \ /* + */ 50.79 , 37.92 \ /* + */ 51.99 , 38.78 \ /* + */ 53.19 , 39.64 \ /* + */ 54.40 , 40.50 \ /* + */ 55.60 , 41.37 \ /* + */ 56.80 , 42.23 \ /* + */ 58.01 , 43.09 \ /* + */ 59.21 , 43.95 \ /* + */ 60.41 , 44.81 \ /* + */ 61.61 , 45.68 \ /* + */ 62.82 , 46.54 \ /* + */ 64.02 , 47.40 \ /* + */ 65.22 , 48.26 \ /* + */ 66.42 , 49.12 \ /* + */ 67.63 , 49.99 \ /* + */ 68.83 , 50.85 \ /* + */ 70.03 , 51.71 \ /* + */ 71.24 , 52.57 \ /* + */ 72.44 , 53.43 \ /* + */ 73.64 , 54.30 \ /* + */ 74.84 , 55.16 \ /* + */ 76.05 , 56.02 \ /* + */ 77.25 , 56.88 \ /* + */ 78.45 , 57.74 \ /* + */ 79.66 , 58.61 \ /* + */ 80.86 , 59.47 \ /* + */ 82.06 , 60.33 \ /* + */ 83.26 , 61.19 \ /* + */ 84.47 , 62.05 \ /* + */ 85.67 , 62.92 \ /* + */ 86.87 , 63.78 \ /* + */ 88.07 , 64.64 \ /* + */ 89.28 , 65.50 \ /* + */ 90.48 , 66.36 \ /* + */ 91.68 , 67.22 \ /* + */ 92.89 , 68.09 \ /* + */ 94.09 , 68.95 \ /* + */ 95.29 , 69.81 \ /* + */ 96.49 , 70.67 \ /* + */ 97.70 , 71.53 \ /* + */ 98.90 , 72.40 \ /* + */ 100.10 , 73.26 \ /* + */ 101.30 , 74.12 \ /* + */ 102.51 , 74.98 \ /* + */ 103.71 , 75.84 \ /* + */ 104.91 , 76.71 \ /* + */ 106.12 , 77.57 \ /* + */ 107.32 , 78.43 \ /* + */ 108.52 , 79.29 \ /* + */ 109.72 , 80.15 \ 
/* + */ 110.93 , 81.02 \ /* + */ 112.13 , 81.88 \ /* + */ 113.33 , 82.74 \ /* + */ 114.53 , 83.60 \ /* + */ 115.74 , 84.46 \ /* + */ 116.94 , 85.33 \ /* + */ 118.14 , 86.19 \ /* + */ 119.35 , 87.05 \ /* + */ 120.55 , 87.91 \ /* + */ 121.75 , 88.77 \ /* + */ 122.95 , 89.64 \ /* + */ 124.16 , 90.50 \ /* + */ 125.36 , 91.36 \ /* + */ 126.56 , 92.22 \ /* + */ 127.76 , 93.08 \ /* + */ 128.97 , 93.95 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize20" { + matrix input `temp' = ( /* + */ 6.66 , . \ /* + */ 8.75 , 3.95 \ /* + */ 9.54 , 6.40 \ /* + */ 10.26 , 7.54 \ /* + */ 10.98 , 8.38 \ /* + */ 11.72 , 9.10 \ /* + */ 12.48 , 9.77 \ /* + */ 13.24 , 10.41 \ /* + */ 14.01 , 11.03 \ /* + */ 14.78 , 11.65 \ /* + */ 15.56 , 12.25 \ /* + */ 16.35 , 12.86 \ /* + */ 17.14 , 13.45 \ /* + */ 17.93 , 14.05 \ /* + */ 18.72 , 14.65 \ /* + */ 19.51 , 15.24 \ /* + */ 20.31 , 15.83 \ /* + */ 21.10 , 16.42 \ /* + */ 21.90 , 17.02 \ /* + */ 22.70 , 17.61 \ /* + */ 23.50 , 18.20 \ /* + */ 24.30 , 18.79 \ /* + */ 25.10 , 19.38 \ /* + */ 25.90 , 19.97 \ /* + */ 26.71 , 20.56 \ /* + */ 27.51 , 21.15 \ /* + */ 28.31 , 21.74 \ /* + */ 29.12 , 22.33 \ /* + */ 29.92 , 22.92 \ /* + */ 30.72 , 23.51 \ /* + */ 31.53 , 24.10 \ /* + */ 32.33 , 24.69 \ /* + */ 33.14 , 25.28 \ /* + */ 33.94 , 25.87 \ /* + */ 34.75 , 26.46 \ /* + */ 35.55 , 27.05 \ /* + */ 36.36 , 27.64 \ /* + */ 37.17 , 28.23 \ /* + */ 37.97 , 28.82 \ /* + */ 38.78 , 29.41 \ /* + */ 39.58 , 30.00 \ /* + */ 40.39 , 30.59 \ /* + */ 41.20 , 31.18 \ /* + */ 42.00 , 31.77 \ /* + */ 42.81 , 32.36 \ /* + */ 43.62 , 32.95 \ /* + */ 44.42 , 33.54 \ /* + */ 45.23 , 34.13 \ /* + */ 46.03 , 34.72 \ /* + */ 46.84 , 35.31 \ /* + */ 47.65 , 35.90 \ /* + */ 48.45 , 36.49 \ /* + */ 49.26 , 37.08 \ /* + */ 50.06 , 37.67 \ /* + */ 50.87 , 38.26 \ /* + */ 51.68 , 38.85 \ /* + */ 52.48 , 39.44 \ /* + */ 53.29 , 40.02 \ /* + */ 54.09 , 40.61 \ /* + */ 54.90 , 41.20 \ /* + */ 55.71 , 41.79 \ /* + */ 56.51 , 
42.38 \ /* + */ 57.32 , 42.97 \ /* + */ 58.13 , 43.56 \ /* + */ 58.93 , 44.15 \ /* + */ 59.74 , 44.74 \ /* + */ 60.54 , 45.33 \ /* + */ 61.35 , 45.92 \ /* + */ 62.16 , 46.51 \ /* + */ 62.96 , 47.10 \ /* + */ 63.77 , 47.69 \ /* + */ 64.57 , 48.28 \ /* + */ 65.38 , 48.87 \ /* + */ 66.19 , 49.46 \ /* + */ 66.99 , 50.05 \ /* + */ 67.80 , 50.64 \ /* + */ 68.60 , 51.23 \ /* + */ 69.41 , 51.82 \ /* + */ 70.22 , 52.41 \ /* + */ 71.02 , 53.00 \ /* + */ 71.83 , 53.59 \ /* + */ 72.64 , 54.18 \ /* + */ 73.44 , 54.77 \ /* + */ 74.25 , 55.36 \ /* + */ 75.05 , 55.95 \ /* + */ 75.86 , 56.54 \ /* + */ 76.67 , 57.13 \ /* + */ 77.47 , 57.72 \ /* + */ 78.28 , 58.31 \ /* + */ 79.08 , 58.90 \ /* + */ 79.89 , 59.49 \ /* + */ 80.70 , 60.08 \ /* + */ 81.50 , 60.67 \ /* + */ 82.31 , 61.26 \ /* + */ 83.12 , 61.85 \ /* + */ 83.92 , 62.44 \ /* + */ 84.73 , 63.03 \ /* + */ 85.53 , 63.62 \ /* + */ 86.34 , 64.21 \ /* + */ 87.15 , 64.80 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize25" { + matrix input `temp' = ( /* + */ 5.53 , . 
\ /* + */ 7.25 , 3.63 \ /* + */ 7.80 , 5.45 \ /* + */ 8.31 , 6.28 \ /* + */ 8.84 , 6.89 \ /* + */ 9.38 , 7.42 \ /* + */ 9.93 , 7.91 \ /* + */ 10.50 , 8.39 \ /* + */ 11.07 , 8.85 \ /* + */ 11.65 , 9.31 \ /* + */ 12.23 , 9.77 \ /* + */ 12.82 , 10.22 \ /* + */ 13.41 , 10.68 \ /* + */ 14.00 , 11.13 \ /* + */ 14.60 , 11.58 \ /* + */ 15.19 , 12.03 \ /* + */ 15.79 , 12.49 \ /* + */ 16.39 , 12.94 \ /* + */ 16.99 , 13.39 \ /* + */ 17.60 , 13.84 \ /* + */ 18.20 , 14.29 \ /* + */ 18.80 , 14.74 \ /* + */ 19.41 , 15.19 \ /* + */ 20.01 , 15.64 \ /* + */ 20.61 , 16.10 \ /* + */ 21.22 , 16.55 \ /* + */ 21.83 , 17.00 \ /* + */ 22.43 , 17.45 \ /* + */ 23.04 , 17.90 \ /* + */ 23.65 , 18.35 \ /* + */ 24.25 , 18.81 \ /* + */ 24.86 , 19.26 \ /* + */ 25.47 , 19.71 \ /* + */ 26.08 , 20.16 \ /* + */ 26.68 , 20.61 \ /* + */ 27.29 , 21.06 \ /* + */ 27.90 , 21.52 \ /* + */ 28.51 , 21.97 \ /* + */ 29.12 , 22.42 \ /* + */ 29.73 , 22.87 \ /* + */ 30.33 , 23.32 \ /* + */ 30.94 , 23.78 \ /* + */ 31.55 , 24.23 \ /* + */ 32.16 , 24.68 \ /* + */ 32.77 , 25.13 \ /* + */ 33.38 , 25.58 \ /* + */ 33.99 , 26.04 \ /* + */ 34.60 , 26.49 \ /* + */ 35.21 , 26.94 \ /* + */ 35.82 , 27.39 \ /* + */ 36.43 , 27.85 \ /* + */ 37.04 , 28.30 \ /* + */ 37.65 , 28.75 \ /* + */ 38.25 , 29.20 \ /* + */ 38.86 , 29.66 \ /* + */ 39.47 , 30.11 \ /* + */ 40.08 , 30.56 \ /* + */ 40.69 , 31.01 \ /* + */ 41.30 , 31.47 \ /* + */ 41.91 , 31.92 \ /* + */ 42.52 , 32.37 \ /* + */ 43.13 , 32.82 \ /* + */ 43.74 , 33.27 \ /* + */ 44.35 , 33.73 \ /* + */ 44.96 , 34.18 \ /* + */ 45.57 , 34.63 \ /* + */ 46.18 , 35.08 \ /* + */ 46.78 , 35.54 \ /* + */ 47.39 , 35.99 \ /* + */ 48.00 , 36.44 \ /* + */ 48.61 , 36.89 \ /* + */ 49.22 , 37.35 \ /* + */ 49.83 , 37.80 \ /* + */ 50.44 , 38.25 \ /* + */ 51.05 , 38.70 \ /* + */ 51.66 , 39.16 \ /* + */ 52.27 , 39.61 \ /* + */ 52.88 , 40.06 \ /* + */ 53.49 , 40.51 \ /* + */ 54.10 , 40.96 \ /* + */ 54.71 , 41.42 \ /* + */ 55.32 , 41.87 \ /* + */ 55.92 , 42.32 \ /* + */ 56.53 , 42.77 \ /* + */ 57.14 , 43.23 
\ /* + */ 57.75 , 43.68 \ /* + */ 58.36 , 44.13 \ /* + */ 58.97 , 44.58 \ /* + */ 59.58 , 45.04 \ /* + */ 60.19 , 45.49 \ /* + */ 60.80 , 45.94 \ /* + */ 61.41 , 46.39 \ /* + */ 62.02 , 46.85 \ /* + */ 62.63 , 47.30 \ /* + */ 63.24 , 47.75 \ /* + */ 63.85 , 48.20 \ /* + */ 64.45 , 48.65 \ /* + */ 65.06 , 49.11 \ /* + */ 65.67 , 49.56 \ /* + */ 66.28 , 50.01 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel5" { + matrix input `temp' = ( /* + */ 24.09 , . \ /* + */ 13.46 , 15.50 \ /* + */ 9.61 , 10.83 \ /* + */ 7.63 , 8.53 \ /* + */ 6.42 , 7.16 \ /* + */ 5.61 , 6.24 \ /* + */ 5.02 , 5.59 \ /* + */ 4.58 , 5.10 \ /* + */ 4.23 , 4.71 \ /* + */ 3.96 , 4.41 \ /* + */ 3.73 , 4.15 \ /* + */ 3.54 , 3.94 \ /* + */ 3.38 , 3.76 \ /* + */ 3.24 , 3.60 \ /* + */ 3.12 , 3.47 \ /* + */ 3.01 , 3.35 \ /* + */ 2.92 , 3.24 \ /* + */ 2.84 , 3.15 \ /* + */ 2.76 , 3.06 \ /* + */ 2.69 , 2.98 \ /* + */ 2.63 , 2.91 \ /* + */ 2.58 , 2.85 \ /* + */ 2.52 , 2.79 \ /* + */ 2.48 , 2.73 \ /* + */ 2.43 , 2.68 \ /* + */ 2.39 , 2.63 \ /* + */ 2.36 , 2.59 \ /* + */ 2.32 , 2.55 \ /* + */ 2.29 , 2.51 \ /* + */ 2.26 , 2.47 \ /* + */ 2.23 , 2.44 \ /* + */ 2.20 , 2.41 \ /* + */ 2.18 , 2.37 \ /* + */ 2.16 , 2.35 \ /* + */ 2.13 , 2.32 \ /* + */ 2.11 , 2.29 \ /* + */ 2.09 , 2.27 \ /* + */ 2.07 , 2.24 \ /* + */ 2.05 , 2.22 \ /* + */ 2.04 , 2.20 \ /* + */ 2.02 , 2.18 \ /* + */ 2.00 , 2.16 \ /* + */ 1.99 , 2.14 \ /* + */ 1.97 , 2.12 \ /* + */ 1.96 , 2.10 \ /* + */ 1.94 , 2.09 \ /* + */ 1.93 , 2.07 \ /* + */ 1.92 , 2.05 \ /* + */ 1.91 , 2.04 \ /* + */ 1.89 , 2.02 \ /* + */ 1.88 , 2.01 \ /* + */ 1.87 , 2.00 \ /* + */ 1.86 , 1.98 \ /* + */ 1.85 , 1.97 \ /* + */ 1.84 , 1.96 \ /* + */ 1.83 , 1.95 \ /* + */ 1.82 , 1.94 \ /* + */ 1.81 , 1.92 \ /* + */ 1.80 , 1.91 \ /* + */ 1.79 , 1.90 \ /* + */ 1.79 , 1.89 \ /* + */ 1.78 , 1.88 \ /* + */ 1.77 , 1.87 \ /* + */ 1.76 , 1.87 \ /* + */ 1.75 , 1.86 \ /* + */ 1.75 , 1.85 \ /* + */ 1.74 , 1.84 \ /* + */ 1.73 , 1.83 \ /* + */ 
1.72 , 1.83 \ /* + */ 1.72 , 1.82 \ /* + */ 1.71 , 1.81 \ /* + */ 1.70 , 1.80 \ /* + */ 1.70 , 1.80 \ /* + */ 1.69 , 1.79 \ /* + */ 1.68 , 1.79 \ /* + */ 1.68 , 1.78 \ /* + */ 1.67 , 1.77 \ /* + */ 1.67 , 1.77 \ /* + */ 1.66 , 1.76 \ /* + */ 1.65 , 1.76 \ /* + */ 1.65 , 1.75 \ /* + */ 1.64 , 1.75 \ /* + */ 1.64 , 1.74 \ /* + */ 1.63 , 1.74 \ /* + */ 1.63 , 1.73 \ /* + */ 1.62 , 1.73 \ /* + */ 1.61 , 1.73 \ /* + */ 1.61 , 1.72 \ /* + */ 1.60 , 1.72 \ /* + */ 1.60 , 1.71 \ /* + */ 1.59 , 1.71 \ /* + */ 1.59 , 1.71 \ /* + */ 1.58 , 1.71 \ /* + */ 1.58 , 1.70 \ /* + */ 1.57 , 1.70 \ /* + */ 1.57 , 1.70 \ /* + */ 1.56 , 1.69 \ /* + */ 1.56 , 1.69 \ /* + */ 1.55 , 1.69 \ /* + */ 1.55 , 1.69 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel10" { + matrix input `temp' = ( /* + */ 19.36 , . \ /* + */ 10.89 , 12.55 \ /* + */ 7.90 , 8.96 \ /* + */ 6.37 , 7.15 \ /* + */ 5.44 , 6.07 \ /* + */ 4.81 , 5.34 \ /* + */ 4.35 , 4.82 \ /* + */ 4.01 , 4.43 \ /* + */ 3.74 , 4.12 \ /* + */ 3.52 , 3.87 \ /* + */ 3.34 , 3.67 \ /* + */ 3.19 , 3.49 \ /* + */ 3.06 , 3.35 \ /* + */ 2.95 , 3.22 \ /* + */ 2.85 , 3.11 \ /* + */ 2.76 , 3.01 \ /* + */ 2.69 , 2.92 \ /* + */ 2.62 , 2.84 \ /* + */ 2.56 , 2.77 \ /* + */ 2.50 , 2.71 \ /* + */ 2.45 , 2.65 \ /* + */ 2.40 , 2.60 \ /* + */ 2.36 , 2.55 \ /* + */ 2.32 , 2.50 \ /* + */ 2.28 , 2.46 \ /* + */ 2.24 , 2.42 \ /* + */ 2.21 , 2.38 \ /* + */ 2.18 , 2.35 \ /* + */ 2.15 , 2.31 \ /* + */ 2.12 , 2.28 \ /* + */ 2.10 , 2.25 \ /* + */ 2.07 , 2.23 \ /* + */ 2.05 , 2.20 \ /* + */ 2.03 , 2.17 \ /* + */ 2.01 , 2.15 \ /* + */ 1.99 , 2.13 \ /* + */ 1.97 , 2.11 \ /* + */ 1.95 , 2.09 \ /* + */ 1.93 , 2.07 \ /* + */ 1.92 , 2.05 \ /* + */ 1.90 , 2.03 \ /* + */ 1.88 , 2.01 \ /* + */ 1.87 , 2.00 \ /* + */ 1.86 , 1.98 \ /* + */ 1.84 , 1.96 \ /* + */ 1.83 , 1.95 \ /* + */ 1.82 , 1.93 \ /* + */ 1.81 , 1.92 \ /* + */ 1.79 , 1.91 \ /* + */ 1.78 , 1.89 \ /* + */ 1.77 , 1.88 \ /* + */ 1.76 , 1.87 \ /* + */ 1.75 , 1.86 \ /* 
+ */ 1.74 , 1.85 \ /* + */ 1.73 , 1.84 \ /* + */ 1.72 , 1.83 \ /* + */ 1.71 , 1.82 \ /* + */ 1.70 , 1.81 \ /* + */ 1.70 , 1.80 \ /* + */ 1.69 , 1.79 \ /* + */ 1.68 , 1.78 \ /* + */ 1.67 , 1.77 \ /* + */ 1.67 , 1.76 \ /* + */ 1.66 , 1.75 \ /* + */ 1.65 , 1.75 \ /* + */ 1.64 , 1.74 \ /* + */ 1.64 , 1.73 \ /* + */ 1.63 , 1.72 \ /* + */ 1.63 , 1.72 \ /* + */ 1.62 , 1.71 \ /* + */ 1.61 , 1.70 \ /* + */ 1.61 , 1.70 \ /* + */ 1.60 , 1.69 \ /* + */ 1.60 , 1.68 \ /* + */ 1.59 , 1.68 \ /* + */ 1.59 , 1.67 \ /* + */ 1.58 , 1.67 \ /* + */ 1.58 , 1.66 \ /* + */ 1.57 , 1.66 \ /* + */ 1.57 , 1.65 \ /* + */ 1.56 , 1.65 \ /* + */ 1.56 , 1.64 \ /* + */ 1.56 , 1.64 \ /* + */ 1.55 , 1.63 \ /* + */ 1.55 , 1.63 \ /* + */ 1.54 , 1.62 \ /* + */ 1.54 , 1.62 \ /* + */ 1.54 , 1.62 \ /* + */ 1.53 , 1.61 \ /* + */ 1.53 , 1.61 \ /* + */ 1.53 , 1.61 \ /* + */ 1.52 , 1.60 \ /* + */ 1.52 , 1.60 \ /* + */ 1.52 , 1.60 \ /* + */ 1.52 , 1.59 \ /* + */ 1.51 , 1.59 \ /* + */ 1.51 , 1.59 \ /* + */ 1.51 , 1.59 \ /* + */ 1.51 , 1.58 \ /* + */ 1.50 , 1.58 ) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel20" { + matrix input `temp' = ( /* + */ 15.64 , . 
\ /* + */ 9.00 , 9.72 \ /* + */ 6.61 , 7.18 \ /* + */ 5.38 , 5.85 \ /* + */ 4.62 , 5.04 \ /* + */ 4.11 , 4.48 \ /* + */ 3.75 , 4.08 \ /* + */ 3.47 , 3.77 \ /* + */ 3.25 , 3.53 \ /* + */ 3.07 , 3.33 \ /* + */ 2.92 , 3.17 \ /* + */ 2.80 , 3.04 \ /* + */ 2.70 , 2.92 \ /* + */ 2.61 , 2.82 \ /* + */ 2.53 , 2.73 \ /* + */ 2.46 , 2.65 \ /* + */ 2.39 , 2.58 \ /* + */ 2.34 , 2.52 \ /* + */ 2.29 , 2.46 \ /* + */ 2.24 , 2.41 \ /* + */ 2.20 , 2.36 \ /* + */ 2.16 , 2.32 \ /* + */ 2.13 , 2.28 \ /* + */ 2.10 , 2.24 \ /* + */ 2.06 , 2.21 \ /* + */ 2.04 , 2.18 \ /* + */ 2.01 , 2.15 \ /* + */ 1.99 , 2.12 \ /* + */ 1.96 , 2.09 \ /* + */ 1.94 , 2.07 \ /* + */ 1.92 , 2.04 \ /* + */ 1.90 , 2.02 \ /* + */ 1.88 , 2.00 \ /* + */ 1.87 , 1.98 \ /* + */ 1.85 , 1.96 \ /* + */ 1.83 , 1.94 \ /* + */ 1.82 , 1.93 \ /* + */ 1.80 , 1.91 \ /* + */ 1.79 , 1.89 \ /* + */ 1.78 , 1.88 \ /* + */ 1.76 , 1.86 \ /* + */ 1.75 , 1.85 \ /* + */ 1.74 , 1.84 \ /* + */ 1.73 , 1.82 \ /* + */ 1.72 , 1.81 \ /* + */ 1.71 , 1.80 \ /* + */ 1.70 , 1.79 \ /* + */ 1.69 , 1.78 \ /* + */ 1.68 , 1.77 \ /* + */ 1.67 , 1.76 \ /* + */ 1.66 , 1.75 \ /* + */ 1.65 , 1.74 \ /* + */ 1.65 , 1.73 \ /* + */ 1.64 , 1.72 \ /* + */ 1.63 , 1.71 \ /* + */ 1.62 , 1.70 \ /* + */ 1.62 , 1.69 \ /* + */ 1.61 , 1.68 \ /* + */ 1.60 , 1.68 \ /* + */ 1.60 , 1.67 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.58 , 1.65 \ /* + */ 1.57 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.56 , 1.63 \ /* + */ 1.56 , 1.62 \ /* + */ 1.55 , 1.62 \ /* + */ 1.55 , 1.61 \ /* + */ 1.54 , 1.60 \ /* + */ 1.54 , 1.60 \ /* + */ 1.53 , 1.59 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.52 , 1.58 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.54 \ /* + */ 1.48 , 1.54 \ /* + */ 1.48 , 1.54 \ /* + */ 1.48 , 1.53 \ /* + */ 1.47 , 1.53 \ /* + */ 1.47 , 1.53 \ /* + */ 1.47 , 1.52 \ /* + */ 1.46 , 1.52 \ /* + */ 1.46 , 1.52 \ /* + */ 1.46 , 
1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.45 , 1.51 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.44 , 1.50 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.49 ) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel30" { + matrix input `temp' = ( /* + */ 12.71 , . \ /* + */ 7.49 , 8.03 \ /* + */ 5.60 , 6.15 \ /* + */ 4.63 , 5.10 \ /* + */ 4.03 , 4.44 \ /* + */ 3.63 , 3.98 \ /* + */ 3.33 , 3.65 \ /* + */ 3.11 , 3.39 \ /* + */ 2.93 , 3.19 \ /* + */ 2.79 , 3.02 \ /* + */ 2.67 , 2.88 \ /* + */ 2.57 , 2.77 \ /* + */ 2.48 , 2.67 \ /* + */ 2.41 , 2.58 \ /* + */ 2.34 , 2.51 \ /* + */ 2.28 , 2.44 \ /* + */ 2.23 , 2.38 \ /* + */ 2.18 , 2.33 \ /* + */ 2.14 , 2.28 \ /* + */ 2.10 , 2.23 \ /* + */ 2.07 , 2.19 \ /* + */ 2.04 , 2.16 \ /* + */ 2.01 , 2.12 \ /* + */ 1.98 , 2.09 \ /* + */ 1.95 , 2.06 \ /* + */ 1.93 , 2.03 \ /* + */ 1.90 , 2.01 \ /* + */ 1.88 , 1.98 \ /* + */ 1.86 , 1.96 \ /* + */ 1.84 , 1.94 \ /* + */ 1.83 , 1.92 \ /* + */ 1.81 , 1.90 \ /* + */ 1.79 , 1.88 \ /* + */ 1.78 , 1.87 \ /* + */ 1.76 , 1.85 \ /* + */ 1.75 , 1.83 \ /* + */ 1.74 , 1.82 \ /* + */ 1.72 , 1.80 \ /* + */ 1.71 , 1.79 \ /* + */ 1.70 , 1.78 \ /* + */ 1.69 , 1.77 \ /* + */ 1.68 , 1.75 \ /* + */ 1.67 , 1.74 \ /* + */ 1.66 , 1.73 \ /* + */ 1.65 , 1.72 \ /* + */ 1.64 , 1.71 \ /* + */ 1.63 , 1.70 \ /* + */ 1.62 , 1.69 \ /* + */ 1.61 , 1.68 \ /* + */ 1.60 , 1.67 \ /* + */ 1.60 , 1.66 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.57 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.56 , 1.63 \ /* + */ 1.55 , 1.62 \ /* + */ 1.55 , 1.61 \ /* + */ 1.54 , 1.61 \ /* + */ 1.54 , 1.60 \ /* + */ 1.53 , 1.59 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.57 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.55 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.54 \ /* + */ 1.48 , 1.54 \ /* + */ 1.48 , 1.53 \ /* + */ 1.47 , 1.53 \ /* + */ 1.47 , 1.52 \ /* + */ 1.47 , 1.52 \ /* + */ 1.46 , 1.52 \ /* + */ 1.46 , 
1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.47 \ /* + */ 1.43 , 1.47 \ /* + */ 1.42 , 1.47 \ /* + */ 1.42 , 1.47 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.46 \ /* + */ 1.41 , 1.46 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax5" { + matrix input `temp' = ( /* + */ 23.81 , . \ /* + */ 12.38 , 14.19 \ /* + */ 8.66 , 10.00 \ /* + */ 6.81 , 7.88 \ /* + */ 5.71 , 6.60 \ /* + */ 4.98 , 5.74 \ /* + */ 4.45 , 5.13 \ /* + */ 4.06 , 4.66 \ /* + */ 3.76 , 4.30 \ /* + */ 3.51 , 4.01 \ /* + */ 3.31 , 3.77 \ /* + */ 3.15 , 3.57 \ /* + */ 3.00 , 3.41 \ /* + */ 2.88 , 3.26 \ /* + */ 2.78 , 3.13 \ /* + */ 2.69 , 3.02 \ /* + */ 2.61 , 2.92 \ /* + */ 2.53 , 2.84 \ /* + */ 2.47 , 2.76 \ /* + */ 2.41 , 2.69 \ /* + */ 2.36 , 2.62 \ /* + */ 2.31 , 2.56 \ /* + */ 2.27 , 2.51 \ /* + */ 2.23 , 2.46 \ /* + */ 2.19 , 2.42 \ /* + */ 2.15 , 2.37 \ /* + */ 2.12 , 2.33 \ /* + */ 2.09 , 2.30 \ /* + */ 2.07 , 2.26 \ /* + */ 2.04 , 2.23 \ /* + */ 2.02 , 2.20 \ /* + */ 1.99 , 2.17 \ /* + */ 1.97 , 2.14 \ /* + */ 1.95 , 2.12 \ /* + */ 1.93 , 2.10 \ /* + */ 1.91 , 2.07 \ /* + */ 1.90 , 2.05 \ /* + */ 1.88 , 2.03 \ /* + */ 1.87 , 2.01 \ /* + */ 1.85 , 1.99 \ /* + */ 1.84 , 1.98 \ /* + */ 1.82 , 1.96 \ /* + */ 1.81 , 1.94 \ /* + */ 1.80 , 1.93 \ /* + */ 1.79 , 1.91 \ /* + */ 1.78 , 1.90 \ /* + */ 1.76 , 1.88 \ /* + */ 1.75 , 1.87 \ /* + */ 1.74 , 1.86 \ /* + */ 1.73 , 1.85 \ /* + */ 1.73 , 1.83 \ /* + */ 1.72 , 1.82 \ /* + */ 1.71 , 1.81 \ /* + */ 1.70 , 1.80 \ /* + */ 1.69 , 1.79 \ /* + */ 1.68 , 1.78 \ /* + */ 1.68 , 1.77 \ /* + */ 1.67 , 1.76 \ /* + */ 1.66 , 1.75 \ /* + */ 1.65 , 1.74 \ /* + */ 1.65 , 1.74 \ /* + */ 
1.64 , 1.73 \ /* + */ 1.63 , 1.72 \ /* + */ 1.63 , 1.71 \ /* + */ 1.62 , 1.70 \ /* + */ 1.62 , 1.70 \ /* + */ 1.61 , 1.69 \ /* + */ 1.60 , 1.68 \ /* + */ 1.60 , 1.68 \ /* + */ 1.59 , 1.67 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.57 , 1.64 \ /* + */ 1.57 , 1.64 \ /* + */ 1.56 , 1.63 \ /* + */ 1.56 , 1.63 \ /* + */ 1.55 , 1.62 \ /* + */ 1.55 , 1.62 \ /* + */ 1.54 , 1.61 \ /* + */ 1.54 , 1.61 \ /* + */ 1.53 , 1.60 \ /* + */ 1.53 , 1.60 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.51 , 1.58 \ /* + */ 1.51 , 1.57 \ /* + */ 1.50 , 1.57 \ /* + */ 1.50 , 1.57 \ /* + */ 1.50 , 1.56 \ /* + */ 1.49 , 1.56 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.55 \ /* + */ 1.48 , 1.55 \ /* + */ 1.48 , 1.54 \ /* + */ 1.47 , 1.54 \ /* + */ 1.47 , 1.54 \ /* + */ 1.47 , 1.53 \ /* + */ 1.46 , 1.53 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax10" { + matrix input `temp' = ( /* + */ 19.40 , . 
\ /* + */ 10.14 , 11.92 \ /* + */ 7.18 , 8.39 \ /* + */ 5.72 , 6.64 \ /* + */ 4.85 , 5.60 \ /* + */ 4.27 , 4.90 \ /* + */ 3.86 , 4.40 \ /* + */ 3.55 , 4.03 \ /* + */ 3.31 , 3.73 \ /* + */ 3.12 , 3.50 \ /* + */ 2.96 , 3.31 \ /* + */ 2.83 , 3.15 \ /* + */ 2.71 , 3.01 \ /* + */ 2.62 , 2.89 \ /* + */ 2.53 , 2.79 \ /* + */ 2.46 , 2.70 \ /* + */ 2.39 , 2.62 \ /* + */ 2.33 , 2.55 \ /* + */ 2.28 , 2.49 \ /* + */ 2.23 , 2.43 \ /* + */ 2.19 , 2.38 \ /* + */ 2.15 , 2.33 \ /* + */ 2.11 , 2.29 \ /* + */ 2.08 , 2.25 \ /* + */ 2.05 , 2.21 \ /* + */ 2.02 , 2.18 \ /* + */ 1.99 , 2.14 \ /* + */ 1.97 , 2.11 \ /* + */ 1.94 , 2.08 \ /* + */ 1.92 , 2.06 \ /* + */ 1.90 , 2.03 \ /* + */ 1.88 , 2.01 \ /* + */ 1.86 , 1.99 \ /* + */ 1.85 , 1.97 \ /* + */ 1.83 , 1.95 \ /* + */ 1.81 , 1.93 \ /* + */ 1.80 , 1.91 \ /* + */ 1.79 , 1.89 \ /* + */ 1.77 , 1.88 \ /* + */ 1.76 , 1.86 \ /* + */ 1.75 , 1.85 \ /* + */ 1.74 , 1.83 \ /* + */ 1.72 , 1.82 \ /* + */ 1.71 , 1.81 \ /* + */ 1.70 , 1.80 \ /* + */ 1.69 , 1.78 \ /* + */ 1.68 , 1.77 \ /* + */ 1.67 , 1.76 \ /* + */ 1.66 , 1.75 \ /* + */ 1.66 , 1.74 \ /* + */ 1.65 , 1.73 \ /* + */ 1.64 , 1.72 \ /* + */ 1.63 , 1.71 \ /* + */ 1.62 , 1.70 \ /* + */ 1.62 , 1.69 \ /* + */ 1.61 , 1.69 \ /* + */ 1.60 , 1.68 \ /* + */ 1.60 , 1.67 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.58 , 1.65 \ /* + */ 1.57 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.56 , 1.63 \ /* + */ 1.55 , 1.62 \ /* + */ 1.55 , 1.61 \ /* + */ 1.54 , 1.61 \ /* + */ 1.54 , 1.60 \ /* + */ 1.53 , 1.60 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.52 , 1.58 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.57 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.55 \ /* + */ 1.49 , 1.54 \ /* + */ 1.48 , 1.54 \ /* + */ 1.48 , 1.53 \ /* + */ 1.48 , 1.53 \ /* + */ 1.47 , 1.53 \ /* + */ 1.47 , 1.52 \ /* + */ 1.46 , 1.52 \ /* + */ 1.46 , 1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.45 , 1.51 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.44 , 
1.50 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.49 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.42 , 1.48 \ /* + */ 1.42 , 1.47 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax20" { + matrix input `temp' = ( /* + */ 15.39 , . \ /* + */ 8.16 , 9.41 \ /* + */ 5.87 , 6.79 \ /* + */ 4.75 , 5.47 \ /* + */ 4.08 , 4.66 \ /* + */ 3.64 , 4.13 \ /* + */ 3.32 , 3.74 \ /* + */ 3.08 , 3.45 \ /* + */ 2.89 , 3.22 \ /* + */ 2.74 , 3.03 \ /* + */ 2.62 , 2.88 \ /* + */ 2.51 , 2.76 \ /* + */ 2.42 , 2.65 \ /* + */ 2.35 , 2.56 \ /* + */ 2.28 , 2.48 \ /* + */ 2.22 , 2.40 \ /* + */ 2.17 , 2.34 \ /* + */ 2.12 , 2.28 \ /* + */ 2.08 , 2.23 \ /* + */ 2.04 , 2.19 \ /* + */ 2.01 , 2.15 \ /* + */ 1.98 , 2.11 \ /* + */ 1.95 , 2.07 \ /* + */ 1.92 , 2.04 \ /* + */ 1.89 , 2.01 \ /* + */ 1.87 , 1.98 \ /* + */ 1.85 , 1.96 \ /* + */ 1.83 , 1.93 \ /* + */ 1.81 , 1.91 \ /* + */ 1.79 , 1.89 \ /* + */ 1.77 , 1.87 \ /* + */ 1.76 , 1.85 \ /* + */ 1.74 , 1.83 \ /* + */ 1.73 , 1.82 \ /* + */ 1.72 , 1.80 \ /* + */ 1.70 , 1.79 \ /* + */ 1.69 , 1.77 \ /* + */ 1.68 , 1.76 \ /* + */ 1.67 , 1.74 \ /* + */ 1.66 , 1.73 \ /* + */ 1.65 , 1.72 \ /* + */ 1.64 , 1.71 \ /* + */ 1.63 , 1.70 \ /* + */ 1.62 , 1.69 \ /* + */ 1.61 , 1.68 \ /* + */ 1.60 , 1.67 \ /* + */ 1.59 , 1.66 \ /* + */ 1.58 , 1.65 \ /* + */ 1.58 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.56 , 1.62 \ /* + */ 1.56 , 1.62 \ /* + */ 1.55 , 1.61 \ /* + */ 1.54 , 1.60 \ /* + */ 1.54 , 1.59 \ /* + */ 1.53 , 1.59 \ /* + */ 1.52 , 1.58 \ /* + */ 1.52 , 1.57 \ /* + */ 1.51 , 1.57 \ /* + */ 1.51 , 1.56 \ /* + */ 1.50 , 1.56 \ /* + */ 1.50 , 1.55 \ /* + */ 1.49 , 1.54 \ /* + */ 1.49 , 1.54 \ /* + */ 1.48 , 1.53 \ /* + */ 1.48 , 1.53 \ /* + */ 1.47 , 1.52 \ /* + */ 1.47 , 1.52 \ /* + */ 1.47 , 1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.46 , 1.51 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.50 \ /* + */ 1.45 , 1.49 \ /* + */ 1.44 , 1.49 \ /* + */ 1.44 , 1.48 \ /* + */ 1.44 
, 1.48 \ /* + */ 1.43 , 1.48 \ /* + */ 1.43 , 1.47 \ /* + */ 1.43 , 1.47 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.46 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.42 \ /* + */ 1.38 , 1.42 \ /* + */ 1.38 , 1.42 \ /* + */ 1.38 , 1.42 \ /* + */ 1.38 , 1.41 \ /* + */ 1.37 , 1.41 \ /* + */ 1.37 , 1.41 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax30" { + matrix input `temp' = ( /* + */ 12.76 , . \ /* + */ 6.97 , 8.01 \ /* + */ 5.11 , 5.88 \ /* + */ 4.19 , 4.78 \ /* + */ 3.64 , 4.12 \ /* + */ 3.27 , 3.67 \ /* + */ 3.00 , 3.35 \ /* + */ 2.80 , 3.10 \ /* + */ 2.64 , 2.91 \ /* + */ 2.52 , 2.76 \ /* + */ 2.41 , 2.63 \ /* + */ 2.33 , 2.52 \ /* + */ 2.25 , 2.43 \ /* + */ 2.19 , 2.35 \ /* + */ 2.13 , 2.29 \ /* + */ 2.08 , 2.22 \ /* + */ 2.04 , 2.17 \ /* + */ 2.00 , 2.12 \ /* + */ 1.96 , 2.08 \ /* + */ 1.93 , 2.04 \ /* + */ 1.90 , 2.01 \ /* + */ 1.87 , 1.97 \ /* + */ 1.84 , 1.94 \ /* + */ 1.82 , 1.92 \ /* + */ 1.80 , 1.89 \ /* + */ 1.78 , 1.87 \ /* + */ 1.76 , 1.84 \ /* + */ 1.74 , 1.82 \ /* + */ 1.73 , 1.80 \ /* + */ 1.71 , 1.79 \ /* + */ 1.70 , 1.77 \ /* + */ 1.68 , 1.75 \ /* + */ 1.67 , 1.74 \ /* + */ 1.66 , 1.72 \ /* + */ 1.64 , 1.71 \ /* + */ 1.63 , 1.70 \ /* + */ 1.62 , 1.68 \ /* + */ 1.61 , 1.67 \ /* + */ 1.60 , 1.66 \ /* + */ 1.59 , 1.65 \ /* + */ 1.58 , 1.64 \ /* + */ 1.57 , 1.63 \ /* + */ 1.57 , 1.62 \ /* + */ 1.56 , 1.61 \ /* + */ 1.55 , 1.60 \ /* + */ 1.54 , 1.59 \ /* + */ 1.54 , 1.59 \ /* + */ 1.53 , 1.58 \ /* + */ 1.52 , 1.57 \ /* + */ 1.52 , 1.56 \ /* + */ 1.51 , 1.56 \ /* + */ 1.50 , 1.55 \ /* + */ 1.50 , 1.54 \ /* + */ 1.49 , 1.54 \ /* + */ 1.49 , 1.53 \ /* + */ 1.48 , 1.53 \ /* + */ 1.48 , 1.52 \ /* + */ 1.47 , 1.51 \ /* + */ 1.47 , 1.51 \ /* + */ 1.46 , 1.50 \ /* + */ 1.46 , 1.50 \ /* + */ 
1.45 , 1.49 \ /* + */ 1.45 , 1.49 \ /* + */ 1.44 , 1.48 \ /* + */ 1.44 , 1.48 \ /* + */ 1.44 , 1.47 \ /* + */ 1.43 , 1.47 \ /* + */ 1.43 , 1.47 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.46 \ /* + */ 1.42 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.45 \ /* + */ 1.41 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.44 \ /* + */ 1.40 , 1.43 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.43 \ /* + */ 1.39 , 1.42 \ /* + */ 1.39 , 1.42 \ /* + */ 1.38 , 1.42 \ /* + */ 1.38 , 1.41 \ /* + */ 1.38 , 1.41 \ /* + */ 1.37 , 1.41 \ /* + */ 1.37 , 1.40 \ /* + */ 1.37 , 1.40 \ /* + */ 1.37 , 1.40 \ /* + */ 1.36 , 1.40 \ /* + */ 1.36 , 1.39 \ /* + */ 1.36 , 1.39 \ /* + */ 1.36 , 1.39 \ /* + */ 1.36 , 1.38 \ /* + */ 1.35 , 1.38 \ /* + */ 1.35 , 1.38 \ /* + */ 1.35 , 1.38 \ /* + */ 1.35 , 1.37 \ /* + */ 1.34 , 1.37 \ /* + */ 1.34 , 1.37 \ /* + */ 1.34 , 1.37 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize10" { + matrix input `temp' = ( /* + */ 16.38 , . 
\ /* + */ 8.68 , 7.03 \ /* + */ 6.46 , 5.44 \ /* + */ 5.44 , 4.72 \ /* + */ 4.84 , 4.32 \ /* + */ 4.45 , 4.06 \ /* + */ 4.18 , 3.90 \ /* + */ 3.97 , 3.78 \ /* + */ 3.81 , 3.70 \ /* + */ 3.68 , 3.64 \ /* + */ 3.58 , 3.60 \ /* + */ 3.50 , 3.58 \ /* + */ 3.42 , 3.56 \ /* + */ 3.36 , 3.55 \ /* + */ 3.31 , 3.54 \ /* + */ 3.27 , 3.55 \ /* + */ 3.24 , 3.55 \ /* + */ 3.20 , 3.56 \ /* + */ 3.18 , 3.57 \ /* + */ 3.21 , 3.58 \ /* + */ 3.39 , 3.59 \ /* + */ 3.57 , 3.60 \ /* + */ 3.68 , 3.62 \ /* + */ 3.75 , 3.64 \ /* + */ 3.79 , 3.65 \ /* + */ 3.82 , 3.67 \ /* + */ 3.85 , 3.74 \ /* + */ 3.86 , 3.87 \ /* + */ 3.87 , 4.02 \ /* + */ 3.88 , 4.12 \ /* + */ 3.89 , 4.19 \ /* + */ 3.89 , 4.24 \ /* + */ 3.90 , 4.27 \ /* + */ 3.90 , 4.31 \ /* + */ 3.90 , 4.33 \ /* + */ 3.90 , 4.36 \ /* + */ 3.90 , 4.38 \ /* + */ 3.90 , 4.39 \ /* + */ 3.90 , 4.41 \ /* + */ 3.90 , 4.43 \ /* + */ 3.90 , 4.44 \ /* + */ 3.90 , 4.45 \ /* + */ 3.90 , 4.47 \ /* + */ 3.90 , 4.48 \ /* + */ 3.90 , 4.49 \ /* + */ 3.90 , 4.50 \ /* + */ 3.90 , 4.51 \ /* + */ 3.90 , 4.52 \ /* + */ 3.90 , 4.53 \ /* + */ 3.90 , 4.54 \ /* + */ 3.90 , 4.55 \ /* + */ 3.90 , 4.56 \ /* + */ 3.90 , 4.56 \ /* + */ 3.90 , 4.57 \ /* + */ 3.90 , 4.58 \ /* + */ 3.90 , 4.59 \ /* + */ 3.90 , 4.59 \ /* + */ 3.90 , 4.60 \ /* + */ 3.90 , 4.61 \ /* + */ 3.90 , 4.61 \ /* + */ 3.90 , 4.62 \ /* + */ 3.90 , 4.62 \ /* + */ 3.90 , 4.63 \ /* + */ 3.90 , 4.63 \ /* + */ 3.89 , 4.64 \ /* + */ 3.89 , 4.64 \ /* + */ 3.89 , 4.64 \ /* + */ 3.89 , 4.65 \ /* + */ 3.89 , 4.65 \ /* + */ 3.89 , 4.65 \ /* + */ 3.89 , 4.66 \ /* + */ 3.89 , 4.66 \ /* + */ 3.89 , 4.66 \ /* + */ 3.89 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.88 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.87 , 4.66 \ /* + */ 3.86 , 4.65 \ /* + */ 3.86 , 4.65 \ /* + */ 3.86 , 4.65 \ /* + */ 3.86 , 4.64 \ /* + */ 3.85 , 4.64 \ /* + */ 3.85 , 
4.64 \ /* + */ 3.85 , 4.63 \ /* + */ 3.85 , 4.63 \ /* + */ 3.84 , 4.62 \ /* + */ 3.84 , 4.62 \ /* + */ 3.84 , 4.61 \ /* + */ 3.84 , 4.60 \ /* + */ 3.83 , 4.60 \ /* + */ 3.83 , 4.59 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize15" { + matrix input `temp' = ( /* + */ 8.96 , . \ /* + */ 5.33 , 4.58 \ /* + */ 4.36 , 3.81 \ /* + */ 3.87 , 3.39 \ /* + */ 3.56 , 3.13 \ /* + */ 3.34 , 2.95 \ /* + */ 3.18 , 2.83 \ /* + */ 3.04 , 2.73 \ /* + */ 2.93 , 2.66 \ /* + */ 2.84 , 2.60 \ /* + */ 2.76 , 2.55 \ /* + */ 2.69 , 2.52 \ /* + */ 2.63 , 2.48 \ /* + */ 2.57 , 2.46 \ /* + */ 2.52 , 2.44 \ /* + */ 2.48 , 2.42 \ /* + */ 2.44 , 2.41 \ /* + */ 2.41 , 2.40 \ /* + */ 2.37 , 2.39 \ /* + */ 2.34 , 2.38 \ /* + */ 2.32 , 2.38 \ /* + */ 2.29 , 2.37 \ /* + */ 2.27 , 2.37 \ /* + */ 2.25 , 2.37 \ /* + */ 2.24 , 2.37 \ /* + */ 2.22 , 2.38 \ /* + */ 2.21 , 2.38 \ /* + */ 2.20 , 2.38 \ /* + */ 2.19 , 2.39 \ /* + */ 2.18 , 2.39 \ /* + */ 2.19 , 2.40 \ /* + */ 2.22 , 2.41 \ /* + */ 2.33 , 2.42 \ /* + */ 2.40 , 2.42 \ /* + */ 2.45 , 2.43 \ /* + */ 2.48 , 2.44 \ /* + */ 2.50 , 2.45 \ /* + */ 2.52 , 2.54 \ /* + */ 2.53 , 2.55 \ /* + */ 2.54 , 2.66 \ /* + */ 2.55 , 2.73 \ /* + */ 2.56 , 2.78 \ /* + */ 2.57 , 2.82 \ /* + */ 2.57 , 2.85 \ /* + */ 2.58 , 2.87 \ /* + */ 2.58 , 2.89 \ /* + */ 2.58 , 2.91 \ /* + */ 2.59 , 2.92 \ /* + */ 2.59 , 2.93 \ /* + */ 2.59 , 2.94 \ /* + */ 2.59 , 2.95 \ /* + */ 2.59 , 2.96 \ /* + */ 2.60 , 2.97 \ /* + */ 2.60 , 2.98 \ /* + */ 2.60 , 2.98 \ /* + */ 2.60 , 2.99 \ /* + */ 2.60 , 2.99 \ /* + */ 2.60 , 3.00 \ /* + */ 2.60 , 3.00 \ /* + */ 2.60 , 3.01 \ /* + */ 2.60 , 3.01 \ /* + */ 2.60 , 3.02 \ /* + */ 2.61 , 3.02 \ /* + */ 2.61 , 3.02 \ /* + */ 2.61 , 3.03 \ /* + */ 2.61 , 3.03 \ /* + */ 2.61 , 3.03 \ /* + */ 2.61 , 3.03 \ /* + */ 2.61 , 3.04 \ /* + */ 2.61 , 3.04 \ /* + */ 2.61 , 3.04 \ /* + */ 2.60 , 3.04 \ /* + */ 2.60 , 3.04 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 
, 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.60 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.05 \ /* + */ 2.59 , 3.04 \ /* + */ 2.58 , 3.04 \ /* + */ 2.58 , 3.04 \ /* + */ 2.58 , 3.04 \ /* + */ 2.58 , 3.04 \ /* + */ 2.58 , 3.03 \ /* + */ 2.57 , 3.03 \ /* + */ 2.57 , 3.03 \ /* + */ 2.57 , 3.03 \ /* + */ 2.57 , 3.02 \ /* + */ 2.56 , 3.02 \ /* + */ 2.56 , 3.02 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize20" { + matrix input `temp' = ( /* + */ 6.66 , . \ /* + */ 4.42 , 3.95 \ /* + */ 3.69 , 3.32 \ /* + */ 3.30 , 2.99 \ /* + */ 3.05 , 2.78 \ /* + */ 2.87 , 2.63 \ /* + */ 2.73 , 2.52 \ /* + */ 2.63 , 2.43 \ /* + */ 2.54 , 2.36 \ /* + */ 2.46 , 2.30 \ /* + */ 2.40 , 2.25 \ /* + */ 2.34 , 2.21 \ /* + */ 2.29 , 2.17 \ /* + */ 2.25 , 2.14 \ /* + */ 2.21 , 2.11 \ /* + */ 2.18 , 2.09 \ /* + */ 2.14 , 2.07 \ /* + */ 2.11 , 2.05 \ /* + */ 2.09 , 2.03 \ /* + */ 2.06 , 2.02 \ /* + */ 2.04 , 2.01 \ /* + */ 2.02 , 1.99 \ /* + */ 2.00 , 1.98 \ /* + */ 1.98 , 1.98 \ /* + */ 1.96 , 1.97 \ /* + */ 1.95 , 1.96 \ /* + */ 1.93 , 1.96 \ /* + */ 1.92 , 1.95 \ /* + */ 1.90 , 1.95 \ /* + */ 1.89 , 1.95 \ /* + */ 1.88 , 1.94 \ /* + */ 1.87 , 1.94 \ /* + */ 1.86 , 1.94 \ /* + */ 1.85 , 1.94 \ /* + */ 1.84 , 1.94 \ /* + */ 1.83 , 1.94 \ /* + */ 1.82 , 1.94 \ /* + */ 1.81 , 1.95 \ /* + */ 1.81 , 1.95 \ /* + */ 1.80 , 1.95 \ /* + */ 1.79 , 1.95 \ /* + */ 1.79 , 1.96 \ /* + */ 1.78 , 1.96 \ /* + */ 1.78 , 1.97 \ /* + */ 1.80 , 1.97 \ /* + */ 1.87 , 1.98 \ /* + */ 1.92 , 1.98 \ /* + */ 1.95 , 1.99 \ /* + */ 1.97 , 2.00 \ /* + */ 1.99 , 2.00 \ /* + */ 2.00 , 2.01 \ /* + */ 2.01 , 2.09 \ /* + */ 2.02 , 2.11 \ /* + */ 2.03 , 2.18 \ /* + */ 2.04 , 2.23 \ /* + */ 2.04 , 2.27 \ /* + */ 2.05 , 2.29 \ /* + */ 2.05 , 2.31 \ /* + */ 2.06 , 2.33 \ /* + */ 2.06 , 2.34 \ /* + */ 2.07 , 2.35 \ /* + */ 
2.07 , 2.36 \ /* + */ 2.07 , 2.37 \ /* + */ 2.08 , 2.38 \ /* + */ 2.08 , 2.39 \ /* + */ 2.08 , 2.39 \ /* + */ 2.08 , 2.40 \ /* + */ 2.09 , 2.40 \ /* + */ 2.09 , 2.41 \ /* + */ 2.09 , 2.41 \ /* + */ 2.09 , 2.41 \ /* + */ 2.09 , 2.42 \ /* + */ 2.09 , 2.42 \ /* + */ 2.09 , 2.42 \ /* + */ 2.09 , 2.43 \ /* + */ 2.10 , 2.43 \ /* + */ 2.10 , 2.43 \ /* + */ 2.10 , 2.43 \ /* + */ 2.10 , 2.44 \ /* + */ 2.10 , 2.44 \ /* + */ 2.10 , 2.44 \ /* + */ 2.10 , 2.44 \ /* + */ 2.10 , 2.44 \ /* + */ 2.09 , 2.44 \ /* + */ 2.09 , 2.44 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.09 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.08 , 2.45 \ /* + */ 2.07 , 2.44 \ /* + */ 2.07 , 2.44 \ /* + */ 2.07 , 2.44 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize25" { + matrix input `temp' = ( /* + */ 5.53 , . 
\ /* + */ 3.92 , 3.63 \ /* + */ 3.32 , 3.09 \ /* + */ 2.98 , 2.79 \ /* + */ 2.77 , 2.60 \ /* + */ 2.61 , 2.46 \ /* + */ 2.49 , 2.35 \ /* + */ 2.39 , 2.27 \ /* + */ 2.32 , 2.20 \ /* + */ 2.25 , 2.14 \ /* + */ 2.19 , 2.09 \ /* + */ 2.14 , 2.05 \ /* + */ 2.10 , 2.02 \ /* + */ 2.06 , 1.99 \ /* + */ 2.03 , 1.96 \ /* + */ 2.00 , 1.93 \ /* + */ 1.97 , 1.91 \ /* + */ 1.94 , 1.89 \ /* + */ 1.92 , 1.87 \ /* + */ 1.90 , 1.86 \ /* + */ 1.88 , 1.84 \ /* + */ 1.86 , 1.83 \ /* + */ 1.84 , 1.81 \ /* + */ 1.83 , 1.80 \ /* + */ 1.81 , 1.79 \ /* + */ 1.80 , 1.78 \ /* + */ 1.78 , 1.77 \ /* + */ 1.77 , 1.77 \ /* + */ 1.76 , 1.76 \ /* + */ 1.75 , 1.75 \ /* + */ 1.74 , 1.75 \ /* + */ 1.73 , 1.74 \ /* + */ 1.72 , 1.73 \ /* + */ 1.71 , 1.73 \ /* + */ 1.70 , 1.73 \ /* + */ 1.69 , 1.72 \ /* + */ 1.68 , 1.72 \ /* + */ 1.67 , 1.71 \ /* + */ 1.67 , 1.71 \ /* + */ 1.66 , 1.71 \ /* + */ 1.65 , 1.71 \ /* + */ 1.65 , 1.71 \ /* + */ 1.64 , 1.70 \ /* + */ 1.63 , 1.70 \ /* + */ 1.63 , 1.70 \ /* + */ 1.62 , 1.70 \ /* + */ 1.62 , 1.70 \ /* + */ 1.61 , 1.70 \ /* + */ 1.61 , 1.70 \ /* + */ 1.61 , 1.70 \ /* + */ 1.60 , 1.70 \ /* + */ 1.60 , 1.70 \ /* + */ 1.59 , 1.70 \ /* + */ 1.59 , 1.70 \ /* + */ 1.59 , 1.70 \ /* + */ 1.58 , 1.70 \ /* + */ 1.58 , 1.71 \ /* + */ 1.58 , 1.71 \ /* + */ 1.57 , 1.71 \ /* + */ 1.59 , 1.71 \ /* + */ 1.60 , 1.71 \ /* + */ 1.63 , 1.72 \ /* + */ 1.65 , 1.72 \ /* + */ 1.67 , 1.72 \ /* + */ 1.69 , 1.72 \ /* + */ 1.70 , 1.76 \ /* + */ 1.71 , 1.81 \ /* + */ 1.72 , 1.87 \ /* + */ 1.73 , 1.91 \ /* + */ 1.74 , 1.94 \ /* + */ 1.74 , 1.96 \ /* + */ 1.75 , 1.98 \ /* + */ 1.75 , 1.99 \ /* + */ 1.76 , 2.01 \ /* + */ 1.76 , 2.02 \ /* + */ 1.77 , 2.03 \ /* + */ 1.77 , 2.04 \ /* + */ 1.78 , 2.04 \ /* + */ 1.78 , 2.05 \ /* + */ 1.78 , 2.06 \ /* + */ 1.79 , 2.06 \ /* + */ 1.79 , 2.07 \ /* + */ 1.79 , 2.07 \ /* + */ 1.79 , 2.08 \ /* + */ 1.80 , 2.08 \ /* + */ 1.80 , 2.09 \ /* + */ 1.80 , 2.09 \ /* + */ 1.80 , 2.09 \ /* + */ 1.80 , 2.09 \ /* + */ 1.80 , 2.10 \ /* + */ 1.80 , 2.10 \ /* + */ 1.80 , 
2.10 \ /* + */ 1.80 , 2.10 \ /* + */ 1.80 , 2.10 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 \ /* + */ 1.80 , 2.11 ) + + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + return scalar cv=`cv' +end + +// capt prog drop abw +// abw wants a varlist of [ eps | Z | touse] +// where Z includes all instruments, included and excluded, with constant if +// present as the last column; eps are a suitable set of residuals; and touse +// marks the observations in the data matrix used to generate the residuals +// (e.g. e(sample) of the appropriate model). +// The Noconstant option indicates that no constant term exists in the Z matrix. +// kern is the name of the HAC kernel. -ivregress- only provides definitions +// for Bartlett (default), Parzen, quadratic spectral. + +// returns the optimal bandwidth as local abw + +// abw 1.0.1 CFB 30jun2007 +// 1.0.1 : redefine kernel names (3 instances) to match ivreg2 + +prog def abw, rclass + version 9.2 + syntax varlist(ts), [ NOConstant Kernel(string)] +// validate kernel + if "`kernel'" == "" { + local kernel = "Bartlett" + } + if !inlist("`kernel'","Bartlett","Parzen","Quadratic spectral") { + di as err "Error: kernel `kernel' not compatible with bw(auto)" + return scalar abw = 1 + return local bwchoice "Kernel `kernel' not compatible with bw(auto); bw=1 (default)" + exit + } + else { +// set constant + local cons 1 + if "`noconstant'" != "" { + local cons 0 + } +// deal with ts ops + tsrevar `varlist' + local varlist1 `r(varlist)' + + mata: abw("`varlist1'",`cons',"`kernel'") + return scalar abw = `abw' + return local bwchoice "Automatic bw selection according to Newey-West (1994)" + } +end + +mata: +void abw (string scalar Zulist, + real scalar cons, + string scalar kernel + ) +{ + string rowvector Zunames, tov + string scalar v, v2 + real matrix uZ + real rowvector h + real scalar lenzu, abw + +// access the Stata variables in Zulist, 
honoring touse stored as last column + Zunames = tokens(Zulist) + lenzu=cols(Zunames)-1 + v = Zunames[|1\lenzu|] + v2 = Zunames[lenzu+1] + st_view(uZ,.,v,v2) + +// assume constant in last col of uZ if it exists +// account for eps as the first column of uZ + if (cons) { + nrows1=cols(uZ)-2 + nrows2=1 + } + else { + nrows1=cols(uZ)-1 + nrows2=0 + } +// [R] ivregress p.42: referencing Newey-West 1994 REStud 61(4):631-653 +// define h indicator rowvector + h = J(nrows1,1,1) \ J(nrows2,1,0) + +// calc mstar per p.43 +// Hannan (1971, 296) & Priestley (1981, 58) per Newey-West p. 633 +// corrected per Alistair Hall msg to Brian Poi 17jul2008 + T = rows(uZ) + oneT = 1/T + expo = 2/9 + q = 1 +// cgamma = 1.4117 + cgamma = 1.1447 + if(kernel == "Parzen") { + expo = 4/25 + q = 2 + cgamma = 2.6614 + } + if(kernel == "Quadratic spectral") { + expo = 2/25 + q = 2 + cgamma = 1.3221 + } +// per Newey-West p.639, Anderson (1971), Priestley (1981) may provide +// guidance on setting expo for other kernels + mstar = trunc(20 *(T/100)^expo) + +// calc uZ matrix + u = uZ[.,1] + Z = uZ[|1,2 \.,.|] + +// calc f vector: (u_i Z_i) * h + f = (u :* Z) * h + +// calc sigmahat vector + sigmahat = J(mstar+1,1,oneT) + for(j=0;j<=mstar;j++) { + for(i=j+1;i<=T;i++) { + sigmahat[j+1] = sigmahat[j+1] + f[i]*f[i-j] + } + } + +// calc shat(q), shat(0) + shatq = 0 + shat0 = sigmahat[1] + for(j=1;j<=mstar;j++) { + shatq = shatq + 2 * sigmahat[j+1] * j^q + shat0 = shat0 + 2 * sigmahat[j+1] + } + +// calc gammahat + expon = 1/(2*q+1) + gammahat = cgamma*( (shatq/shat0)^2 )^expon + m = gammahat * T^expon + +// calc opt lag + if(kernel == "Bartlett" | kernel == "Parzen") { + optlag = min((trunc(m),mstar)) + } + else if(kernel == "Quadratic spectral") { + optlag = min((m,mstar)) + } + +// if optlag is the optimal lag to be used, we need to add one to +// specify bandwidth in ivreg2 terms + abw = optlag + 1 + st_local("abw",strofreal(abw)) +} +end + + +*************************************** START 
**************************************** +********************************* livreg2.mlib CODE ********************************** +* Code from: +* livreg2 1.1.07 13july2014 +* authors cfb & mes +* compiled in Stata 9.2 +* Mata library for ranktest. +* Used by later versions of ivreg2 but NOT by ivreg29 - only by ranktest here. +* Introduced with ivreg2 v 3.1.01 and ranktest v 1.3.01. +* Imported into ivreg29 so that ivreg29 is free-standing. +* See end of file for version notes. + +version 9.2 +mata: + +// ********* struct ms_ivreg29_vcvorthog - shared by ivreg2 and ranktest ******************* // +struct ms_ivreg29_vcvorthog { + string scalar ename, Znames, touse, weight, wvarname + string scalar robust, clustvarname, clustvarname2, clustvarname3, kernel + string scalar sw, psd, ivarname, tvarname, tindexname + real scalar wf, N, bw, tdelta, dofminus + real matrix ZZ + pointer matrix e + pointer matrix Z + pointer matrix wvar +} + +// ********* s_ivreg29_vkernel - shared by ivreg2 and ranktest ******************* // +// Program checks whether kernel and bw choices are valid. +// s_ivreg29_vkernel is called from Stata. +// Arguments are the kernel name (req), bandwidth (req) and ivar name (opt). +// All 3 are strings. +// Returns results in r() macros. +// r(kernel) - name of kernel (string) +// r(bw) - bandwidth (scalar) + +void s_ivreg29_vkernel( string scalar kernel, + string scalar bwstring, + string scalar ivar + ) +{ + +// Check bandwidth + if (bwstring=="auto") { + bw=-1 + } + else { + bw=strtoreal(bwstring) + if (bw==.) 
{ + printf("{err}bandwidth option bw() required for HAC-robust estimation\n") + exit(102) + } + if (bw<=0) { + printf("{err}invalid bandwidth in option bw() - must be real > 0\n") + exit(198) + } + } + +// Check ivar + if (bwstring=="auto" & ivar~="") { + printf("{err}Automatic bandwidth selection not available for panel data\n") + exit(198) + } + +// Check kernel +// Valid kernel list is abbrev, full name, whether special case if bw=1 +// First in list is default kernel = Barlett + vklist = ( ("", "bartlett", "0") + \ ("bar", "bartlett", "0") + \ ("bartlett", "bartlett", "0") + \ ("par", "parzen", "0") + \ ("parzen", "parzen", "0") + \ ("tru", "truncated", "1") + \ ("truncated", "truncated", "1") + \ ("thann", "tukey-hanning", "0") + \ ("tukey-hanning", "tukey-hanning", "0") + \ ("thamm", "tukey-hamming", "0") + \ ("tukey-hamming", "tukey-hamming", "0") + \ ("qua", "quadratic spectral", "1") + \ ("qs", "quadratic spectral", "1") + \ ("quadratic-spectral", "quadratic spectral", "1") + \ ("quadratic spectral", "quadratic spectral", "1") + \ ("dan", "danielle", "1") + \ ("danielle", "danielle", "1") + \ ("ten", "tent", "1") + \ ("tent", "tent", "1") + ) + kname=strltrim(strlower(kernel)) + pos = (vklist[.,1] :== kname) + +// Exit with error if not in list + if (sum(pos)==0) { + printf("{err}invalid kernel\n") + exit(198) + } + + vkname=strproper(select(vklist[.,2],pos)) + st_global("r(kernel)", vkname) + st_numscalar("r(bw)",bw) + +// Warn if kernel is type where bw=1 means no lags are used + if (bw==1 & select(vklist[.,3],pos)=="0") { + printf("{result}Note: kernel=%s", vkname) + printf("{result} and bw=1 implies zero lags used. 
Standard errors and\n") + printf("{result} test statistics are not autocorrelation-consistent.\n") + } +} // end of program s_ivreg29_vkernel + +// ********* m_ivreg29_omega - shared by ivreg2 and ranktest ********************* // + +// NB: ivreg2 always calls m_ivreg29_omega with e as column vector, i.e., K=1 // +// ranktest can call m_ivreg29_omega with e as matrix, i.e., K>=1 // + +real matrix m_ivreg29_omega(struct ms_ivreg29_vcvorthog scalar vcvo) +{ + if (vcvo.clustvarname~="") { + st_view(clustvar, ., vcvo.clustvarname, vcvo.touse) + info = panelsetup(clustvar, 1) + N_clust=rows(info) + if (vcvo.clustvarname2~="") { + st_view(clustvar2, ., vcvo.clustvarname2, vcvo.touse) + if (vcvo.kernel=="") { + st_view(clustvar3, ., vcvo.clustvarname3, vcvo.touse) // needed only if not panel tsset + } + } + } + + if (vcvo.kernel~="") { + st_view(t, ., st_tsrevar(vcvo.tvarname), vcvo.touse) + T=max(t)-min(t)+1 + } + + if ((vcvo.kernel=="Bartlett") | (vcvo.kernel=="Parzen") | (vcvo.kernel=="Truncated") /// + | (vcvo.kernel=="Tukey-Hanning")| (vcvo.kernel=="Tukey-Hamming")) { + window="lag" + } + else if ((vcvo.kernel=="Quadratic Spectral") | (vcvo.kernel=="Danielle") | (vcvo.kernel=="Tent")) { + window="spectral" + } + else if (vcvo.kernel~="") { +// Should never reach this point +printf("\n{error:Error: invalid kernel}\n") + exit(error(3351)) + } + + L=cols(*vcvo.Z) + K=cols(*vcvo.e) // ivreg2 always calls with K=1; ranktest may call with K>=1. + +// Covariance matrices +// shat * 1/N is same as estimated S matrix of orthog conditions + +// Block for homoskedastic and AC. dof correction if any incorporated into sigma estimates. 
+ if ((vcvo.robust=="") & (vcvo.clustvarname=="")) { +// ZZ is already calculated as an external + ee = quadcross(*vcvo.e, vcvo.wf*(*vcvo.wvar), *vcvo.e) + sigma2=ee/(vcvo.N-vcvo.dofminus) + shat=sigma2#vcvo.ZZ + if (vcvo.kernel~="") { + if (window=="spectral") { + TAU=T/vcvo.tdelta-1 + } + else { + TAU=vcvo.bw + } + tnow=st_data(., vcvo.tindexname) + for (tau=1; tau<=TAU; tau++) { + kw = m_ivreg29_calckw(tau, vcvo.bw, vcvo.kernel) + if (kw~=0) { // zero weight possible with some kernels + // save an unnecessary loop if kw=0 + // remember, kw<0 possible with some kernels! + lstau = "L"+strofreal(tau) + tlag=st_data(., lstau+"."+vcvo.tindexname) + tmatrix = tnow, tlag + svar=(tnow:<.):*(tlag:<.) // multiply column vectors of 1s and 0s + tmatrix=select(tmatrix,svar) // to get intersection, and replace tmatrix +// if no lags exist, tmatrix has zero rows. + if (rows(tmatrix)>0) { +// col 1 of tmatrix has row numbers of all rows of data with this time period that have a corresponding lag +// col 2 of tmatrix has row numbers of all rows of data with lag tau that have a corresponding ob this time period. +// Should never happen that fweights or iweights make it here, +// but if they did the next line would be sqrt(wvari)*sqrt(wvari1) [with no wf since not needed for fw or iw] + wv = (*vcvo.wvar)[tmatrix[.,1]] /// + :* (*vcvo.wvar)[tmatrix[.,2]]*(vcvo.wf^2) // inner weighting matrix for quadcross + sigmahat = quadcross((*vcvo.e)[tmatrix[.,1],.], wv ,(*vcvo.e)[tmatrix[.,2],.]) /// + / (vcvo.N-vcvo.dofminus) // large dof correction + ZZhat = quadcross((*vcvo.Z)[tmatrix[.,1],.], wv, (*vcvo.Z)[tmatrix[.,2],.]) + ghat = sigmahat#ZZhat + shat=shat+kw*(ghat+ghat') + } + } // end non-zero kernel weight block + } // end tau loop + } // end kernel code +// Note large dof correction (if there is one) has already been incorporated + shat=shat/vcvo.N + } // end homoskedastic, AC code + +// Block for robust HC and HAC but not Stock-Watson and single clustering. 
+// Need to enter for double-clustering if one cluster is time. + if ( (vcvo.robust~="") & (vcvo.sw=="") & ((vcvo.clustvarname=="") /// + | ((vcvo.clustvarname2~="") & (vcvo.kernel~=""))) ) { + if (K==1) { // simple/fast where e is a column vector + if ((vcvo.weight=="fweight") | (vcvo.weight=="iweight")) { + wv = (*vcvo.e:^2) :* *vcvo.wvar + } + else { + wv = (*vcvo.e :* *vcvo.wvar * vcvo.wf):^2 // wf needed for aweights and pweights + } + shat=quadcross(*vcvo.Z, wv, *vcvo.Z) // basic Eicker-Huber-White-sandwich-robust vce + } + else { // e is a matrix so must loop + shat=J(L*K,L*K,0) + for (i=1; i<=rows(*vcvo.e); i++) { + eZi=((*vcvo.e)[i,.])#((*vcvo.Z)[i,.]) + if ((vcvo.weight=="fweight") | (vcvo.weight=="iweight")) { +// wvar is a column vector. wf not needed for fw and iw (=1 by dfn so redundant). + shat=shat+quadcross(eZi,eZi)*((*vcvo.wvar)[i]) + } + else { + shat=shat+quadcross(eZi,eZi)*((*vcvo.wvar)[i] * vcvo.wf)^2 // **** ADDED *vcvo.wf + } + } + } + if (vcvo.kernel~="") { +// Spectral windows require looping through all T-1 autocovariances + if (window=="spectral") { + TAU=T/vcvo.tdelta-1 + } + else { + TAU=vcvo.bw + } + tnow=st_data(., vcvo.tindexname) + for (tau=1; tau<=TAU; tau++) { + kw = m_ivreg29_calckw(tau, vcvo.bw, vcvo.kernel) + if (kw~=0) { // zero weight possible with some kernels + // save an unnecessary loop if kw=0 + // remember, kw<0 possible with some kernels! + lstau = "L"+strofreal(tau) + tlag=st_data(., lstau+"."+vcvo.tindexname) + tmatrix = tnow, tlag + svar=(tnow:<.):*(tlag:<.) // multiply column vectors of 1s and 0s + tmatrix=select(tmatrix,svar) // to get intersection, and replace tmatrix + +// col 1 of tmatrix has row numbers of all rows of data with this time period that have a corresponding lag +// col 2 of tmatrix has row numbers of all rows of data with lag tau that have a corresponding ob this time period. 
+// if no lags exist, tmatrix has zero rows + if (rows(tmatrix)>0) { + if (K==1) { // simple/fast where e is a column vector +// wv is inner weighting matrix for quadcross + wv = (*vcvo.e)[tmatrix[.,1]] :* (*vcvo.e)[tmatrix[.,2]] /// + :* (*vcvo.wvar)[tmatrix[.,1]] :* (*vcvo.wvar)[tmatrix[.,2]] * (vcvo.wf^2) + ghat = quadcross((*vcvo.Z)[tmatrix[.,1],.], wv, (*vcvo.Z)[tmatrix[.,2],.]) + } + else { // e is a matrix so must loop + ghat=J(L*K,L*K,0) + for (i=1; i<=rows(tmatrix); i++) { + wvari =(*vcvo.wvar)[tmatrix[i,1]] + wvari1=(*vcvo.wvar)[tmatrix[i,2]] + ei =(*vcvo.e)[tmatrix[i,1],.] + ei1 =(*vcvo.e)[tmatrix[i,2],.] + Zi =(*vcvo.Z)[tmatrix[i,1],.] + Zi1 =(*vcvo.Z)[tmatrix[i,2],.] + eZi =ei#Zi + eZi1=ei1#Zi1 +// Should never happen that fweights or iweights make it here, but if they did +// the next line would be ghat=ghat+eZi'*eZi1*sqrt(wvari)*sqrt(wvari1) +// [without *vcvo.wf since wf=1 for fw and iw] + ghat=ghat+quadcross(eZi,eZi1)*wvari*wvari1 * (vcvo.wf^2) // ADDED * (vcvo.wf^2) + } + } + shat=shat+kw*(ghat+ghat') + } // end non-zero-obs accumulation block + } // end non-zero kernel weight block + } // end tau loop + } // end kernel code +// Incorporate large dof correction if there is one + shat=shat/(vcvo.N-vcvo.dofminus) + } // end HC/HAC code + + if (vcvo.clustvarname~="") { +// Block for cluster-robust +// 2-level clustering: S = S(level 1) + S(level 2) - S(level 3 = intersection of levels 1 & 2) +// Prepare shat3 if 2-level clustering + if (vcvo.clustvarname2~="") { + if (vcvo.kernel~="") { // second cluster variable is time + // shat3 was already calculated above as shat + shat3=shat*(vcvo.N-vcvo.dofminus) + } + else { // calculate shat3 + // data were sorted on clustvar3-clustvar1 so + // clustvar3 is nested in clustvar1 and Mata panel functions + // work for both. 
+ info3 = panelsetup(clustvar3, 1) + if (rows(info3)==rows(*vcvo.e)) { // intersection of levels 1 & 2 are all single obs + // so no need to loop through row by row + if (K==1) { // simple/fast where e is a column vector + wv = (*vcvo.e :* *vcvo.wvar * vcvo.wf):^2 + shat3=quadcross(*vcvo.Z, wv, *vcvo.Z) // basic Eicker-Huber-White-sandwich-robust vce + } + else { // e is a matrix so must loop + shat3=J(L*K,L*K,0) + for (i=1; i<=rows(*vcvo.e); i++) { + eZi=((*vcvo.e)[i,.])#((*vcvo.Z)[i,.]) + shat3=shat3+quadcross(eZi,eZi)*((*vcvo.wvar)[i] * vcvo.wf)^2 // **** ADDED *vcvo.wf + } + } + } + else { // intersection of levels 1 & 2 includes some groups of obs + N_clust3=rows(info3) + shat3=J(L*K,L*K,0) + for (i=1; i<=N_clust3; i++) { + esub=panelsubmatrix(*vcvo.e,i,info3) + Zsub=panelsubmatrix(*vcvo.Z,i,info3) + wsub=panelsubmatrix(*vcvo.wvar,i,info3) + wv = esub :* wsub * vcvo.wf + if (K==1) { // simple/fast where e is a column vector + eZ = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + } + else { + eZ = J(1,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZ=eZ+(esub[j,.]#Zsub[j,.])*wsub[j,.] 
* vcvo.wf // **** ADDED *vcvo.wf + } + } + shat3=shat3+quadcross(eZ,eZ) + } + } + } + } + +// 1st level of clustering, no kernel-robust +// Entered unless 1-level clustering and kernel-robust + if (!((vcvo.kernel~="") & (vcvo.clustvarname2==""))) { + shat=J(L*K,L*K,0) + for (i=1; i<=N_clust; i++) { // loop through clusters, adding Z'ee'Z + // for indiv cluster in each loop + esub=panelsubmatrix(*vcvo.e,i,info) + Zsub=panelsubmatrix(*vcvo.Z,i,info) + wsub=panelsubmatrix(*vcvo.wvar,i,info) + if (K==1) { // simple/fast if e is a column vector + wv = esub :* wsub * vcvo.wf + eZ = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + } + else { + eZ=J(1,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZ=eZ+(esub[j,.]#Zsub[j,.])*wsub[j,.]*vcvo.wf // **** ADDED *vcvo.wf + } + } + shat=shat+quadcross(eZ,eZ) + } // end loop through clusters + } + +// 2-level clustering, no kernel-robust + if ((vcvo.clustvarname2~="") & (vcvo.kernel=="")) { + imax=max(clustvar2) // clustvar2 is numbered 1..N_clust2 + shat2=J(L*K,L*K,0) + for (i=1; i<=imax; i++) { // loop through clusters, adding Z'ee'Z + // for indiv cluster in each loop + svar=(clustvar2:==i) // mimics panelsubmatrix but doesn't require sorted data + esub=select(*vcvo.e,svar) // it is, however, noticably slower. + Zsub=select(*vcvo.Z,svar) + wsub=select(*vcvo.wvar,svar) + if (K==1) { // simple/fast if e is a column vector + wv = esub :* wsub * vcvo.wf + eZ = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + } + else { + eZ=J(1,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZ=eZ+(esub[j,.]#Zsub[j,.])*wsub[j,.]*vcvo.wf // **** ADDED *vcvo.wf + } + } + shat2=shat2+quadcross(eZ,eZ) + } + } + +// 1st level of cluster, kernel-robust OR +// 2-level clustering, kernel-robust and time is 2nd cluster variable + if (vcvo.kernel~="") { + shat2=J(L*K,L*K,0) +// First, standard cluster-robust, i.e., no lags. 
+ i=min(t) + while (i<=max(t)) { // loop through all T clusters, adding Z'ee'Z + // for indiv cluster in each loop + eZ=J(1,L*K,0) + svar=(t:==i) // select obs with t=i + if (colsum(svar)>0) { // there are obs with t=i + esub=select(*vcvo.e,svar) + Zsub=select(*vcvo.Z,svar) + wsub=select(*vcvo.wvar,svar) + if (K==1) { // simple/fast if e is a column vector + wv = esub :* wsub * vcvo.wf + eZ = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + } + else { +// MISSING LINE IS NEXT + eZ=J(1,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZ=eZ+(esub[j,.]#Zsub[j,.])*wsub[j,.]*vcvo.wf // **** ADDED *vcvo.wf + } + } + shat2=shat2+quadcross(eZ,eZ) + } + i=i+vcvo.tdelta + } // end i loop through all T clusters + +// Spectral windows require looping through all T-1 autocovariances + if (window=="spectral") { + TAU=T/vcvo.tdelta-1 + } + else { + TAU=vcvo.bw + } + + for (tau=1; tau<=TAU; tau++) { + kw = m_ivreg29_calckw(tau, vcvo.bw, vcvo.kernel) // zero weight possible with some kernels + // save an unnecessary loop if kw=0 + // remember, kw<0 possible with some kernels! + if (kw~=0) { + i=min(t)+tau*vcvo.tdelta // Loop through all possible ts (time clusters) + while (i<=max(t)) { // Start at earliest possible t + svar=t[.,]:==i // svar is current, svar1 is tau-th lag + svar1=t[.,]:==(i-tau*vcvo.tdelta) // tau*vcvo.tdelta is usually just tau + if ((colsum(svar)>0) // there are current & lagged obs + & (colsum(svar1)>0)) { + wv = select((*vcvo.e),svar) :* select((*vcvo.wvar),svar) * vcvo.wf + wv1 = select((*vcvo.e),svar1) :* select((*vcvo.wvar),svar1) * vcvo.wf + Zsub =select((*vcvo.Z),svar) + Zsub1=select((*vcvo.Z),svar1) + if (K==1) { // simple/fast, e is column vector + eZsub = quadcross(1, wv, Zsub) // equivalent to colsum(wv :* Zsub) + eZsub1= quadcross(1, wv1, Zsub1) // equivalent to colsum(wv :* Zsub) + } + else { + eZsub=J(1,L*K,0) + for (j=1; j<=rows(Zsub); j++) { + wvj =wv[j,.] + Zj =Zsub[j,.] 
+ eZsub=eZsub+(wvj#Zj) + } + eZsub1=J(1,L*K,0) + for (j=1; j<=rows(Zsub1); j++) { + wv1j =wv1[j,.] + Z1j =Zsub1[j,.] + eZsub1=eZsub1+(wv1j#Z1j) + } + } + ghat=quadcross(eZsub,eZsub1) + shat2=shat2+kw*(ghat+ghat') + } + i=i+vcvo.tdelta + } + } // end non-zero kernel weight block + } // end tau loop + +// If 1-level clustering, shat2 just calculated above is actually the desired shat + if (vcvo.clustvarname2=="") { + shat=shat2 + } + } + +// 2-level clustering, completion +// Cameron-Gelbach-Miller/Thompson method: +// Add 2 cluster variance matrices and subtract 3rd + if (vcvo.clustvarname2~="") { + shat = shat+shat2-shat3 + } + +// Note no dof correction required for cluster-robust + shat=shat/vcvo.N + } // end cluster-robust code + + if (vcvo.sw~="") { +// Stock-Watson adjustment. Calculate Bhat in their equation (6). Also need T=panel length. +// They define for balanced panels. Since T is not constant for unbalanced panels, need +// to incorporate panel-varying 1/T, 1/(T-1) and 1/(T-2) as weights in summation. + + st_view(ivar, ., st_tsrevar(vcvo.ivarname), vcvo.touse) + info_ivar = panelsetup(ivar, 1) + + shat=J(L*K,L*K,0) + bhat=J(L*K,L*K,0) + N_panels=0 + for (i=1; i<=rows(info_ivar); i++) { + esub=panelsubmatrix(*vcvo.e,i,info_ivar) + Zsub=panelsubmatrix(*vcvo.Z,i,info_ivar) + wsub=panelsubmatrix(*vcvo.wvar,i,info_ivar) + Tsub=rows(esub) + if (Tsub>2) { // SW cov estimator defined only for T>2 + N_panels=N_panels+1 + sigmahatsub=J(K,K,0) + ZZsub=J(L*K,L*K,0) + shatsub=J(L*K,L*K,0) + for (j=1; j<=rows(esub); j++) { + eZi=esub[j,1]#Zsub[j,.] 
+ if ((vcvo.weight=="fweight") | (vcvo.weight=="iweight")) { + shatsub=shatsub+quadcross(eZi,eZi)*wsub[j]*vcvo.wf + sigmahatsub=sigmahatsub + quadcross(esub[j,1],esub[j,1])*wsub[j]*vcvo.wf + ZZsub=ZZsub+quadcross(Zsub[j,.],Zsub[j,.])*wsub[j]*vcvo.wf + } + else { + shatsub=shatsub+quadcross(eZi,eZi)*((wsub[j]*vcvo.wf)^2) + sigmahatsub=sigmahatsub + quadcross(esub[j,1],esub[j,1])*((wsub[j]*vcvo.wf)^2) + ZZsub=ZZsub+quadcross(Zsub[j,.],Zsub[j,.])*((wsub[j]*vcvo.wf)^2) + } + } // end loop through j obs of panel i + shat=shat + shatsub*(Tsub-1)/(Tsub-2) + bhat=bhat + ZZsub/Tsub#sigmahatsub/(Tsub-1)/(Tsub-2) + } + } // end loop through i panels + +// Note that Stock-Watson incorporate an N-n-k degrees of freedom correction in their eqn 4 +// for what we call shat. We use only an N-n degrees of freedom correction, i.e., we ignore +// the k regressors. This is because this is an estimate of S, the VCV of orthogonality conditions, +// independently of its use to obtain an estimate of the variance of beta. Makes no diff aysmptotically. +// Ignore dofminus correction since this is explicitly handled here. +// Use number of valid panels in denominator (SW cov estimator defined only for panels with T>2). + shat=shat/(vcvo.N-N_panels) + bhat=bhat/N_panels + shat=shat-bhat + } // end Stock-Watson block + + _makesymmetric(shat) + +// shat may not be positive-definite. Use spectral decomposition to obtain an invertable version. +// Extract Eigenvector and Eigenvalues, replace EVs, and reassemble shat. +// psda option: Stock-Watson 2008 Econometrica, Remark 8, say replace neg EVs with abs(EVs). +// psd0 option: Politis (2007) says replace neg EVs with zeros. 
+ if (vcvo.psd~="") { + symeigensystem(shat,Evec,Eval) + if (vcvo.psd=="psda") { + Eval = abs(Eval) + } + else { + Eval = Eval + (abs(Eval) - Eval)/2 + } + shat = Evec*diag(Eval)*Evec' + _makesymmetric(shat) + } + + return(shat) + +} // end of program m_ivreg29_omega + +// *********************************************************************** // +// ********* m_ivreg29_calckw - shared by ivreg2 and ranktest ********************* // +// *********************************************************************** // + +real scalar m_ivreg29_calckw( real scalar tau, + real scalar bw, + string scalar kernel) + { + karg = tau / bw + if (kernel=="Truncated") { + kw=1 + } + if (kernel=="Bartlett") { + kw=(1-karg) + } + if (kernel=="Parzen") { + if (karg <= 0.5) { + kw = 1-6*karg^2+6*karg^3 + } + else { + kw = 2*(1-karg)^3 + } + } + if (kernel=="Tukey-Hanning") { + kw=0.5+0.5*cos(pi()*karg) + } + if (kernel=="Tukey-Hamming") { + kw=0.54+0.46*cos(pi()*karg) + } + if (kernel=="Tent") { + kw=2*(1-cos(tau*karg)) / (karg^2) + } + if (kernel=="Danielle") { + kw=sin(pi()*karg) / (pi()*karg) + } + if (kernel=="Quadratic Spectral") { + kw=25/(12*pi()^2*karg^2) /* + */ * ( sin(6*pi()*karg/5)/(6*pi()*karg/5) /* + */ - cos(6*pi()*karg/5) ) + } + return(kw) + } // end kw + +// *********************************************************************** // +// ********* END CODE SHARED BY ivreg2 AND ranktest ******************** // +// *********************************************************************** // + + +end + +****************************************** END *************************************** +*********************************** livreg2.mlib CODE ******************************** + +***************************************** START ************************************** +*********************************** ranktest.ado CODE ******************************** +* Code from: +* ranktest 1.3.04 24aug2014 +* author mes, based on code by fk +* Imported into ivreg29 so that ivreg29 is 
free-standing. +* See end of file for version notes. + +program define ivreg29_ranktest, rclass sortpreserve + version 9.2 + local lversion 01.3.04 + + if substr("`1'",1,1)== "," { + if "`2'"=="version" { + di in ye "`lversion'" + return local version `lversion' + exit + } + else { +di as err "invalid syntax" + exit 198 + } + } + +* If varlist 1 or varlist 2 have a single element, parentheses optional + + if substr("`1'",1,1)=="(" { + GetVarlist `0' + local y `s(varlist)' + local K : word count `y' + local 0 `"`s(rest)'"' + sret clear + } + else { + local y `1' + local K 1 + mac shift 1 + local 0 `"`*'"' + } + + if substr("`1'",1,1)=="(" { + GetVarlist `0' + local z `s(varlist)' + local L : word count `z' + local 0 `"`s(rest)'"' + sret clear + } + else { + local z `1' + local K 1 + mac shift 1 +* Need to reinsert comma before options (if any) for -syntax- command to work + local 0 `", `*'"' + } + +* Option version ignored here if varlists were provided + syntax [if] [in] [aw fw pw iw/] [, partial(varlist ts) fwl(varlist ts) /* + */ NOConstant wald ALLrank NULLrank FULLrank ROBust cluster(varlist) /* + */ BW(string) kernel(string) Tvar(varname) Ivar(varname) sw psd version /* + */ dofminus(integer 0) ] + + local partial "`partial' `fwl'" + + if "`noconstant'"=="" { + tempvar one + gen byte `one' = 1 + local partial "`partial' `one'" + } + + if "`wald'"~="" { + local LMWald "Wald" + } + else { + local LMWald "LM" + } + + local optct : word count `allrank' `nullrank' `fullrank' + if `optct' > 1 { +di as err "Incompatible options: `allrank' `nullrank' `fullrank'" + error 198 + } + else if `optct' == 0 { +* Default + local allrank "allrank" + } + +* Note that by tsrevar-ing here, subsequent disruption to the sort doesn't matter +* for TS operators. + tsrevar `y' + local vl1 `r(varlist)' + tsrevar `z' + local vl2 `r(varlist)' + tsrevar `partial' + local partial `r(varlist)' + + foreach vn of varlist `vl1' { + tempvar tv + qui gen double `tv' = . 
+ local tempvl1 "`tempvl1' `tv'"
+ }
+ foreach vn of varlist `vl2' {
+ tempvar tv
+ qui gen double `tv' = .
+ local tempvl2 "`tempvl2' `tv'"
+ }
+
+ marksample touse
+ markout `touse' `vl1' `vl2' `partial' `cluster', strok
+
+* Stock-Watson and cluster imply robust.
+ if "`sw'`cluster'" ~= "" {
+ local robust "robust"
+ }
+
+ tempvar wvar
+ if "`weight'" == "fweight" | "`weight'"=="aweight" {
+ local wtexp `"[`weight'=`exp']"'
+ gen double `wvar'=`exp'
+ }
+* NOTE(fix): restored the `weight' macro in the condition below. It previously
+* read "`fsqrt(wf)*(wvar^0.5):*'" -- a corrupted search-and-replace artifact --
+* whose (empty) expansion could never equal "fweight", so the guard rejecting
+* fweights with kernel-robust (tsset) estimation was silently dead code.
+ if "`weight'" == "fweight" & "`kernel'" !="" {
+ di in red "fweights not allowed (data are -tsset-)"
+ exit 101
+ }
+ if "`weight'" == "fweight" & "`sw'" != "" {
+ di in red "fweights currently not supported with -sw- option"
+ exit 101
+ }
+ if "`weight'" == "iweight" {
+ if "`robust'`cluster'`bw'" !="" {
+ di in red "iweights not allowed with robust, cluster, AC or HAC"
+ exit 101
+ }
+ else {
+ local wtexp `"[`weight'=`exp']"'
+ gen double `wvar'=`exp'
+ }
+ }
+ if "`weight'" == "pweight" {
+ local wtexp `"[aweight=`exp']"'
+ gen double `wvar'=`exp'
+ local robust "robust"
+ }
+ if "`weight'" == "" {
+* If no weights, define neutral weight variable
+ qui gen byte `wvar'=1
+ }
+
+
+* Every time a weight is used, must multiply by scalar wf ("weight factor")
+* wf=1 for no weights, fw and iw, wf = scalar that normalizes sum to be N if aw or pw
+ sum `wvar' if `touse' `wtexp', meanonly
+* Weight statement
+ if "`weight'" ~= "" {
+di in gr "(sum of wgt is " %14.4e `r(sum_w)' ")"
+ }
+ if "`weight'"=="" | "`weight'"=="fweight" | "`weight'"=="iweight" {
+* If weight is "", weight var must be column of ones and N is number of rows.
+* With fw and iw, effective number of observations is sum of weight variable.
+ local wf=1
+ local N=r(sum_w)
+ }
+ else if "`weight'"=="aweight" | "`weight'"=="pweight" {
+* With aw and pw, N is number of obs, unadjusted.
+ local wf=r(N)/r(sum_w) + local N=r(N) + } + else { +* Should never reach here +di as err "ivreg29_ranktest error - misspecified weights" + exit 198 + } + +* HAC estimation. +* If bw is omitted, default `bw' is empty string. +* If bw or kernel supplied, check/set `kernel'. +* Macro `kernel' is also used for indicating HAC in use. + if "`bw'" == "" & "`kernel'" == "" { + local bw=0 + } + else { +* Need tvar for markout with time-series stuff +* Data must be tsset for time-series operators in code to work +* User-supplied tvar checked if consistent with tsset + capture tsset + if "`r(timevar)'" == "" { +di as err "must tsset data and specify timevar" + exit 5 + } + if "`tvar'" == "" { + local tvar "`r(timevar)'" + } + else if "`tvar'"!="`r(timevar)'" { +di as err "invalid tvar() option - data already -tsset-" + exit 5 + } +* If no panel data, ivar will still be empty + if "`ivar'" == "" { + local ivar "`r(panelvar)'" + } + else if "`ivar'"!="`r(panelvar)'" { +di as err "invalid ivar() option - data already -tsset-" + exit 5 + } + local tdelta `r(tdelta)' + tsreport if `touse', panel + if `r(N_gaps)' != 0 { +di in gr "Warning: time variable " in ye "`tvar'" in gr " has " /* + */ in ye "`r(N_gaps)'" in gr " gap(s) in relevant range" + } + +* Check it's a valid kernel and replace with unabbreviated kernel name; check bw. +* Automatic kernel selection allowed by ivreg2 but not ranktest so must trap. +* s_ivreg29_vkernel is in livreg2 mlib. 
+ if "`bw'"=="auto" { +di as err "invalid bandwidth in option bw() - must be real > 0" + exit 198 + } + mata: s_ivreg29_vkernel("`kernel'", "`bw'", "`ivar'") + local kernel `r(kernel)' + local bw = `r(bw)' + } + +* tdelta missing if version 9 or if not tsset + if "`tdelta'"=="" { + local tdelta=1 + } + + if "`sw'"~="" { + capture xtset + if "`ivar'" == "" { + local ivar "`r(panelvar)'" + } + else if "`ivar'"!="`r(panelvar)'" { +di as err "invalid ivar() option - data already tsset or xtset" + exit 5 + } +* Exit with error if ivar is neither supplied nor tsset nor xtset + if "`ivar'"=="" { +di as err "Must -xtset- or -tsset- data or specify -ivar- with -sw- option" + exit 198 + } + qui describe, short varlist + local sortlist "`r(sortlist)'" + tokenize `sortlist' + if "`ivar'"~="`1'" { +di as err "Error - dataset must be sorted on panel var with -sw- option" + exit 198 + } + } + +* Create variable used for getting lags etc. in Mata + tempvar tindex + qui gen `tindex'=1 if `touse' + qui replace `tindex'=sum(`tindex') if `touse' + +********** CLUSTER SETUP ********************************************** + +* Mata code requires data are sorted on (1) the first var cluster if there +* is only one cluster var; (2) on the 3rd and then 1st if two-way clustering, +* unless (3) two-way clustering is combined with kernel option, in which case +* the data are tsset and sorted on panel id (first cluster variable) and time +* id (second cluster variable). +* Second cluster var is optional and requires an identifier numbered 1..N_clust2, +* unless combined with kernel option, in which case it's the time variable. +* Third cluster var is the intersection of 1 and 2, unless combined with kernel +* opt, in which case it's unnecessary. +* Sorting on "cluster3 cluster1" means that in Mata, panelsetup works for +* both, since cluster1 nests cluster3. 
+* Note that it is possible to cluster on time but not panel, in which case +* cluster1 is time, cluster2 is empty and data are sorted on panel-time. +* Note also that if no kernel-robust, sorting will disrupt any tsset-ing, +* but data are tsrevar-ed earlier to avoid any problems. + if "`cluster'"!="" { + local clopt "cluster(`cluster')" + tokenize `cluster' + local cluster1 "`1'" + local cluster2 "`2'" + if "`kernel'"~="" { +* kernel requires either that cluster1 is time var and cluster2 is empty +* or that cluster1 is panel var and cluster2 is time var. +* Either way, data must be tsset and sorted for panel data. + if "`cluster2'"~="" { +* Allow backwards order + if "`cluster1'"=="`tvar'" & "`cluster2'"=="`ivar'" { + local cluster1 "`2'" + local cluster2 "`1'" + } + if "`cluster1'"~="`ivar'" | "`cluster2'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset panel & time vars." +di as err " tsset panel var=`ivar'; tsset time var=`tvar'; cluster vars=`cluster1',`cluster2'" + exit 198 + } + } + else { + if "`cluster1'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset time variable." +di as err " tsset time var=`tvar'; cluster var=`cluster1'" + exit 198 + } + } + } +* Simple way to get quick count of 1st cluster variable without disrupting sort +* clusterid1 is numbered 1.._Nclust1. + tempvar clusterid1 + qui egen `clusterid1'=group(`cluster1') if `touse' + sum `clusterid1' if `touse', meanonly + if "`cluster2'"=="" { + local N_clust=r(max) + local N_clust1=. + local N_clust2=. + if "`kernel'"=="" { +* Single level of clustering and no kernel-robust, so sort on single cluster var. +* kernel-robust already sorted via tsset. 
+ sort `cluster1' + } + } + else { + local N_clust1=r(max) + if "`kernel'"=="" { + tempvar clusterid2 clusterid3 +* New cluster id vars are numbered 1..N_clust2 and 1..N_clust3 + qui egen `clusterid2'=group(`cluster2') if `touse' + qui egen `clusterid3'=group(`cluster1' `cluster2') if `touse' +* Two levels of clustering and no kernel-robust, so sort on cluster3/nested in/cluster1 +* kernel-robust already sorted via tsset. + sort `clusterid3' `cluster1' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) + } + else { +* Need to create this only to count the number of clusters + tempvar clusterid2 + qui egen `clusterid2'=group(`cluster2') if `touse' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) +* Now replace with original variable + local clusterid2 `cluster2' + } + local N_clust=min(`N_clust1',`N_clust2') + } + } + +************************************************************************************************ + +* Note that bw is passed as a value, not as a string + mata: ivreg29_rkstat( "`vl1'", /* + */ "`vl2'", /* + */ "`partial'", /* + */ "`wvar'", /* + */ "`weight'", /* + */ `wf', /* + */ `N', /* + */ "`touse'", /* + */ "`LMWald'", /* + */ "`allrank'", /* + */ "`nullrank'", /* + */ "`fullrank'", /* + */ "`robust'", /* + */ "`clusterid1'", /* + */ "`clusterid2'", /* + */ "`clusterid3'", /* + */ `bw', /* + */ "`tvar'", /* + */ "`ivar'", /* + */ "`tindex'", /* + */ `tdelta', /* + */ `dofminus', /* + */ "`kernel'", /* + */ "`sw'", /* + */ "`psd'", /* + */ "`tempvl1'", /* + */ "`tempvl2'") + + tempname rkmatrix chi2 df df_r p rank ccorr eval + mat `rkmatrix'=r(rkmatrix) + mat `ccorr'=r(ccorr) + mat `eval'=r(eval) + mat colnames `rkmatrix' = "rk" "df" "p" "rank" "eval" "ccorr" + +di +di "Kleibergen-Paap rk `LMWald' test of rank of matrix" + if "`robust'"~="" & "`kernel'"~= "" & "`cluster'"=="" { +di " Test statistic robust to heteroskedasticity and autocorrelation" +di " Kernel: `kernel' Bandwidth: `bw'" + } + else if "`kernel'"~="" & 
"`cluster'"=="" { +di " Test statistic robust to autocorrelation" +di " Kernel: `kernel' Bandwidth: `bw'" + } + else if "`cluster'"~="" { +di " Test statistic robust to heteroskedasticity and clustering on `cluster'" + if "`kernel'"~="" { +di " and kernel-robust to common correlated disturbances" +di " Kernel: `kernel' Bandwidth: `bw'" + } + } + else if "`robust'"~="" { +di " Test statistic robust to heteroskedasticity" + } + else if "`LMWald'"=="LM" { +di " Test assumes homoskedasticity (Anderson canonical correlations test)" + } + else { +di " Test assumes homoskedasticity (Cragg-Donald test)" + } + + local numtests = rowsof(`rkmatrix') + forvalues i=1(1)`numtests' { +di "Test of rank=" %3.0f `rkmatrix'[`i',4] " rk=" %8.2f `rkmatrix'[`i',1] /* + */ " Chi-sq(" %3.0f `rkmatrix'[`i',2] ") pvalue=" %8.6f `rkmatrix'[`i',3] + } + scalar `chi2' = `rkmatrix'[`numtests',1] + scalar `p' = `rkmatrix'[`numtests',3] + scalar `df' = `rkmatrix'[`numtests',2] + scalar `rank' = `rkmatrix'[`numtests',4] + local N `r(N)' + return scalar df = `df' + return scalar chi2 = `chi2' + return scalar p = `p' + return scalar rank = `rank' + if "`cluster'"~="" { + return scalar N_clust = `N_clust' + } + if "`cluster2'"~="" { + return scalar N_clust1 = `N_clust1' + return scalar N_clust2 = `N_clust2' + } + return scalar N = `N' + return matrix rkmatrix `rkmatrix' + return matrix ccorr `ccorr' + return matrix eval `eval' + + tempname S V Omega + if `K' > 1 { + foreach en of local y { +* Remove "." from equation name + local en1 : subinstr local en "." 
"_", all + foreach vn of local z { + local cn "`cn' `en1':`vn'" + } + } + } + else { + foreach vn of local z { + local cn "`cn' `vn'" + } + } + mat `V'=r(V) + matrix colnames `V' = `cn' + matrix rownames `V' = `cn' + return matrix V `V' + mat `S'=r(S) + matrix colnames `S' = `cn' + matrix rownames `S' = `cn' + return matrix S `S' +end + +* Adopted from -canon- +program define GetVarlist, sclass + sret clear + gettoken open 0 : 0, parse("(") + if `"`open'"' != "(" { + error 198 + } + gettoken next 0 : 0, parse(")") + while `"`next'"' != ")" { + if `"`next'"'=="" { + error 198 + } + local list `list'`next' + gettoken next 0 : 0, parse(")") + } + sret local rest `"`0'"' + tokenize `list' + local 0 `*' + sret local varlist "`0'" +end + + +******************************************************************************* +*************************** BEGIN MATA CODE *********************************** +******************************************************************************* + + + +version 9.2 +mata: + +void ivreg29_rkstat( string scalar vl1, + string scalar vl2, + string scalar partial, + string scalar wvarname, + string scalar weight, + scalar wf, + scalar N, + string scalar touse, + string scalar LMWald, + string scalar allrank, + string scalar nullrank, + string scalar fullrank, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + bw, + string scalar tvarname, + string scalar ivarname, + string scalar tindexname, + tdelta, + dofminus, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar tempvl1, + string scalar tempvl2) +{ + +// tempx, tempy and tempz are the Stata names of temporary variables that will be changed by rkstat + if (partial~="") { + tempx=tokens(partial) + } + tempy=tokens(tempvl1) + tempz=tokens(tempvl2) + + st_view(y=.,.,tokens(vl1),touse) + st_view(z=.,.,tokens(vl2),touse) + st_view(yhat=.,.,tempy,touse) + st_view(zhat=.,.,tempz,touse) + 
st_view(mtouse=.,.,tokens(touse),touse) + st_view(wvar=.,.,tokens(wvarname),touse) + noweight=(st_vartype(wvarname)=="byte") + +// Note that we now use wf*wvar instead of wvar +// because wvar is raw weighting variable and +// wf*wvar normalizes so that sum(wf*wvar)=N. + +// Partial out the X variables +// Note that this is entered if there is a constant, +// i.e., variables are centered + if (partial~="") { + st_view(x=.,.,tempx,touse) + xx = quadcross(x, wf*wvar, x) + xy = quadcross(x, wf*wvar, y) + xz = quadcross(x, wf*wvar, z) + + by = invsym(xx)*xy + bz = invsym(xx)*xz + + yhat[.,.] = y-x*by + zhat[.,.] = z-x*bz + } + else { + yhat[.,.] = y + zhat[.,.] = z + } + K=cols(y) + L=cols(z) + + zhzh = quadcross(zhat, wf*wvar, zhat) + zhyh = quadcross(zhat, wf*wvar, yhat) + yhyh = quadcross(yhat, wf*wvar, yhat) + + pihat = invsym(zhzh)*zhyh +// rzhat is F in paper (p. 103) +// iryhat is G in paper (p. 103) + ryhat=cholesky(yhyh) + rzhat=cholesky(zhzh) + iryhat=luinv(ryhat') + irzhat=luinv(rzhat') + that=rzhat'*pihat*iryhat + +// cc is canonical correlations. Squared cc is eigenvalues. + fullsvd(that, ut, cc, vt) + vt=vt' + vecth=vec(that) + ev = cc:^2 +// S matrix in paper (p. 100). Not used in code below. +// smat=fullsdiag(cc, rows(that)-cols(that)) + + if (abs(1-cc[1,1])<1e-10) { +printf("\n{text:Warning: collinearities detected between (varlist1) and (varlist2)}\n") + } + if ((missing(ryhat)>0) | (missing(iryhat)>0) | (missing(rzhat)>0) | (missing(irzhat)>0)) { +printf("\n{error:Error: non-positive-definite matrix. May be caused by collinearities.}\n") + exit(error(3351)) + } + +// If Wald, yhat is residuals + if (LMWald=="Wald") { + yhat[.,.]=yhat-zhat*pihat + yhyh = quadcross(yhat, wvar, yhat) + } + +// Covariance matrices +// vhat is W in paper (eqn below equation 17, p. 103) +// shat is V in paper (eqn below eqn 15, p. 
103) + +// ************************************************************************************* // +// shat calculated using struct and programs m_ivreg29_omega, m_ivreg29_calckw shared with ivreg2 // + + struct ms_ivreg29_vcvorthog scalar vcvo + + + vcvo.ename = tempy // ivreg2 has = ename // + vcvo.Znames = tempz // ivreg2 has = Znames // + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.dofminus = dofminus + vcvo.ZZ = zhzh // ivreg2 has = st_matrix(ZZmatrix) // + + vcvo.e = &yhat // ivreg2 has = &e // + vcvo.Z = &zhat // ivreg2 has = &Z // + vcvo.wvar = &wvar + + shat=m_ivreg29_omega(vcvo) + +// *************************************************************************************** + +// Finally, calcluate vhat + if ((LMWald=="LM") & (kernel=="") & (robust=="") & (clustvarname=="")) { +// Homoskedastic, iid LM case means vcv is identity matrix +// Generates canonical correlation stats. Default. + vhat=I(L*K,L*K)/N + } + else { + vhat=(iryhat'#irzhat')*shat*(iryhat'#irzhat')' * N + _makesymmetric(vhat) + } + +// ready to start collecting test stats + if (allrank~="") { + firstrank=1 + lastrank=min((K,L)) + } + else if (nullrank~="") { + firstrank=1 + lastrank=1 + } + else if (fullrank~="") { + firstrank=min((K,L)) + lastrank=min((K,L)) + } + else { +// should never reach this point +printf("ivreg29_ranktest error\n") + exit + } + + rkmatrix=J(lastrank-firstrank+1,6,.) 
+ for (i=firstrank; i<=lastrank; i++) { + + if (i>1) { + u12=ut[(1::i-1),(i..L)] + v12=vt[(1::i-1),(i..K)] + } + u22=ut[(i::L),(i..L)] + v22=vt[(i::K),(i..K)] + + symeigensystem(u22*u22', evec, eval) + u22v=evec + u22d=diag(eval) + u22h=u22v*(u22d:^0.5)*u22v' + + symeigensystem(v22*v22', evec, eval) + v22v=evec + v22d=diag(eval) + v22h=v22v*(v22d:^0.5)*v22v' + + if (i>1) { + aq=(u12 \ u22)*luinv(u22)*u22h + bq=v22h*luinv(v22')*(v12 \ v22)' + } + else { + aq=u22*luinv(u22)*u22h + bq=v22h*luinv(v22')*v22' + } + +// lab is lambda_q in paper (eqn below equation 21, p. 104) +// vlab is omega_q in paper (eqn 19 in paper, p. 104) + lab=(bq#aq')*vecth + vlab=(bq#aq')*vhat*(bq#aq')' + +// Symmetrize if numerical inaccuracy means it isn't + _makesymmetric(vlab) + vlabinv=invsym(vlab) +// rk stat Assumption 2: vlab (omega_q in paper) is nonsingular. Detected by a zero on the diagonal, +// since when returning a generalized inverse, Stata/Mata choose the generalized inverse that +// sets entire column(s)/row(s) to zeros. +// Save df and rank even if test stat not available. + df=(L-i+1)*(K-i+1) + rkmatrix[i-firstrank+1,2]=df + rkmatrix[i-firstrank+1,4]=i-1 + if (diag0cnt(vlabinv)>0) { +printf("\n{text:Warning: covariance matrix omega_%f}", i-1) +printf("{text: not full rank; test of rank %f}", i-1) +printf("{text: unavailable}\n") + } +// Note not multiplying by N - already incorporated in vhat. 
+ else { + rk=lab'*vlabinv*lab + pvalue=chi2tail(df, rk) + rkmatrix[i-firstrank+1,1]=rk + rkmatrix[i-firstrank+1,3]=pvalue + } +// end of test loop + } + +// insert squared (=eigenvalues if canon corr) and unsquared canon correlations + for (i=firstrank; i<=lastrank; i++) { + rkmatrix[i-firstrank+1,6]=cc[i-firstrank+1,1] + rkmatrix[i-firstrank+1,5]=ev[i-firstrank+1,1] + } + st_matrix("r(rkmatrix)", rkmatrix) + st_matrix("r(ccorr)", cc') + st_matrix("r(eval)",ev') +// Save V matrix as in paper, without factor of 1/N + vhat=N*vhat*wf + st_matrix("r(V)", vhat) +// Save S matrix as in ivreg2, with factor of 1/N + st_matrix("r(S)", shat) + st_numscalar("r(N)", N) + if (clustvarname~="") { + st_numscalar("r(N_clust)", N_clust) + } + if (clustvarname2~="") { + st_numscalar("r(N_clust2)", N_clust2) + } +// end of program +} + + +end + +****************************************** END *************************************** +*********************************** ranktest.ado CODE ******************************** + +exit + +********************************** VERSION COMMENTS ********************************** +* Initial version cloned from official ivreg version 5.0.9 19Dec2001 +* 1.0.2: add logic for reg3. 
Sargan test +* 1.0.3: add prunelist to ensure that count of excluded exogeneous is correct +* 1.0.4: revise option to exog(), allow included exog to be specified as well +* 1.0.5: switch from reg3 to regress, many options and output changes +* 1.0.6: fixed treatment of nocons in Sargan and C-stat, and corrected problems +* relating to use of nocons combined with a constant as an IV +* 1.0.7: first option reports F-test of excluded exogenous; prunelist bug fix +* 1.0.8: dropped prunelist and switched to housekeeping of variable lists +* 1.0.9: added collinearity checks; C-stat calculated with recursive call; +* added ffirst option to report only F-test of excluded exogenous +* from 1st stage regressions +* 1.0.10: 1st stage regressions also report partial R2 of excluded exogenous +* 1.0.11: complete rewrite of collinearity approach - no longer uses calls to +* _rmcoll, does not track specific variables dropped; prunelist removed +* 1.0.12: reorganised display code and saved results to enable -replay()- +* 1.0.13: -robust- and -cluster- now imply -small- +* 1.0.14: fixed hascons bug; removed ivreg predict fn (it didn't work); allowed +* robust and cluster with z stats and correct dofs +* 1.0.15: implemented robust Sargan stat; changed to only F-stat, removed chi-sq; +* removed exog option (only orthog works) +* 1.0.16: added clusterised Sargan stat; robust Sargan handles collinearities; +* predict now works with standard SE options plus resids; fixed orthog() +* so it accepts time series operators etc. +* 1.0.17: fixed handling of weights. fw, aw, pw & iw all accepted. +* 1.0.18: fixed bug in robust Sargan code relating to time series variables. 
+* 1.0.19: fixed bugs in reporting ranks of X'X and Z'Z +* fixed bug in reporting presence of constant +* 1.0.20: added GMM option and replaced robust Sargan with (equivalent) J; +* added saved statistics of 1st stage regressions +* 1.0.21: added Cragg HOLS estimator, including allowing empty endog list; +* -regress- syntax now not allowed; revised code searching for "_cons" +* 1.0.22: modified cluster output message; fixed bug in replay for Sargan/Hansen stat; +* exactly identified Sargan/Hansen now exactly zero and p-value not saved as e(); +* cluster multiplier changed to 1 (from buggy multiplier), in keeping with +* eg Wooldridge 2002 p. 193. +* 1.0.23: fixed orthog option to prevent abort when restricted equation is underid. +* 1.0.24: fixed bug if 1st stage regressions yielded missing values for saving in e(). +* 1.0.25: Added Shea version of partial R2 +* 1.0.26: Replaced Shea algorithm with Godfrey algorithm +* 1.0.27: Main call to regress is OLS form if OLS or HOLS is specified; error variance +* in Sargan and C statistics use small-sample adjustment if -small- option is +* specified; dfn of S matrix now correctly divided by sample size +* 1.0.28: HAC covariance estimation implemented +* Symmetrize all matrices before calling syminv +* Added hack to catch F stats that ought to be missing but actually have a +* huge-but-not-missing value +* Fixed dof of F-stat - was using rank of ZZ, should have used rank of XX (couldn't use df_r +* because it isn't always saved. This is because saving df_r triggers small stats +* (t and F) even when -post- is called without dof() option, hence df_r saved only +* with -small- option and hence a separate saved macro Fdf2 is needed. 
+* Added rankS to saved macros +* Fixed trap for "no regressors specified" +* Added trap to catch gmm option with no excluded instruments +* Allow OLS syntax (no endog or excluded IVs specified) +* Fixed error messages and traps for rank-deficient robust cov matrix; includes +* singleton dummy possibility +* Capture error if posting estimated VCV that isn't pos def and report slightly +* more informative error message +* Checks 3 variable lists (endo, inexog, exexog) separately for collinearities +* Added AC (autocorrelation-consistent but conditionally-homoskedastic) option +* Sargan no longer has small-sample correction if -small- option +* robust, cluster, AC, HAC all passed on to first-stage F-stat +* bw must be < T +* 1.0.29 -orthog- also displays Hansen-Sargan of unrestricted equation +* Fixed collinearity check to include nocons as well as hascons +* Fixed small bug in Godfrey-Shea code - macros were global rather than local +* Fixed larger bug in Godfrey-Shea code - was using mixture of sigma-squares from IV and OLS +* with and without small-sample corrections +* Added liml and kclass +* 1.0.30 Changed order of insts macro to match saved matrices S and W +* 2.0.00 Collinearities no longer -qui- +* List of instruments tested in -orthog- option prettified +* 2.0.01 Fixed handling of nocons with no included exogenous, including LIML code +* 2.0.02 Allow C-test if unrestricted equation is just-identified. Implemented by +* saving Hansen-Sargan dof as = 0 in e() if just-identified. 
+* 2.0.03 Added score() option per latest revision to official ivreg +* 2.0.04 Changed score() option to pscore() per new official ivreg +* 2.0.05 Fixed est hold bug in first-stage regressions +* Fixed F-stat finite sample adjustment with cluster option to match official Stata +* Fixed F-stat so that it works with hascons (collinearity with constant is removed) +* Fixed bug in F-stat code - wasn't handling failed posting of vcv +* No longer allows/ignores nonsense options +* 2.0.06 Modified lsStop to sync with official ivreg 5.1.3 +* 2.0.07a Working version of CUE option +* Added sortpreserve, ivar and tvar options +* Fixed smalls bug in calculation of T for AC/HAC - wasn't using the last ob +* in QS kernel, and didn't take account of possible dropped observations +* 2.0.07b Fixed macro bug that truncated long varlists +* 2.0.07c Added dof option. +* Changed display of RMSE so that more digits are displayed (was %8.1g) +* Fixed small bug where cstat was local macro and should have been scalar +* Fixed bug where C stat failed with cluster. NB: wmatrix option and cluster are not compatible! +* 2.0.7d Fixed bug in dof option +* 2.1.0 Added first-stage identification, weak instruments, and redundancy stats +* 2.1.01 Tidying up cue option checks, reporting of cue in output header, etc. +* 2.1.02 Used Poskitt-Skeels (2002) result that C-D eval = cceval / (1-cceval) +* 2.1.03 Added saved lists of separate included and excluded exogenous IVs +* 2.1.04 Added Anderson-Rubin test of signif of endog regressors +* 2.1.05 Fix minor bugs relating to cluster and new first-stage stats +* 2.1.06 Fix bug in cue: capture estimates hold without corresponding capture on estimates unhold +* 2.1.07 Minor fix to ereturn local wexp, promote to version 8.2 +* 2.1.08 Added dofminus option, removed dof option. Added A-R test p-values to e(). +* Minor bug fix to A-R chi2 test - was N chi2, should have been N-L chi2. +* Changed output to remove potentially misleading refs to N-L etc. 
+* Bug fix to rhs count - sometimes regressors could have exact zero coeffs +* Bug fix related to cluster - if user omitted -robust-, orthog would use Sargan and not J +* Changed output of Shea R2 to make clearer that F and p-values do not refer to it +* Improved handling of collinearites to check across inexog, exexog and endo lists +* Total weight statement moved to follow summ command +* Added traps to catch errors if no room to save temporary estimations with _est hold +* Added -savefirst- option. Removed -hascons-, now synonymous with -nocons-. +* 2.1.09 Fixes to dof option with cluster so it no longer mimics incorrect areg behavior +* Local ivreg2_cmd to allow testing under name ivreg2 +* If wmatrix supplied, used (previously not used if non-robust sargan stat generated) +* Allowed OLS using (=) syntax (empty endo and exexog lists) +* Clarified error message when S matrix is not of full rank +* cdchi2p, ardf, ardf_r added to saved macros +* first and ffirst replay() options; DispFirst and DispFFirst separately codes 1st stage output +* Added savefprefix, macro with saved first-stage equation names. +* Added version option. 
+* Added check for duplicate variables to collinearity checks +* Rewrote/simplified Godfrey-Shea partial r2 code +* 2.1.10 Added NOOUTput option +* Fixed rf bug so that first does not trigger unnecessary saved rf +* Fixed cue bug - was not starting with robust 2-step gmm if robust/cluster +* 2.1.11 Dropped incorrect/misleading dofminus adjustments in first-stage output summary +* 2.1.12 Collinearity check now checks across inexog/exexog/endog simultaneously +* 2.1.13 Added check to catch failed first-stage regressions +* Fixed misleading failed C-stat message +* 2.1.14 Fixed mishandling of missing values in AC (non-robust) block +* 2.1.15 Fixed bug in RF - was ignoring weights +* Added -endog- option +* Save W matrix for all cases; ensured copy is posted with wmatrix option so original isn't zapped +* Fixed cue bug - with robust, was entering IV block and overwriting correct VCV +* 2.1.16 Added -fwl- option +* Saved S is now robust cov matrix of orthog conditions if robust, whereas W is possibly non-robust +* weighting matrix used by estimator. inv(S)=W if estimator is efficient GMM. +* Removed pscore option (dropped by official ivreg). +* Fixed bug where -post- would fail because of missing values in vcv +* Remove hascons as synonym for nocons +* OLS now outputs 2nd footer with variable lists +* 2.1.17 Reorganization of code +* Added ll() macro +* Fixed N bug where weights meant a non-integer ob count that was rounded down +* Fixed -fwl- option so it correctly handles weights (must include when partialling-out) +* smatrix option takes over from wmatrix option. Consistent treatment of both. +* Saved smatrix and wmatrix now differ in case of inefficient GMM. +* Added title() and subtitle() options. +* b0 option returns a value for the Sargan/J stat even if exactly id'd. +* (Useful for S-stat = value of GMM objective function.) +* HAC and AC now allowed with LIML and k-class. 
+* Collinearity improvements: bug fixed because collinearity was mistakenly checked across +* inexog/exexog/endog simultaneously; endog predicted exactly by IVs => reclassified as inexog; +* _rmcollright enforces inexog>endo>exexog priority for collinearities, if Stata 9.2 or later. +* K-class, LIML now report Sargan and J. C-stat based on Sargan/J. LIML reports AR if homosked. +* nb: can always easily get a C-stat for LIML based on diff of two AR stats. +* Always save Sargan-Hansen as e(j); also save as e(sargan) if homoskedastic. +* Added Stock-Watson robust SEs options sw() +* 2.1.18 Added Cragg-Donald-Stock-Yogo weak ID statistic critical values to main output +* Save exexog_ct, inexog_ct and endog_ct as macros +* Stock-Watson robust SEs now assume ivar is group variable +* Option -sw- is standard SW. Option -swpsd- is PSD version a la page 6 point 10. +* Added -noid- option. Suppresses all first-stage and identification statistics. +* Internal calls to ivreg2 use noid option. +* Added hyperlinks to ivreg2.hlp and helpfile argument to display routines to enable this. 
+* 2.1.19 Added matrix rearrangement and checks for smatrix and wmatrix options +* Recursive calls to cstat simplified - no matrix rearrangement or separate robust/nonrobust needed +* Reintroduced weak ID stats to ffirst output +* Added robust ID stats to ffirst output for case of single endogenous regressor +* Fixed obscure bug in reporting 1st stage partial r2 - would report zero if no included exogenous vars +* Removed "HOLS" in main output (misleading if, e.g., estimation is AC but not HAC) +* Removed "ML" in main output if no endogenous regressors - now all ML is labelled LIML +* model=gmm is now model=gmm2s; wmatrix estimation is model=gmm +* wmatrix relates to gmm estimator; smatrix relates to gmm var-cov matrix; b0 behavior equiv to wmatrix +* b0 option implies nooutput and noid options +* Added nocollin option to skip collinearity checks +* Fixed minor display bug in ffirst output for endog vars with varnames > 12 characters +* Fixed bug in saved rf and first-stage results for vars with long varnames; uses permname +* Fixed bug in model df - had counted RHS, now calculates rank(V) since latter may be rank-deficient +* Rank of V now saved as macro rankV +* fwl() now allows partialling-out of just constant with _cons +* Added Stock-Wright S statistic (but adds overhead - calls preserve) +* Properties now include svyj. +* Noted only: fwl bug doesn't allow time-series operators. +* 2.1.20 Fixed Stock-Wright S stat bug - didn't allow time-series operators +* 2.1.21 Fixed Stock-Wright S stat to allow for no exog regressors cases +* 2.2.00 CUE partials out exog regressors, estimates endog coeffs, then exog regressors separately - faster +* gmm2s becomes standard option, gmm supported as legacy option +* 2.2.01 Added explanatory messages if gmm2s used. +* States if estimates efficient for/stats consistent for het, AC, etc. +* Fixed small bug that prevented "{help `helpfile'##fwl:fwl}" from displaying when -capture-d. 
+* Error message in footer about insuff rank of S changed to warning message with more informative message. +* Fixed bug in CUE with weights. +* 2.2.02 Removed CUE partialling-out; still available with fwl +* smatrix and wmatrix become documented options. e(model)="gmmw" means GMM with arbitrary W +* 2.2.03 Fixed bug in AC with aweights; was weighting zi'zi but not ei'ei. +* 2.2.04 Added abw code for bw(), removed properties(svyj) +* 2.2.05 Fixed bug in AC; need to clear variable vt1 at start of loop +* If iweights, Nprec (#obs with precision) rounded to nearest integer to mimic official Stata treatment +* and therefore don't need Nprec scalar at all - will be same as N +* Saves fwl_ct as macro. +* -ffirst- output, weak id stat, etc. now adjust for number of partialled-out variables. +* Related changes: df_m, df_r include adjustments for partialled-out variables. +* Option nofwlsmall introduced - suppresses above adjustments. Undocumented in ivreg2.hlp. +* Replaced ID tests based on canon corr with Kleibergen-Paap rk-based stats if not homoskedastic +* Replaced LR ID test stats with LM test stats. +* Checks that -ranktest- is installed. +* 2.2.06 Fixed bug with missing F df when cue called; updated required version of ranktest +* 2.2.07 Modified redundancy test statistic to match standard regression-based LM tests +* Change name of -fwl- option to -partial-. +* Use of b0 means e(model)=CUE. Added informative b0 option titles. b0 generates output but noid. +* Removed check for integer bandwidth if auto option used. +* 2.2.08 Add -nocollin- to internal calls and to -ivreg2_cue- to speed performance. +* 2.2.09 Per msg from Brian Poi, Alastair Hall verifies that Newey-West cited constant of 1.1447 +* is correct. Corrected mata abw() function. Require -ranktest- 1.1.03. +* 2.2.10 Added Angrist-Pischke multivariate f stats. Rewrite of first and ffirst output. +* Added Cragg-Donald to weak ID output even when non-iid. 
+* Fixed small bug in non-robust HAC code whereby extra obs could be used even if dep var missing. +* (required addition of L`tau'.(`s1resid') in creation of second touse variable) +* Fixed bugs that zapped varnames with "_cons" in them +* Changed tvar and ivar setup so that data must be tsset or xtset; tvar and ivar can still be supplied +* but are only checked for consistency. +* Fixed bug in redundancy test stat when called by xtivreg2+cluster - no dofminus adj needed in this case +* Removed undocumented and unverified Stock-Watson option +* Bug in vecaccum meant that if there is only 1 observation it will crash with insufficient obs. +* Affected kernel-robust AC/HAC if there were gaps in the data. Fixed by replacing missings with zeros. +* Fixed bug in LIML with AC but not HAC that meant non-robust VCV was reported. +* Changed reporting so that gaps between panels are not reported as such. +* Changed Stock-Wright S statistic so that it uses straight partialling-out of exog regressors +* (had been, in effect, doing 2SGMM partialling-out) +* Fixed xtivreg2-related bug in AC (not HAC) where dofminus correction was not used in calc of sigmas. +* Fixed xtivreg2-related bug where IV weighting matrix didn't incorporate dofminus correction. +* Added check that weight variable is not transformed by partialling out. +* Fixed bug where dropped collinear endogenous didn't get a warning or listing +* Removed N*CDEV Wald chi-sq statistic from ffirst output (LM stat enough) +* Revised eigenvalue code of LIML with Mata version +* 2.2.11 -capture- instead of -qui- before reduced form to suppress not-full-rank error warning +* Repaired bug in Stock-Wright S stat so that it handles partialling-out consistently. +* 2.2.12 Disabled auto bandwidth selection with panel data +* 2.2.13 Fixed partial bug - partialcons macro saved =0 unless _cons explicitly in partial() varlist +* 2.2.14 Incorporated ranktest and Mata library code into ivreg29.ado - no longer needs separate installation. 
+ +* Version notes for imported version of ranktest: +* 1.0.00 First distributed version +* 1.0.01 With iweights, rkstat truncates N to mimic official Stata treatment of noninteger iweights +* Added warning if shat/vhat/vlab not of full rank. +* 1.0.02 Added NULLrank option +* Added eq names to saved V and S matrices +* 1.0.03 Added error catching for collinearities between varlists +* Not saving S matrix; V matrix now as in paper (without 1/N factor) +* Statistic, p-value etc set to missing if vcv not of full rank (Assumpt 2 in paper fails) +* 1.0.04 Fixed touse bug - was treating missings as touse-able +* Change some cross-products in robust loops to quadcross +* 1.0.05 Fixed bug with col/row names and ts operators. Added eval to saved matrices. +* 1.1.00 First ssc-ideas version. Added version 9.2 prior to Mata compiled section. +* 1.1.01 Allow non-integer bandwidth +* 1.1.02 Changed calc of yhat, zhat and pihat to avoid needlessly large intermediate matrices +* and to use more accurate qrsolve instead of inverted X'X. +* 1.1.03 Fixed touse bug that didn't catch missing cluster variable +* Fixed cluster bug - data needed to be sorted by cluster for Mata panel functions to work properly +* 1.2.00 Changed reporting so that gaps between panels are not reported as such. +* Added support for tdelta in tsset data. +* Changed tvar and ivar setup so that data must be tsset or xtset. +* Removed unnecessary loops through panel data with spectral kernels +* shat vcv now also saved. 
+* Added support for Thompson/Cameron-Gelbach-Miller 2-level cluster-robust vcv +* Added support for Stock-Watson vcv - but requires data to have FEs partialled out, & doesn't support fweights +* Removed mimicking of Stata mistake of truncated N with iweights to nearest integer +* Fixed small bug with quadratic kernel (wasn't using negative weights) +* Optimised code dealing with time-series data +* 1.2.01 Fixed bug that always used Stock-Watson spectral decomp to create invertible shat +* instead of only when (undocumented) spsd option is called. +* 1.2.02 Fixed bug that did not allow string cluster variables +* 1.2.03 Fixed bug in code for cluster+kernel robust (typo in imported code from ivreg2=>crash) +* 1.2.04 Replaced code for S with ivreg2 code modified to support e matrix (cols > 1) +* Code block (m_omega, m_calckw, struct definition) now shared by ranktest and ivreg2. +* Renamed spsd option to psd following ivreg2 3.0.07 +* Added wf ("weight factor") and statement about sum of weights, as in ivreg2 +* Added dofminus option, as in ivreg2 +* Fixed minor reporting bug - was reporting gaps in entire panel, not just touse-d portion +* Recoded kernel & bw checks to use shared ivreg2 subroutine vkernel +* 1.2.05 Fixed weighting bug introduced in 1.2.04. All weights were affected. +* Was result of incompatibility of code shared with ivreg2. +* 1.3.01 First ranktest version with accompanying Mata library (shared with -ivreg2-). +* Mata library includes struct ms_vcvorthog, m_omega, m_calckw, s_vkernel. +* Fixed bug in 2-way cluster code (now in m_omega in Mata library) - would crash if K>1. +* 1.3.02 Improved partialling out and matrix inversion - switched from qrsolve to invsym. +* Use _makesymmetric() instead of symmetrizing by hand. +* 1.3.03 01Jan14. Fixed reporting bug with 2-way clustering and kernel-robust that would give +* wrong count for 2nd cluster variable. +* 1.3.04 24Aug14. 
Fixed bug in markout - would include obs where some vars were missing + +* Version notes for imported version of Mata library +* 1.1.01 First version of library. +* Contains struct ms_vcvorthog, m_omega, m_calckw, s_vkernel. +* Compiled in Stata 9.2 for compatibility with ranktest 1.3.01 (a 9.2 program). +* 1.1.02 Add routine cdsy. Standardized spelling/caps/etc. of QS as "Quadratic Spectral" +* 1.1.03 Corrected spelling of "Danielle" kernel in m_omega() +* 1.1.04 Fixed weighting bugs in robust and cluster code of m_omega where K>1 +* 1.1.05 Added whichlivreg2(.) to aid in version control +* 1.1.06 Fixed remaining weighting bug (see 1.1.04) in 2-way clustering when interection +* of clustering levels is groups +* 1.1.07 Fixed HAC bug that crashed m_omega(.) when there were no obs for a particular lag diff --git a/110/replication_package/replication/ado/plus/i/ivreg29.hlp b/110/replication_package/replication/ado/plus/i/ivreg29.hlp new file mode 100644 index 0000000000000000000000000000000000000000..cddc41fcced31e1ff450d2804e6dfb171d2e28d5 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg29.hlp @@ -0,0 +1,1508 @@ +{smcl} +{* 2Jan2010}{...} +{hline} +help for {hi:ivreg29} +{hline} + +{title:Extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression} + +{p 4}Full syntax + +{p 8 14}{cmd:ivreg29} {it:depvar} [{it:varlist1}] +{cmd:(}{it:varlist2}{cmd:=}{it:varlist_iv}{cmd:)} [{it:weight}] +[{cmd:if} {it:exp}] [{cmd:in} {it:range}] +{bind:[{cmd:,} {cmd:gmm2s}} +{cmd:bw(}{it:#}{cmd:)} +{cmd:kernel(}{it:string}{cmd:)} +{cmd:liml} +{cmd:fuller(}{it:#}{cmd:)} +{cmd:kclass(}{it:#}{cmd:)} +{cmd:coviv} +{cmd:cue} +{cmd:cueinit}{cmd:(}{it:matrix}{cmd:)} +{cmdab:cueopt:ions}{cmd:(}{it:string}{cmd:)} +{cmd:b0}{cmd:(}{it:matrix}{cmd:)} +{cmdab:r:obust} +{cmdab:cl:uster}{cmd:(}{it:varname}{cmd:)} +{cmd:orthog(}{it:varlist_ex}{cmd:)} +{cmd:endog(}{it:varlist_en}{cmd:)} +{cmdab:red:undant(}{it:varlist_ex}{cmd:)} 
+{cmd:partial(}{it:varlist}{cmd:)} +{cmdab:sm:all} +{cmdab:noc:onstant} {cmdab:h}ascons +{cmd:smatrix}{cmd:(}{it:matrix}{cmd:)} +{cmd:wmatrix}{cmd:(}{it:matrix}{cmd:)} +{cmd:first} {cmd:ffirst} {cmd:savefirst} {cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:rf} {cmd:saverf} {cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} +{cmd:nocollin} {cmd:noid} +{cmdab:l:evel}{cmd:(}{it:#}{cmd:)} +{cmdab:nohe:ader} +{cmdab:nofo:oter} +{cmdab:ef:orm}{cmd:(}{it:string}{cmd:)} +{cmdab:dep:name}{cmd:(}{it:varname}{cmd:)} +{bind:{cmd:plus} ]} + +{p 4}Replay syntax + +{p 8 14}{cmd:ivreg29} +{bind:[{cmd:,} {cmd:first}} +{cmd:ffirst} {cmd:rf} +{cmdab:l:evel}{cmd:(}{it:#}{cmd:)} +{cmdab:nohe:ader} +{cmdab:nofo:oter} +{cmdab:ef:orm}{cmd:(}{it:string}{cmd:)} +{cmdab:dep:name}{cmd:(}{it:varname}{cmd:)} +{cmd:plus} ]} + +{p 4}Version syntax + +{p 8 14}{cmd:ivreg29}, {cmd:version} + +{p}{cmd:ivreg29} is a version of {cmd:ivreg2} compatible with Stata version 9.2 or later. +{cmd:ivreg29} is not under active development. +See the online description of {view "http://fmwww.bc.edu/RePEc/bocode/i/ivreg2.hlp" :ivreg2} +for details. + +{p}{cmd:ivreg29} may be used with time-series or panel data, +in which case the data must be {cmd:tsset} +before using {cmd:ivreg29}; see help {help tsset}. + +{p}All {it:varlists} may contain time-series operators; +see help {help varlist}. + +{p}{cmd:by}, {cmd:rolling}, {cmd:statsby}, {cmd:xi}, +{cmd:bootstrap} and {cmd:jackknife} are allowed; see help {help prefix}. + +{p}{cmd:aweight}s, {cmd:fweight}s, {cmd:iweight}s and {cmd:pweight}s +are allowed; see help {help weights}. 
+ +{p}The syntax of {help predict} following {cmd:ivreg29} is + +{p 8 16}{cmd:predict} [{it:type}] {it:newvarname} [{cmd:if} {it:exp}] +[{cmd:in} {it:range}] [{cmd:,} {it:statistic}] + +{p}where {it:statistic} is + +{p 8 23}{cmd:xb}{space 11}fitted values; the default{p_end} +{p 8 23}{cmdab:r:esiduals}{space 4}residuals{p_end} +{p 8 23}{cmd:stdp}{space 9}standard error of the prediction{p_end} + +{p}These statistics are available both in and out of sample; +type "{cmd:predict} {it:...} {cmd:if e(sample)} {it:...}" +if wanted only for the estimation sample. + +{title:Contents} +{p 2}{help ivreg29##s_description:Description}{p_end} +{p 2}{help ivreg29##s_robust:Calculation of robust, cluster-robust, AC, HAC standard errors}{p_end} +{p 2}{help ivreg29##s_gmm:GMM estimation}{p_end} +{p 2}{help ivreg29##s_liml:LIML, k-class and GMM-CUE estimation}{p_end} +{p 2}{help ivreg29##s_sumopt:Summary of robust, HAC, AC, GMM, LIML and CUE options}{p_end} +{p 2}{help ivreg29##s_overid:Testing overidentifying restrictions}{p_end} +{p 2}{help ivreg29##s_endog:Testing subsets of regressors and instruments for endogeneity}{p_end} +{p 2}{help ivreg29##s_relevance:Tests of under- and weak identification}{p_end} +{p 2}{help ivreg29##s_redundancy:Testing instrument redundancy}{p_end} +{p 2}{help ivreg29##s_first:First-stage regressions, identification, and weak-id-robust inference}{p_end} +{p 2}{help ivreg29##s_rf:Reduced form estimates}{p_end} +{p 2}{help ivreg29##s_partial:Partialling-out exogenous regressors}{p_end} +{p 2}{help ivreg29##s_ols:OLS and Heteroskedastic OLS (HOLS) estimation}{p_end} +{p 2}{help ivreg29##s_collin:Collinearities}{p_end} +{p 2}{help ivreg29##s_speed:Speed options: nocollin and noid}{p_end} +{p 2}{help ivreg29##s_small:Small sample corrections}{p_end} +{p 2}{help ivreg29##s_options:Options summary}{p_end} +{p 2}{help ivreg29##s_macros:Remarks and saved results}{p_end} +{p 2}{help ivreg29##s_examples:Examples}{p_end} +{p 2}{help 
ivreg29##s_refs:References}{p_end} +{p 2}{help ivreg29##s_acknow:Acknowledgements}{p_end} +{p 2}{help ivreg29##s_citation:Authors}{p_end} +{p 2}{help ivreg29##s_citation:Citation of ivreg29}{p_end} + +{marker s_description}{title:Description} + +{p}{cmd:ivreg29} implements a range of single-equation estimation methods +for the linear regression model: OLS, instrumental +variables (IV, also known as two-stage least squares, 2SLS), +the generalized method of moments (GMM), +limited-information maximum likelihood (LIML), and k-class estimators. +In the language of IV/GMM, {it:varlist1} are the exogenous +regressors or "included instruments", +{it:varlist_iv} are the exogenous variables excluded +from the regression or "excluded instruments", +and {it:varlist2} the endogenous regressors that are being "instrumented". + +{p}{cmd:ivreg29} will also estimate linear regression models using +robust (heteroskedastic-consistent), +autocorrelation-consistent (AC), +heteroskedastic and autocorrelation-consistent (HAC) +and cluster-robust variance estimates. + +{p}{cmd:ivreg29} provides extensions to Stata's official {cmd:ivreg} +and {cmd:newey}. +{cmd:ivreg29} supports the same command syntax as official {cmd:ivreg} +and (almost) all of its options. 
+The main extensions available are as follows: +two-step feasible GMM estimation ({cmd:gmm2s} option) +and continuously-updated GMM estimation ({cmd:cue} option); +LIML and k-class estimation; +automatic output of overidentification and underidentification test statistics; +C statistic test of exogeneity of subsets of instruments +({cmd:orthog()} option); +endogeneity tests of endogenous regressors +({cmd:endog()} option); +test of instrument redundancy +({cmd:redundant()} option); +kernel-based autocorrelation-consistent (AC) +and heteroskedastic and autocorrelation consistent (HAC) standard errors +and covariance estimation ({cmd:bw(}{it:#}{cmd:)} option), +with user-specified choice of kernel ({cmd:kernel()} option); +default reporting of large-sample statistics +(z and chi-squared rather than t and F); +{cmd:small} option to report small-sample statistics; +first-stage regressions reported with various tests and statistics for +identification and instrument relevance; +{cmd:ffirst} option to report only these identification statistics +and not the first-stage regression results themselves; +{cmd:nofooter} option to suppress footer of regression output. +{cmd:ivreg29} can also be used for ordinary least squares (OLS) estimation +using the same command syntax as official {cmd:regress} and {cmd:newey}. + +{marker s_robust}{dlgtab:Calculation of robust, cluster-robust, AC, HAC standard errors} + +{p}The standard errors reported by {cmd:ivreg29} can be made consistent +in the presence of a variety of violations of the assumption of i.i.d. 
errors: +{bind:(1) {cmd:robust}} causes {cmd:ivreg29} to report standard errors that are +robust to the presence of arbitrary heteroskedasticity; +{bind:(2) {cmd:cluster}} standard errors are robust to both +arbitrary heteroskedasticity and arbitrary intra-group correlation; +{bind:(3) {cmd:bw(}{it:#}{cmd:)}} requests AC standard errors that are +robust to arbitrary autocorrelation; +{bind:(4) {cmd:bw(}{it:#}{cmd:)}} combined with {cmd:robust} +requests HAC standard errors that are +robust to both arbitrary heteroskedasticity and arbitrary autocorrelation. + +{p}{cmd:ivreg29} allows a variety of options for kernel-based HAC and AC estimation. +The {cmd:bw(}{it:#}{cmd:)} option sets the bandwidth used in the estimation +and {cmd:kernel(}{it:string}{cmd:)} is the kernel used; +the default kernel is the Bartlett kernel, +also known in econometrics as Newey-West (see help {help newey}). When +using the Bartlett, Parzen, or Quadratic spectral kernels, the automatic +bandwidth selection procedure of Newey and West (1994) can be chosen +by specifying {cmd:bw(}{it:auto}{cmd:)}. +{cmd:ivreg29} can also be used for kernel-based estimation +with panel data, i.e., a cross-section of time series. +Before using {cmd:ivreg29} for kernel-based estimation +of time series or panel data, +the data must be {cmd:tsset}; see help {help tsset}. + +{marker s_gmm}{dlgtab:GMM estimation} + +{p}When combined with the above options, the {cmd:gmm2s} option generates +efficient estimates of the coefficients as well as consistent +estimates of the standard errors. +The {cmd:gmm2s} option implements the two-step efficient +generalized method of moments (GMM) estimator. +The efficient GMM estimator minimizes the GMM criterion function +J=N*g'*W*g, where N is the sample size, +g are the orthogonality or moment conditions +(specifying that all the exogenous variables, or instruments, +in the equation are uncorrelated with the error term) +and W is a weighting matrix. 
+In two-step efficient GMM, the efficient or optimal weighting matrix +is the inverse of an estimate of the covariance matrix of orthogonality conditions. +The efficiency gains of this estimator relative to the +traditional IV/2SLS estimator derive from the use of the optimal +weighting matrix, the overidentifying restrictions of the model, +and the relaxation of the i.i.d. assumption. +For an exactly-identified model, +the efficient GMM and traditional IV/2SLS estimators coincide, +and under the assumptions of conditional homoskedasticity and independence, +the efficient GMM estimator is the traditional IV/2SLS estimator. +For further details, see Hayashi (2000), pp. 206-13 and 226-27. + +{p}The {cmd:wmatrix} option allows the user to specify a weighting matrix +rather than computing the optimal weighting matrix. +Estimation with the {cmd:wmatrix} option yields a possibly inefficient GMM estimator. +{cmd:ivreg29} will use this inefficient estimator as the first-step GMM estimator +in two-step efficient GMM when combined with the {cmd:gmm2s} option; +otherwise, {cmd:ivreg29} reports the regression results +using this inefficient GMM estimator. + +{p}The {cmd:smatrix} option allows the user to directly +specify the matrix S, the covariance matrix of orthogonality conditions. +{cmd:ivreg29} will use this matrix in the calculation of the variance-covariance +matrix of the estimator, the J statistic, +and, if the {cmd:gmm2s} option is specified, +the two-step efficient GMM coefficients. +The {cmd:smatrix} can be useful for guaranteeing a positive test statistic +in user-specified "GMM-distance tests" (see {help ivreg29##s_endog:below}). +For further details, see Hayashi (2000), pp. 220-24. + +{marker s_liml}{dlgtab:LIML, k-class and GMM-CUE estimation} + +{marker liml}{p} Maximum-likelihood estimation of a single equation of this form +(endogenous RHS variables and excluded instruments) +is known as limited-information maximum likelihood or LIML. 
+The overidentifying restrictions test +reported after LIML estimation is the Anderson-Rubin (1950) overidentification +statistic in a homoskedastic context. +LIML, OLS and IV/2SLS are examples of k-class estimators. +LIML is a k-class estimator with k=the LIML eigenvalue lambda; +2SLS is a k-class estimator with k=1; +OLS is a k-class estimator with k=0. +Estimators based on other values of k have been proposed. +Fuller's modified LIML (available with the {cmd:fuller(}{it:#}{cmd:)} option) +sets k = lambda - alpha/(N-L), where lambda is the LIML eigenvalue, +L = number of instruments (L1 excluded and L2 included), +and the Fuller parameter alpha is a user-specified positive constant. +Nagar's bias-adjusted 2SLS estimator can be obtained with the +{cmd:kclass(}{it:#}{cmd:)} option by setting +k = 1 + (L-K)/N, where L-K = number of overidentifying restrictions, +K = number of regressors (K1 endogenous and K2=L2 exogenous) +and N = the sample size. +For a discussion of LIML and k-class estimators, +see Davidson and MacKinnon (1993, pp. 644-51). + +{p} The GMM generalization of the LIML estimator +to the case of possibly heteroskedastic +and autocorrelated disturbances +is the "continuously-updated" GMM estimator or CUE +of Hansen, Heaton and Yaron (1996). +The CUE estimator directly maximizes the GMM objective function +J=N*g'*W(b_cue)*g, where W(b_cue) is an optimal weighting matrix +that depends on the estimated coefficients b_cue. +{cmd:cue} combined with {cmd:robust}, {cmd:cluster}, and/or {cmd:bw}, +generates coefficient estimates that are efficient in the presence +of the corresponding deviations from homoskedasticity. +Specifying {cmd:cue} with no other options +is equivalent to the combination of the options {cmd:liml} and {cmd:coviv}. +The CUE estimator requires numerical optimization methods, +and the implementation here uses Stata's {cmd:ml} routine. 
+The starting values are either IV or two-step efficient GMM +coefficient estimates; +these can be overridden with the {cmd:cueinit} option, +which takes the matrix of starting values b as its argument. +{cmd:cueoptions} passes options to Stata's {cmd:ml}; see help {help ml}. +Estimation with the {cmd:cue} option can be slow and problematic, +and it should be used with caution. +If the user wants to evaluate the CUE objective function at +an arbitrary user-defined coefficient vector instead of having {cmd:ivreg29} +find the coefficient vector that minimizes the objective function, +the {cmd:b0(}{it:matrix}{cmd:)} option can be used. +The value of the CUE objective function at {cmd:b0} +is the Sargan or Hansen J statistic reported in the output. + +{marker s_sumopt}{dlgtab:Summary of robust, HAC, AC, GMM, LIML and CUE options} + + +{col 45}VCE option +Estimator + option{col 20}(none) {col 65}{cmd:robust}, {cmd:cluster}, {cmd:bw}, {cmd:kernel} +{hline} +(none){col 20}IV/2SLS{col 65}IV/2SLS with +{col 20}SEs consistent under homoskedasticity{col 65}robust SEs + +{cmd:liml}{col 20}LIML{col 65}LIML with +{col 20}SEs consistent under homoskedasticity{col 65}robust SEs + +{cmd:gmm2s}{col 20}IV/2SLS{col 65}Two-step GMM with +{col 20}SEs consistent under homoskedasticity{col 65}robust SEs + +{cmd:cue}{col 20}LIML{col 65}CUE GMM with +{col 20}SEs consistent under homoskedasticity{col 65}robust SEs + +{cmd:kclass}{col 20}k-class estimator{col 65}k-class estimator with +{col 20}SEs consistent under homoskedasticity{col 65}robust SEs + +{cmd:wmatrix}{col 20}Possibly inefficient GMM{col 65}Ineff GMM with +{col 20}SEs consistent under homoskedasticity{col 65}robust SEs + +{cmd:gmm2s} + {col 20}Two-step GMM{col 65}two-step GMM with +{cmd:wmatrix}{col 20}with user-specified first step{col 65}robust SEs +{col 20}SEs consistent under homoskedasticity + + +{p}With the {cmd:bw} or {cmd:bw} and {cmd:kernel} VCE options, +SEs are autocorrelation-robust (AC). 
+Combining the {cmd:robust} option with {cmd:bw}, SEs are heteroskedasticity- and +autocorrelation-robust (HAC). + +{p}For further details, see Hayashi (2000), pp. 206-13 and 226-27 +(on GMM estimation), Wooldridge (2002), p. 193 (on cluster-robust GMM), +and Hayashi (2000), pp. 406-10 or Cushing and McGarvey (1999) +(on kernel-based covariance estimation). + +{marker s_overid}{marker overidtests}{dlgtab:Testing overidentifying restrictions} + +{p}The Sargan-Hansen test is a test of overidentifying restrictions. +The joint null hypothesis is that the instruments are valid +instruments, i.e., uncorrelated with the error term, +and that the excluded instruments are correctly excluded from the estimated equation. +Under the null, the test statistic is distributed as chi-squared +in the number of (L-K) overidentifying restrictions. +A rejection casts doubt on the validity of the instruments. +For the efficient GMM estimator, the test statistic is +Hansen's J statistic, the minimized value of the GMM criterion function. +For the 2SLS estimator, the test statistic is Sargan's statistic, +typically calculated as N*R-squared from a regression of the IV residuals +on the full set of instruments. +Under the assumption of conditional homoskedasticity, +Hansen's J statistic becomes Sargan's statistic. +The J statistic is consistent in the presence of heteroskedasticity +and (for HAC-consistent estimation) autocorrelation; +Sargan's statistic is consistent if the disturbance is homoskedastic +and (for AC-consistent estimation) if it is also autocorrelated. +With {cmd:robust}, {cmd:bw} and/or {cmd:cluster}, +Hansen's J statistic is reported. +In the latter case the statistic allows observations +to be correlated within groups. +For further discussion see e.g. Hayashi (2000, pp. 227-8, 407, 417). + +{p}The Sargan statistic can also be calculated after +{cmd:ivreg} or {cmd:ivreg29} by the command {cmd:overid}. 
+The features of {cmd:ivreg29} that are unavailable in {cmd:overid} +are the J statistic and the C statistic; +the {cmd:overid} options unavailable in {cmd:ivreg29} +are various small-sample and pseudo-F versions of Sargan's statistic +and its close relative, Basmann's statistic. +See help {help overid} (if installed). + +{marker s_endog}{dlgtab:Testing subsets of regressors and instruments for endogeneity} + +{marker ctest}{p}The C statistic +(also known as a "GMM distance" +or "difference-in-Sargan" statistic) +implemented using the {cmd:orthog} option, +allows a test of a subset of the orthogonality conditions, i.e., +it is a test of the exogeneity of one or more instruments. +It is defined as +the difference of the Sargan-Hansen statistic +of the equation with the smaller set of instruments +(valid under both the null and alternative hypotheses) +and the equation with the full set of instruments, +i.e., including the instruments whose validity is suspect. +Under the null hypothesis that +both the smaller set of instruments +and the additional, suspect instruments are valid, +the C statistic is distributed as chi-squared +in the number of instruments tested. +Note that failure to reject the null hypothesis +requires that the full set of orthogonality conditions be valid; +the C statistic and the Sargan-Hansen test statistics +for the equations with both the smaller and full set of instruments +should all be small. +The instruments tested may be either excluded or included exogenous variables. +If excluded exogenous variables are being tested, +the equation that does not use these orthogonality conditions +omits the suspect instruments from the excluded instruments. +If included exogenous variables are being tested, +the equation that does not use these orthogonality conditions +treats the suspect instruments as included endogenous variables. 
+To guarantee that the C statistic is non-negative in finite samples,
+the estimated covariance matrix of the full set orthogonality conditions
+is used to calculate both Sargan-Hansen statistics
+(in the case of simple IV/2SLS, this amounts to using the MSE
+from the unrestricted equation to calculate both Sargan statistics).
+If estimation is by LIML, the C statistic reported
+is now based on the Sargan-Hansen test statistics from
+the restricted and unrestricted equation.
+For further discussion, see Hayashi (2000), pp. 218-22 and pp. 232-34.
+
+{marker endogtest}{p}Endogeneity tests of one or more endogenous regressors
+can be implemented using the {cmd:endog} option.
+Under the null hypothesis that the specified endogenous regressors
+can actually be treated as exogenous, the test statistic is distributed
+as chi-squared with degrees of freedom equal to the number of regressors tested.
+The endogeneity test implemented by {cmd:ivreg29}, is, like the C statistic,
+defined as the difference of two Sargan-Hansen statistics:
+one for the equation with the smaller set of instruments,
+where the suspect regressor(s) are treated as endogenous,
+and one for the equation with the larger set of instruments,
+where the suspect regressors are treated as exogenous.
+Also like the C statistic, the estimated covariance matrix used
+guarantees a non-negative test statistic.
+Under conditional homoskedasticity,
+this endogeneity test statistic is numerically equal to
+a Hausman test statistic; see Hayashi (2000, pp. 233-34).
+The endogeneity test statistic can also be calculated after
+{cmd:ivreg} or {cmd:ivreg29} by the command {cmd:ivendog}.
+Unlike the Durbin-Wu-Hausman tests reported by {cmd:ivendog},
+the {cmd:endog} option of {cmd:ivreg29} can report test statistics
+that are robust to various violations of conditional homoskedasticity;
+the {cmd:ivendog} option unavailable in {cmd:ivreg29}
+is the Wu-Hausman F-test version of the endogeneity test. 
+See help {help ivendog} (if installed).
+
+{marker s_relevance}{dlgtab:Tests of under- and weak identification}
+
+{marker idtest}{p}{cmd:ivreg29} automatically reports tests of
+both underidentification and weak identification.
+The underidentification test is an LM test of whether the equation is identified,
+i.e., that the excluded instruments are "relevant",
+meaning correlated with the endogenous regressors.
+The test is essentially the test of the rank of a matrix:
+under the null hypothesis that the equation is underidentified,
+the matrix of reduced form coefficients on the L1 excluded instruments
+has rank=K1-1 where K1=number of endogenous regressors.
+Under the null,
+the statistic is distributed as chi-squared
+with degrees of freedom=(L1-K1+1).
+A rejection of the null indicates that the matrix is full column rank,
+i.e., the model is identified.
+When errors are assumed to be i.i.d.,
+{cmd:ivreg29} automatically reports an LM version of
+the Anderson (1951) canonical correlations test.
+Denoting the minimum eigenvalue of the canonical correlations as CCEV,
+the smallest canonical correlation between the K1 endogenous regressors
+and the L1 excluded instruments
+(after partialling out the K2=L2 exogenous regressors)
+is sqrt(CCEV),
+and the Anderson LM test statistic is N*CCEV,
+i.e., N times the square of the smallest canonical correlation.
+With the {cmd:first} or {cmd:ffirst} options,
+{cmd:ivreg29} also reports the closely-related
+Cragg-Donald (1993) Wald test statistic.
+Again assuming i.i.d. errors,
+and denoting the minimum eigenvalue of the Cragg-Donald statistic as CDEV,
+CDEV=CCEV/(1-CCEV),
+and the Cragg-Donald Wald statistic is N*CDEV.
+Like the Anderson LM statistic, the Cragg-Donald Wald statistic
+is distributed as chi-squared with (L1-K1+1) degrees of freedom.
+Note that a result of rejection of the null
+should be treated with caution,
+because weak instrument problems may still be present.
+See Hall et al. 
(1996) for a discussion of this test,
+and below for discussion of testing for the presence of weak instruments.
+
+{p}When the i.i.d. assumption is dropped
+and {cmd:ivreg29} reports heteroskedastic, AC, HAC
+or cluster robust statistics,
+the Anderson LM and Cragg-Donald Wald statistics are no longer valid.
+In these cases, {cmd:ivreg29} reports the LM and Wald versions
+of the Kleibergen-Paap (2006) rk statistic,
+also distributed as chi-squared with (L1-K1+1) degrees of freedom.
+The rk statistic can be seen as a generalization of these tests
+to the case of non-i.i.d. errors;
+see Kleibergen and Paap (2006) for discussion,
+and Kleibergen and Schaffer (2007) for a Stata implementation, {cmd:ranktest}.
+{cmd:ivreg29} requires {cmd:ranktest} to be installed,
+and will prompt the user to install it if necessary.
+If {cmd:ivreg29} is invoked with the {cmd:robust} option,
+the rk underidentification test statistics will be heteroskedastic-robust,
+and similarly with {cmd:bw} and {cmd:cluster}.
+
+{marker widtest}{p}"Weak identification" arises when the excluded instruments are correlated
+with the endogenous regressors, but only weakly.
+Estimators can perform poorly when instruments are weak,
+and some estimators are more robust to weak instruments (e.g., LIML)
+than others (e.g., IV);
+see, e.g., Stock and Yogo (2002, 2005) for further discussion.
+When errors are assumed to be i.i.d.,
+the test for weak identification automatically reported
+by {cmd:ivreg29} is an F version of the Cragg-Donald Wald statistic, (N-L)/L1*CDEV,
+where L is the number of instruments and L1 is the number of excluded instruments. 
+Stock and Yogo (2005) have compiled critical values
+for the Cragg-Donald F statistic for
+several different estimators (IV, LIML, Fuller-LIML),
+several different definitions of "perform poorly" (based on bias and test size),
+and a range of configurations (up to 100 excluded instruments
+and up to 2 or 3 endogenous regressors,
+depending on the estimator).
+{cmd:ivreg29} will report the Stock-Yogo critical values
+if these are available;
+missing values mean that the critical values
+haven't been tabulated or aren't applicable.
+See Stock and Yogo (2002, 2005) for details.
+
+{p}When the i.i.d. assumption is dropped
+and {cmd:ivreg29} is invoked with the {cmd:robust}, {cmd:bw} or {cmd:cluster} options,
+the Cragg-Donald-based weak instruments test is no longer valid.
+{cmd:ivreg29} instead reports a correspondingly-robust
+Kleibergen-Paap Wald rk F statistic.
+The degrees of freedom adjustment for the rk statistic is (N-L)/L1,
+as with the Cragg-Donald F statistic,
+except in the cluster-robust case,
+when the adjustment is ((N-L)/L1)*((N-1)/N)*((N_clust-1)/N_clust),
+following the standard Stata small-sample adjustment for cluster-robust.
+The critical values reported by {cmd:ivreg29} for the Kleibergen-Paap statistic
+are the Stock-Yogo critical values for the Cragg-Donald i.i.d. case.
+The critical values reported with 2-step GMM
+are the Stock-Yogo IV critical values,
+and the critical values reported with CUE
+are the LIML critical values.
+
+{marker s_redundancy}{dlgtab:Testing instrument redundancy}
+
+{marker redtest}{p}The {cmd:redundant} option allows a test of
+whether a subset of excluded instruments is "redundant".
+Excluded instruments are redundant if the asymptotic efficiency
+of the estimation is not improved by using them.
+Breusch et al. 
(1999) show that the condition for the redundancy of a set of instruments
+can be stated in several equivalent ways:
+e.g., in the reduced form regressions of the endogenous regressors
+on the full set of instruments,
+the redundant instruments have statistically insignificant coefficients;
+or the partial correlations between the endogenous regressors
+and the instruments in question are zero.
+{cmd:ivreg29} uses a formulation based on testing the rank
+of the matrix cross-product between the endogenous regressors
+and the possibly-redundant instruments after both have
+all other instruments partialled-out;
+{cmd:ranktest} is used to test whether the matrix has zero rank.
+The test statistic is an LM test
+and numerically equivalent to a regression-based LM test.
+Under the null that the specified instruments are redundant,
+the statistic is distributed as chi-squared
+with degrees of freedom=(#endogenous regressors)*(#instruments tested).
+Rejection of the null indicates that
+the instruments are not redundant.
+When the i.i.d. assumption is dropped
+and {cmd:ivreg29} reports heteroskedastic, AC, HAC
+or cluster-robust statistics,
+the redundancy test statistic is similarly robust.
+See Baum et al. (2007) for further discussion.
+
+{p}Calculation and reporting of all underidentification
+and weak identification statistics
+can be suppressed with the {cmd:noid} option.
+
+{marker s_first}{dlgtab:First-stage regressions, identification, and weak-id-robust inference}
+
+{marker apstats}{p}The {cmd:first} and {cmd:ffirst} options report
+various first-stage results and identification statistics.
+Tests of both underidentification and weak identification are reported
+for each endogenous regressor separately,
+using the method described by Angrist and Pischke (2009), pp. 217-18
+(see also the note on their "Mostly Harmless Econometrics"
+{browse "http://www.mostlyharmlesseconometrics.com/2009/10/multivariate-first-stage-f-not/" :blog}). 
+
+{p}The Angrist-Pischke (AP) first-stage chi-squared and F statistics
+are tests of underidentification and weak identification, respectively,
+of individual endogenous regressors.
+They are constructed by "partialling-out" linear projections of the
+remaining endogenous regressors.
+The AP chi-squared Wald statistic is distributed as chi2(L1-K1+1)
+under the null that the particular endogenous regressor
+in question is unidentified.
+In the special case of a single endogenous regressor,
+the AP statistic reported is identical to underidentification statistics reported
+in the {cmd:ffirst} output,
+namely the Cragg-Donald Wald statistic (if i.i.d.)
+or the Kleibergen-Paap rk Wald statistic (if robust, cluster-robust, AC or HAC
+statistics have been requested);
+see {help ivreg29##idtest:above}.
+Note the difference in the null hypotheses if there are two or more endogenous regressors:
+the AP test will fail to reject if a particular endogenous regressor is unidentified,
+whereas the Anderson/Cragg-Donald/Kleibergen-Paap tests of underidentification
+will fail to reject if {it:any} of the endogenous regressors is unidentified.
+
+{p}The AP first-stage F statistic is the F form of the same test statistic.
+It can be used as a diagnostic for whether a particular endogenous regressor
+is "weakly identified" (see {help ivreg29##widtest:above}).
+Critical values for the AP first-stage F as a test of weak identification are not available,
+but the test statistic can be compared to the Stock-Yogo (2002, 2005) critical
+values for the Cragg-Donald F statistic with K1=1.
+
+{p}The first-stage results are always reported with small-sample statistics,
+to be consistent with the recommended use of the first-stage F-test as a diagnostic.
+If the estimated equation is reported with robust standard errors,
+the first-stage F-test is also robust.
+
+{p}A full set of first-stage statistics for each of the K1 endogenous regressors
+is saved in the matrix e(first). 
+These include (a) the AP F and chi-squared statistics; (b) the "partial R-squared"
+(squared partial correlation) corresponding to the AP statistics;
+(c) Shea's (1997) partial R-squared measure (closely related to the AP statistic,
+but not amenable to formal testing); (d) the simple F and partial R-squared
+statistics for each of the first-stage equations,
+with no adjustments if there is more than one endogenous regressor.
+In the special case of a single endogenous regressor,
+these F statistics and partial R-squareds are identical.
+
+{marker wirobust}{p}The first-stage output also includes
+two statistics that provide weak-instrument robust inference
+for testing the significance of the endogenous regressors in the structural equation being estimated.
+The first statistic is the Anderson-Rubin (1949) test
+(not to be confused with the Anderson-Rubin overidentification test for LIML estimation;
+see {help ivreg29##s_liml:above}).
+The second is the closely related Stock-Wright (2000) S statistic.
+The null hypothesis tested in both cases is that
+the coefficients of the endogenous regressors in the structural equation are jointly equal to zero,
+and, in addition, that the overidentifying restrictions are valid.
+Both tests are robust to the presence of weak instruments.
+The tests are equivalent to estimating the reduced form of the equation
+(with the full set of instruments as regressors)
+and testing that the coefficients of the excluded instruments are jointly equal to zero.
+In the form reported by {cmd:ivreg29}, the Anderson-Rubin statistic is a Wald test
+and the Stock-Wright S statistic is a GMM-distance test.
+Both statistics are distributed as chi-squared with L1 degrees of freedom,
+where L1=number of excluded instruments.
+The traditional F-stat version of the Anderson-Rubin test is also reported.
+See Stock and Wright (2000), Dufour (2003), Chernozhukov and Hansen (2005) and Kleibergen (2007)
+for further discussion. 
+For related alternative test statistics that are also robust to weak instruments, +see {help condivreg} and {help rivtest}, +and the corresponding discussions +in Moreira and Poi (2003) and Mikusheva and Poi (2006), +and in Finlay and Magnusson (2009), respectively. + +{p}The {cmd:savefirst} option requests that the individual first-stage regressions +be saved for later access using the {cmd:estimates} command. +If saved, they can also be displayed using {cmd:first} or {cmd:ffirst} and the {cmd:ivreg29} replay syntax. +The regressions are saved with the prefix "_ivreg29_", +unless the user specifies an alternative prefix with the +{cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} option. + +{marker s_rf}{dlgtab:Reduced form estimates} + +{p}The {cmd:rf} option requests that the reduced form estimation of the equation be displayed. +The {cmd:saverf} option requests that the reduced form estimation is saved +for later access using the {cmd:estimates} command. +If saved, it can also be displayed using the {cmd:rf} and the {cmd:ivreg29} replay syntax. +The regression is saved with the prefix "_ivreg29_", +unless the user specifies an alternative prefix with the +{cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} option. + +{marker s_partial}{dlgtab:Partialling-out exogenous regressors} + +{marker partial}{p}The {cmd:partial(}{it:varlist}{cmd:)} option requests that +the exogenous regressors in {it:varlist} are "partialled out" +from all the other variables (other regressors and excluded instruments) in the estimation. +If the equation includes a constant, it is also automatically partialled out as well. +The coefficients corresponding to the regressors in {it:varlist} are not calculated. +By the Frisch-Waugh-Lovell (FWL) theorem, in IV, +two-step GMM and LIML estimation the coefficients for the remaining regressors +are the same as those that would be obtained if the variables were not partialled out. +(NB: this does not hold for CUE or GMM iterated more than two steps.) 
+The {cmd:partial} option is most useful when using {cmd:cluster} +and #clusters < (#exogenous regressors + #excluded instruments). +In these circumstances, +the covariance matrix of orthogonality conditions S is not of full rank, +and efficient GMM and overidentification tests are infeasible +since the optimal weighting matrix W = {bind:S^-1} +cannot be calculated. +The problem can be addressed by using {cmd:partial} +to partial out enough exogenous regressors for S to have full rank. +A similar problem arises when the regressors include a variable that is a singleton dummy, +i.e., a variable with one 1 and N-1 zeros or vice versa, +if a robust covariance matrix is requested. +The singleton dummy causes the robust covariance matrix estimator +to be less than full rank. +In this case, partialling-out the variable with the singleton dummy solves the problem. +Specifying {cmd:partial(_cons)} will cause just the constant to be partialled-out, +i.e., the equation will be estimated in deviations-from-means form. +When {cmd:ivreg29} is invoked with {cmd:partial}, +it reports test statistics with the same small-sample adjustments +as if estimating without {cmd:partial}. +Note that after estimation using the {cmd:partial} option, +the post-estimation {cmd:predict} can be used only to generate residuals, +and that in the current implementation, +{cmd:partial} is not compatible with endogenous variables or instruments (included or excluded) +that use time-series operators. + +{marker s_ols}{dlgtab:OLS and Heteroskedastic OLS (HOLS) estimation} + +{p}{cmd:ivreg29} also allows straightforward OLS estimation +by using the same syntax as {cmd:regress}, i.e., +{it:ivreg29 depvar varlist1}. +This can be useful if the user wishes to use one of the +features of {cmd:ivreg29} in OLS regression, e.g., AC or +HAC standard errors. 
+ +{p}If the list of endogenous variables {it:varlist2} is empty +but the list of excluded instruments {it:varlist_iv} is not, +and the option {cmd:gmm2s} is specified, +{cmd:ivreg29} calculates Cragg's "heteroskedastic OLS" (HOLS) estimator, +an estimator that is more efficient than OLS +in the presence of heteroskedasticity of unknown form +(see Davidson and MacKinnon (1993), pp. 599-600). +If the option {cmd:bw(}{it:#}{cmd:)} is specified, +the HOLS estimator is efficient in the presence of +arbitrary autocorrelation; +if both {cmd:bw(}{it:#}{cmd:)} and {cmd:robust} are specified +the HOLS estimator is efficient in the presence of +arbitrary heteroskedasticity and autocorrelation; +and if {cmd:cluster(}{it:varname}{cmd:)} is used, +the HOLS estimator is efficient in the presence of +arbitrary heteroskedasticity and within-group correlation. +The efficiency gains of HOLS derive from the orthogonality conditions +of the excluded instruments listed in {it:varlist_iv}. +If no endogenous variables are specified and {cmd:gmm2s} is not specified, +{cmd:ivreg29} reports standard OLS coefficients. +The Sargan-Hansen statistic reported +when the list of endogenous variables {it:varlist2} is empty +is a Lagrange multiplier (LM) test +of the hypothesis that the excluded instruments {it:varlist_iv} are +correctly excluded from the restricted model. +If the estimation is LIML, the LM statistic reported +is now based on the Sargan-Hansen test statistics from +the restricted and unrestricted equation. +For more on LM tests, see e.g. Wooldridge (2002), pp. 58-60. +Note that because the approach of the HOLS estimator +has applications beyond heteroskedastic disturbances, +and to avoid confusion concerning the robustness of the estimates, +the estimators presented above as "HOLS" +are described in the output of {cmd:ivreg29} +as "2-Step GMM", "CUE", etc., as appropriate. 
+
+{marker s_collin}{dlgtab:Collinearities}
+
+{p}{cmd:ivreg29} checks the lists of included instruments,
+excluded instruments, and endogenous regressors
+for collinearities and duplicates. If an endogenous regressor is
+collinear with the instruments, it is reclassified as exogenous. If any
+endogenous regressors are collinear with each other, some are dropped.
+If there are any collinearities among the instruments, some are dropped.
+In Stata 9+, excluded instruments are dropped before included instruments.
+If any variables are dropped, a list of their names is saved
+in the macros {cmd:e(collin)} and/or {cmd:e(dups)}.
+Lists of the included and excluded instruments
+and the endogenous regressors with collinear variables and duplicates removed
+are also saved in macros with "1" appended
+to the corresponding macro names.
+
+{p}Collinearity checks can be suppressed with the {cmd:nocollin} option.
+
+{marker s_speed}{dlgtab:Speed options: nocollin and noid}
+
+{p}Two options are available for speeding execution.
+{cmd:nocollin} specifies that the collinearity checks not be performed.
+{cmd:noid} suspends calculation and reporting of
+the underidentification and weak identification statistics
+in the main output.
+
+{marker s_small}{dlgtab:Small sample corrections}
+
+{p}Mean square error = sqrt(RSS/(N-K)) if {cmd:small}, = sqrt(RSS/N) otherwise.
+
+{p}If {cmd:robust} is chosen, the finite sample adjustment
+(see {hi:[R] regress}) to the robust variance-covariance matrix
+qc = N/(N-K) if {cmd:small}, qc = 1 otherwise.
+
+{p}If {cmd:cluster} is chosen, the finite sample adjustment
+qc = (N-1)/(N-K)*M/(M-1) if {cmd:small}, where M=number of clusters,
+qc = 1 otherwise.
+
+{p}The Sargan and C (difference-in-Sargan) statistics use
+error variance = RSS/N, i.e., there is no small sample correction.
+
+{p}A full discussion of these computations and related topics
+can be found in Baum, Schaffer, and Stillman (2003) and Baum, Schaffer and
+Stillman (2007). 
Some features of the program postdate the former article and are described in the latter paper. + + +{marker s_options}{title:Options summary} + +{p 0 4}{cmd:gmm2s} requests the two-step efficient GMM estimator. +If no endogenous variables are specified, the estimator is Cragg's HOLS estimator. + +{p 0 4}{cmd:liml} requests the limited-information maximum likelihood estimator. + +{p 0 4}{cmd:fuller(}{it:#}{cmd:)} specifies that Fuller's modified LIML estimator +is calculated using the user-supplied Fuller parameter alpha, +a non-negative number. +Alpha=1 has been suggested as a good choice. + +{p 0 4}{cmd:kclass(}{it:#}{cmd:)} specifies that a general k-class estimator is calculated +using the user-supplied #, a non-negative number. + +{p 0 4}{cmd:coviv} specifies that the matrix used to calculate the +covariance matrix for the LIML or k-class estimator +is based on the 2SLS matrix, i.e., with k=1. +In this case the covariance matrix will differ from that calculated for the 2SLS +estimator only because the estimate of the error variance will differ. +The default is for the covariance matrix to be based on the LIML or k-class matrix. + +{p 0 4}{cmd:cue} requests the GMM continuously-updated estimator (CUE). + +{p 0 4}{cmd:cueinit(}{it:matrix}{cmd:)} specifies that the starting values +for the CUE estimator use those in a user-supplied matrix b. +If omitted, the default behavior is to use starting values +from IV or 2-step efficient GMM estimation. + +{p 0 4}{cmd:cueopt(}{it:string}{cmd:)} passes user-specified options +to Stata's {cmd:ml} routine; see help {help ml}. + +{p 0 4}{cmd:b0(}{it:matrix}{cmd:)} specifies that the J statistic +(i.e., the value of the CUE objective function) +should be calculated for an arbitrary coefficient vector {cmd:b0}. +That vector must be provided as a matrix with appropriate row and column names. +Under- and weak-identification statistics are not reported +in the output. 
+
+{p 0 4}{cmd:robust} specifies that the Eicker/Huber/White/sandwich estimator of
+variance is to be used in place of the traditional calculation. {cmd:robust}
+combined with {cmd:cluster()} further allows residuals which are not
+independent within cluster (although they must be independent between
+clusters). See {hi:[U] Obtaining robust variance estimates}.
+
+{p 0 4}{cmd:cluster}{cmd:(}{it:varname}{cmd:)} specifies that the observations
+are independent across groups (clusters) but not necessarily independent
+within groups. {it:varname} specifies to which group each observation
+belongs; e.g., {cmd:cluster(personid)} in data with repeated observations on
+individuals. {cmd:cluster()} can be used with {help pweight}s to produce
+estimates for unstratified cluster-sampled data, but see help {help svyreg}
+for a command especially designed for survey data. Specifying {cmd:cluster()}
+implies {cmd:robust}.
+
+{p 0 4}{cmd:bw(}{it:#}{cmd:)} implements AC or HAC covariance estimation
+with bandwidth equal to {it:#}, where {it:#} is an integer greater than zero.
+Specifying {cmd:robust} implements HAC covariance estimation;
+omitting it implements AC covariance estimation.
+If the Bartlett (default), Parzen or Quadratic Spectral kernels are selected,
+the value {cmd:auto} may be given (rather than an integer)
+to invoke Newey and West's (1994) automatic bandwidth selection procedure.
+
+{p 0 4}{cmd:kernel(}{it:string}{cmd:)} specifies the kernel
+to be used for AC and HAC covariance estimation;
+the default kernel is Bartlett (also known in econometrics
+as Newey-West). Other kernels available are (abbreviations in parentheses):
+Truncated (tru); Parzen (par); Tukey-Hanning (thann); Tukey-Hamming (thamm);
+Daniell (dan); Tent (ten); and Quadratic-Spectral (qua or qs).
+
+{p 4 4}Note: in the cases of the Bartlett, Parzen,
+and Tukey-Hanning/Hamming kernels, the number of lags used
+to construct the kernel estimate equals the bandwidth minus one. 
+Stata's official {cmd:newey} implements
+HAC standard errors based on the Bartlett kernel,
+and requires the user to specify
+the maximum number of lags used and not the bandwidth;
+see help {help newey}.
+If these kernels are used with {cmd:bw(1)},
+no lags are used and {cmd:ivreg29} will report the usual
+Eicker/Huber/White/sandwich variance estimates.
+
+{p 0 4}{cmd:wmatrix(}{it:matrix}{cmd:)} specifies a user-supplied weighting matrix
+in place of the computed optimal weighting matrix.
+The matrix must be positive definite.
+The user-supplied matrix must have the same row and column names
+as the instrument variables in the regression model (or a subset thereof).
+
+{p 0 4}{cmd:smatrix(}{it:matrix}{cmd:)} specifies a user-supplied covariance matrix
+of the orthogonality conditions to be used in calculating the covariance matrix of the estimator.
+The matrix must be positive definite.
+The user-supplied matrix must have the same row and column names
+as the instrument variables in the regression model (or a subset thereof).
+
+{p 0 4}{cmd:orthog}{cmd:(}{it:varlist_ex}{cmd:)} requests that a C-statistic
+be calculated as a test of the exogeneity of the instruments in {it:varlist_ex}.
+These may be either included or excluded exogenous variables.
+The standard order condition for identification applies:
+the restricted equation that does not use these variables
+as exogenous instruments must still be identified.
+
+{p 0 4}{cmd:endog}{cmd:(}{it:varlist_en}{cmd:)} requests that a C-statistic
+be calculated as a test of the endogeneity
+of the endogenous regressors in {it:varlist_en}.
+
+{p 0 4}{cmd:redundant}{cmd:(}{it:varlist_ex}{cmd:)} requests an LM test
+of the redundancy of the instruments in {it:varlist_ex}.
+These must be excluded exogenous variables.
+The standard order condition for identification applies:
+the restricted equation that does not use these variables
+as exogenous instruments must still be identified. 
+ +{p 0 4}{cmd:small} requests that small-sample statistics (F and t-statistics) +be reported instead of large-sample statistics (chi-squared and z-statistics). +Large-sample statistics are the default. +The exception is the statistic for the significance of the regression, +which is always reported as a small-sample F statistic. + +{p 0 4}{cmd:noconstant} suppresses the constant term (intercept) in the +regression. If {cmd:noconstant} is specified, the constant term is excluded +from both the final regression and the first-stage regression. To include a +constant in the first-stage when {cmd:noconstant} is specified, explicitly +include a variable containing all 1's in {it:varlist_iv}. + +{p 0 4}{cmd:first} requests that the full first-stage regression results be displayed, +along with the associated diagnostic and identification statistics. + +{p 0 4}{cmd:ffirst} requests the first-stage diagnostic and identification statistics. +The results are saved in various e() macros. + +{p 0 4}{cmd:nocollin} suppresses the checks for collinearities +and duplicate variables. + +{p 0 4}{cmd:noid} suppresses the calculation and reporting +of underidentification and weak identification statistics. + +{p 0 4}{cmd:savefirst} requests that the first-stage regressions results +are saved for later access using the {cmd:estimates} command. +The names under which the first-stage regressions are saved +are the names of the endogenous regressors prefixed by "_ivreg29_". +If these use Stata's time-series operators, +the "." is replaced by a "_". +The maximum number of first-stage estimation results that can be saved +depends on how many other estimation results the user has already saved +and on the maximum supported by Stata (300 for Stata 9.1). + +{p 0 4}{cmdab:savefp:refix}{cmd:(}{it:prefix}{cmd:)} requests that +the first-stage regression results be saved using the user-specified prefix +instead of the default "_ivreg29_". 
+
+{p 0 4}{cmd:rf} requests that the reduced-form estimation of the equation
+be displayed.
+
+{p 0 4}{cmd:saverf} requests that the reduced-form estimation of the equation
+be saved for later access using the {cmd:estimates} command.
+The estimation is stored under the name of the dependent variable
+prefixed by "_ivreg29_".
+If this uses Stata's time-series operators,
+the "." is replaced by a "_".
+
+{p 0 4}{cmdab:saverfp:refix}{cmd:(}{it:prefix}{cmd:)} requests that
+the reduced-form estimation be saved using the user-specified prefix
+instead of the default "_ivreg29_".
+
+{p 0 4}{cmd:partial(}{it:varlist}{cmd:)} requests that
+the exogenous regressors in {it:varlist} be partialled out
+from the other variables in the equation.
+If the equation includes a constant,
+it is automatically partialled out as well.
+The coefficients corresponding to the regressors in {it:varlist}
+are not calculated.
+
+{p 0 4}{cmd:level(}{it:#}{cmd:)} specifies the confidence level, in percent,
+for confidence intervals of the coefficients; see help {help level}.
+
+{p 0 4}{cmd:noheader}, {cmd:eform()}, {cmd:depname()} and {cmd:plus}
+are for ado-file writers; see {hi:[R] ivreg} and {hi:[R] regress}.
+
+{p 0 4}{cmd:nofooter} suppresses the display of the footer containing
+identification and overidentification statistics,
+exogeneity and endogeneity tests,
+lists of endogenous variables and instruments, etc.
+
+{p 0 4}{cmd:version} causes {cmd:ivreg29} to display its current version number
+and to leave it in the macro {cmd:e(version)}.
+It cannot be used with any other options,
+and will clear any existing {cmd:e()} saved results.
+
+{marker s_macros}{title:Remarks and saved results}
+
+{p}{cmd:ivreg29} does not report an ANOVA table.
+Instead, it reports the RSS and both the centered and uncentered TSS.
+It also reports both the centered and uncentered R-squared. 
+NB: the TSS and R-squared reported by official {cmd:ivreg} is centered +if a constant is included in the regression, and uncentered otherwise. + +{p}{cmd:ivreg29} saves the following results in {cmd:e()}: + +Scalars +{col 4}{cmd:e(N)}{col 18}Number of observations +{col 4}{cmd:e(yy)}{col 18}Total sum of squares (SS), uncentered (y'y) +{col 4}{cmd:e(yyc)}{col 18}Total SS, centered (y'y - ((1'y)^2)/n) +{col 4}{cmd:e(rss)}{col 18}Residual SS +{col 4}{cmd:e(mss)}{col 18}Model SS =yyc-rss if the eqn has a constant, =yy-rss otherwise +{col 4}{cmd:e(df_m)}{col 18}Model degrees of freedom +{col 4}{cmd:e(df_r)}{col 18}Residual degrees of freedom +{col 4}{cmd:e(r2u)}{col 18}Uncentered R-squared, 1-rss/yy +{col 4}{cmd:e(r2c)}{col 18}Centered R-squared, 1-rss/yyc +{col 4}{cmd:e(r2)}{col 18}Centered R-squared if the eqn has a constant, uncentered otherwise +{col 4}{cmd:e(r2_a)}{col 18}Adjusted R-squared +{col 4}{cmd:e(ll)}{col 18}Log likelihood +{col 4}{cmd:e(rankxx)}{col 18}Rank of the matrix of observations on rhs variables=K +{col 4}{cmd:e(rankzz)}{col 18}Rank of the matrix of observations on instruments=L +{col 4}{cmd:e(rankV)}{col 18}Rank of covariance matrix V of coefficients +{col 4}{cmd:e(rankS)}{col 18}Rank of covariance matrix S of orthogonality conditions +{col 4}{cmd:e(rmse)}{col 18}root mean square error=sqrt(rss/(N-K)) if -small-, =sqrt(rss/N) if not +{col 4}{cmd:e(F)}{col 18}F statistic +{col 4}{cmd:e(N_clust)}{col 18}Number of clusters +{col 4}{cmd:e(bw)}{col 18}Bandwidth +{col 4}{cmd:e(lambda)}{col 18}LIML eigenvalue +{col 4}{cmd:e(kclass)}{col 18}k in k-class estimation +{col 4}{cmd:e(fuller)}{col 18}Fuller parameter alpha +{col 4}{cmd:e(sargan)}{col 18}Sargan statistic +{col 4}{cmd:e(sarganp)}{col 18}p-value of Sargan statistic +{col 4}{cmd:e(sargandf)}{col 18}dof of Sargan statistic = degree of overidentification = L-K +{col 4}{cmd:e(j)}{col 18}Hansen J statistic +{col 4}{cmd:e(jp)}{col 18}p-value of Hansen J statistic +{col 4}{cmd:e(jdf)}{col 18}dof of 
Hansen J statistic = degree of overidentification = L-K +{col 4}{cmd:e(arubin)}{col 18}Anderson-Rubin overidentification LR statistic N*ln(lambda) +{col 4}{cmd:e(arubinp)}{col 18}p-value of Anderson-Rubin overidentification LR statistic +{col 4}{cmd:e(arubin_lin)}{col 18}Anderson-Rubin linearized overidentification statistic N*(lambda-1) +{col 4}{cmd:e(arubin_linp)}{col 18}p-value of Anderson-Rubin linearized overidentification statistic +{col 4}{cmd:e(arubindf)}{col 18}dof of A-R overid statistic = degree of overidentification = L-K +{col 4}{cmd:e(idstat)}{col 18}LM test statistic for underidentification (Anderson or Kleibergen-Paap) +{col 4}{cmd:e(idp)}{col 18}p-value of underidentification LM statistic +{col 4}{cmd:e(iddf)}{col 18}dof of underidentification LM statistic +{col 4}{cmd:e(widstat)}{col 18}F statistic for weak identification (Cragg-Donald or Kleibergen-Paap) +{col 4}{cmd:e(arf)}{col 18}Anderson-Rubin F-test of significance of endogenous regressors +{col 4}{cmd:e(arfp)}{col 18}p-value of Anderson-Rubin F-test of endogenous regressors +{col 4}{cmd:e(archi2)}{col 18}Anderson-Rubin chi-sq test of significance of endogenous regressors +{col 4}{cmd:e(archi2p)}{col 18}p-value of Anderson-Rubin chi-sq test of endogenous regressors +{col 4}{cmd:e(ardf)}{col 18}degrees of freedom of Anderson-Rubin tests of endogenous regressors +{col 4}{cmd:e(ardf_r)}{col 18}denominator degrees of freedom of AR F-test of endogenous regressors +{col 4}{cmd:e(redstat)}{col 18}LM statistic for instrument redundancy +{col 4}{cmd:e(redp)}{col 18}p-value of LM statistic for instrument redundancy +{col 4}{cmd:e(reddf)}{col 18}dof of LM statistic for instrument redundancy +{col 4}{cmd:e(cstat)}{col 18}C-statistic +{col 4}{cmd:e(cstatp)}{col 18}p-value of C-statistic +{col 4}{cmd:e(cstatdf)}{col 18}Degrees of freedom of C-statistic +{col 4}{cmd:e(cons)}{col 18}1 when equation has a Stata-supplied constant; 0 otherwise +{col 4}{cmd:e(partialcons)}{col 18}as above but prior to 
partialling-out (see {cmd:e(partial)}) +{col 4}{cmd:e(partial_ct)}{col 18}Number of partialled-out variables (see {cmd:e(partial)}) + +Macros +{col 4}{cmd:e(cmd)}{col 18}ivreg29 +{col 4}{cmd:e(cmdline)}{col 18}Command line invoking ivreg29 +{col 4}{cmd:e(version)}{col 18}Version number of ivreg29 +{col 4}{cmd:e(model)}{col 18}ols, iv, gmm, liml, or kclass +{col 4}{cmd:e(depvar)}{col 18}Name of dependent variable +{col 4}{cmd:e(instd)}{col 18}Instrumented (RHS endogenous) variables +{col 4}{cmd:e(insts)}{col 18}Instruments +{col 4}{cmd:e(inexog)}{col 18}Included instruments (regressors) +{col 4}{cmd:e(exexog)}{col 18}Excluded instruments +{col 4}{cmd:e(collin)}{col 18}Variables dropped because of collinearities +{col 4}{cmd:e(dups)}{col 18}Duplicate variables +{col 4}{cmd:e(ecollin)}{col 18}Endogenous variables reclassified as exogenous because of +{col 20}collinearities with instruments +{col 4}{cmd:e(clist)}{col 18}Instruments tested for orthogonality +{col 4}{cmd:e(redlist)}{col 18}Instruments tested for redundancy +{col 4}{cmd:e(partial)}{col 18}Partialled-out exogenous regressors +{col 4}{cmd:e(small)}{col 18}small +{col 4}{cmd:e(wtype)}{col 18}weight type +{col 4}{cmd:e(wexp)}{col 18}weight expression +{col 4}{cmd:e(clustvar)}{col 18}Name of cluster variable +{col 4}{cmd:e(vcetype)}{col 18}Covariance estimation method +{col 4}{cmd:e(kernel)}{col 18}Kernel +{col 4}{cmd:e(tvar)}{col 18}Time variable +{col 4}{cmd:e(ivar)}{col 18}Panel variable +{col 4}{cmd:e(firsteqs)}{col 18}Names of stored first-stage equations +{col 4}{cmd:e(rfeq)}{col 18}Name of stored reduced-form equation +{col 4}{cmd:e(predict)}{col 18}Program used to implement predict + +Matrices +{col 4}{cmd:e(b)}{col 18}Coefficient vector +{col 4}{cmd:e(V)}{col 18}Variance-covariance matrix of the estimators +{col 4}{cmd:e(S)}{col 18}Covariance matrix of orthogonality conditions +{col 4}{cmd:e(W)}{col 18}GMM weighting matrix (=inverse of S if efficient GMM estimator) +{col 4}{cmd:e(first)}{col 
18}First-stage regression results +{col 4}{cmd:e(ccev)}{col 18}Eigenvalues corresponding to the Anderson canonical correlations test +{col 4}{cmd:e(cdev)}{col 18}Eigenvalues corresponding to the Cragg-Donald test + +Functions +{col 4}{cmd:e(sample)}{col 18}Marks estimation sample + + + +{marker s_examples}{title:Examples} + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/hayashi/griliches76.dta" : . use http://fmwww.bc.edu/ec-p/data/hayashi/griliches76.dta }{p_end} +{p 8 12}(Wages of Very Young Men, Zvi Griliches, J.Pol.Ec. 1976) + +{p 8 12}{stata "xi i.year" : . xi i.year} + +{col 0}(Instrumental variables. Examples follow Hayashi 2000, p. 255.) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt)" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt)} + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), small ffirst" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), small ffirst} + +{col 0}(Testing for the presence of heteroskedasticity in IV/GMM estimation) + +{p 8 12}{stata "ivhettest, fitlev" : . ivhettest, fitlev} + +{col 0}(Two-step GMM efficient in the presence of arbitrary heteroskedasticity) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust} + +{p 0}(GMM with user-specified first-step weighting matrix or matrix of orthogonality conditions) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), robust" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), robust} + +{p 8 12}{stata "predict double uhat if e(sample), resid" : . predict double uhat if e(sample), resid} + +{p 8 12}{stata "mat accum S = `e(insts)' [iw=uhat^2]" : . mat accum S = `e(insts)' [iw=uhat^2]} + +{p 8 12}{stata "mat S = 1/`e(N)' * S" : . 
mat S = 1/`e(N)' * S} + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust smatrix(S)" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust smatrix(S)} + +{p 8 12}{stata "mat W = invsym(S)" : . mat W = invsym(S)} + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust wmatrix(W)" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s robust wmatrix(W)} + +{p 0}(Equivalence of J statistic and Wald tests of included regressors, irrespective of instrument choice (Ahn, 1997)) + +{p 8 12}{stata "ivreg29 lw (iq=med kww age), gmm2s" : . ivreg29 lw (iq=med kww age), gmm2s} + +{p 8 12}{stata "mat S0 = e(S)" : . mat S0 = e(S)} + +{p 8 12}{stata "qui ivreg29 lw (iq=kww) med age, gmm2s smatrix(S0)" : . qui ivreg29 lw (iq=kww) med age, gmm2s smatrix(S0)} + +{p 8 12}{stata "test med age" : . test med age} + +{p 8 12}{stata "qui ivreg29 lw (iq=med) kww age, gmm2s smatrix(S0)" : . qui ivreg29 lw (iq=med) kww age, gmm2s smatrix(S0)} + +{p 8 12}{stata "test kww age" : . test kww age} + +{p 8 12}{stata "qui ivreg29 lw (iq=age) med kww, gmm2s smatrix(S0)" : . qui ivreg29 lw (iq=age) med kww, gmm2s smatrix(S0)} + +{p 8 12}{stata "test med kww" : . test med kww} + +{p 0}(Continuously-updated GMM (CUE) efficient in the presence of arbitrary heteroskedasticity. NB: may require 50+ iterations.) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), cue robust" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), cue robust} + +{p 0}(Continuously-updated GMM (CUE) with ml options) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), cue robust cueopt(technique(dfp))" : . 
ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), cue robust cueopt(technique(dfp))} + +{col 0}(Sargan-Basmann tests of overidentifying restrictions for IV estimation) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt)" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt)} + +{p 8 12}{stata "overid, all" : . overid, all} + +{col 0}(Tests of exogeneity and endogeneity) + +{col 0}(Test the exogeneity of one regressor) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s orthog(s)" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s orthog(s)} + +{col 0}(Test the exogeneity of two excluded instruments) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s orthog(age mrt)" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age mrt), gmm2s orthog(age mrt)} + +{col 0}(Frisch-Waugh-Lovell (FWL): equivalence of estimations with and without partialling-out) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns _I* (iq=kww age), cluster(year)" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year)} + +{p 8 12}{stata "ivreg29 lw s expr tenure rns _I* (iq=kww age), cluster(year) partial(_I*)" : . ivreg29 lw s expr tenure rns smsa _I* (iq=med kww age), cluster(year) partial(_I*)} + +{col 0}({cmd:partial()}: efficient GMM with #clusters<#instruments feasible after partialling-out) + +{p 8 12}{stata "ivreg29 lw s expr tenure rns _I* (iq=kww age), cluster(year) partial(_I*) gmm2s" : . ivreg29 lw s expr tenure rns smsa (iq=med kww age), cluster(year) partial(_I*) gmm2s} + +{col 0}(Examples following Wooldridge 2002, pp.59, 61) + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta" : . use http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta } + +{col 0}(Equivalence of DWH endogeneity test when regressor is endogenous...) + +{p 8 12}{stata "ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6)" : . 
ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6)} + +{p 8 12}{stata "ivendog educ" :. ivendog educ} + +{col 0}(... endogeneity test using the {cmd:endog} option) + +{p 8 12}{stata "ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), endog(educ)" : . ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), endog(educ)} + +{col 0}(...and C-test of exogeneity when regressor is exogenous, using the {cmd:orthog} option) + +{p 8 12}{stata "ivreg29 lwage exper expersq educ (=age kidslt6 kidsge6), orthog(educ)" : . ivreg29 lwage exper expersq educ (=age kidslt6 kidsge6), orthog(educ)} + +{col 0}(Heteroskedastic Ordinary Least Squares, HOLS) + +{p 8 12}{stata "ivreg29 lwage exper expersq educ (=age kidslt6 kidsge6), gmm2s" : . ivreg29 lwage exper expersq educ (=age kidslt6 kidsge6), gmm2s} + +{col 0}(Equivalence of Cragg-Donald Wald F statistic and F-test from first-stage regression +{col 0}in special case of single endogenous regressor. Also illustrates {cmd:savefirst} option.) + +{p 8 12}{stata "ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), savefirst" : . ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), savefirst} + +{p 8 12}{stata "di e(widstat)" : . di e(widstat)} + +{p 8 12}{stata "estimates restore _ivreg29_educ" : . estimates restore _ivreg29_educ} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(F)" : . di r(F)} + +{col 0}(Equivalence of Kleibergen-Paap robust rk Wald F statistic and F-test from first-stage +{col 0}regression in special case of single endogenous regressor.) + +{p 8 12}{stata "ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust savefirst" : . ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust savefirst} + +{p 8 12}{stata "di e(widstat)" : . di e(widstat)} + +{p 8 12}{stata "estimates restore _ivreg29_educ" : . estimates restore _ivreg29_educ} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(F)" : . 
di r(F)} + +{col 0}(Equivalence of Kleibergen-Paap robust rk LM statistic for identification and LM test +{col 0}of joint significance of excluded instruments in first-stage regression in special +{col 0}case of single endogenous regressor. Also illustrates use of {cmd:ivreg29} to perform an +{col 0}LM test in OLS estimation.) + +{p 8 12}{stata "ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust" : . ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust} + +{p 8 12}{stata "di e(idstat)" : . di e(idstat)} + +{p 8 12}{stata "ivreg29 educ exper expersq (=age kidslt6 kidsge6) if e(sample), robust" : . ivreg29 educ exper expersq (=age kidslt6 kidsge6) if e(sample), robust} + +{p 8 12}{stata "di e(j)" : . di e(j)} + +{col 0}(Equivalence of an LM test of an excluded instrument for redundancy and an LM test of +{col 0}significance from first-stage regression in special case of single endogenous regressor.) + +{p 8 12}{stata "ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust redundant(age)" : . ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust redundant(age)} + +{p 8 12}{stata "di e(redstat)" : . di e(redstat)} + +{p 8 12}{stata "ivreg29 educ exper expersq kidslt6 kidsge6 (=age) if e(sample), robust" : . ivreg29 educ exper expersq kidslt6 kidsge6 (=age) if e(sample), robust} + +{p 8 12}{stata "di e(j)" : . di e(j)} + +{col 0}(Weak-instrument robust inference: Anderson-Rubin Wald F and chi-sq and +{col 0}Stock-Wright S statistics. Also illusrates use of {cmd:saverf} option.) + +{p 8 12}{stata "ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust ffirst saverf" : . ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust ffirst saverf} + +{p 8 12}{stata "di e(arf)" : . di e(arf)} + +{p 8 12}{stata "di e(archi2)" : . di e(archi2)} + +{p 8 12}{stata "di e(sstat)" : . 
di e(sstat)} + +{col 0}(Obtaining the Anderson-Rubin Wald F statistic from the reduced-form estimation) + +{p 8 12}{stata "estimates restore _ivreg29_lwage" : . estimates restore _ivreg29_lwage} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(F)" : . di r(F)} + +{col 0}(Obtaining the Anderson-Rubin Wald chi-sq statistic from the reduced-form estimation. +{col 0}Use {cmd:ivreg29} without {cmd:small} to obtain large-sample test statistic.) + +{p 8 12}{stata "ivreg29 lwage exper expersq age kidslt6 kidsge6, robust" : . ivreg29 lwage exper expersq age kidslt6 kidsge6, robust} + +{p 8 12}{stata "test age kidslt6 kidsge6" : . test age kidslt6 kidsge6} + +{p 8 12}{stata "di r(chi2)" : . di r(chi2)} + +{col 0}(Obtaining the Stock-Wright S statistic as the value of the GMM CUE objective function. +{col 0}Also illustrates use of {cmd:b0} option. Coefficients on included exogenous regressors +{col 0}are OLS coefficients, which is equivalent to partialling them out before obtaining +{col 0}the value of the CUE objective function.) + +{p 8 12}{stata "mat b = 0" : . mat b = 0} + +{p 8 12}{stata "mat colnames b = educ" : . mat colnames b = educ} + +{p 8 12}{stata "qui ivreg29 lwage exper expersq" : . qui ivreg29 lwage exper expersq} + +{p 8 12}{stata "mat b = b, e(b)" : . mat b = b, e(b)} + +{p 8 12}{stata "ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust b0(b)" : . ivreg29 lwage exper expersq (educ=age kidslt6 kidsge6), robust b0(b)} + +{p 8 12}{stata "di e(j)" : . di e(j)} + +{col 0}(LIML and k-class estimation using Klein data) + +{col 9}{stata "use http://fmwww.bc.edu/repec/bocode/k/kleinI" :. use http://fmwww.bc.edu/repec/bocode/k/kleinI} + +{col 0}(LIML estimates of Klein's consumption function) + +{p 8 12}{stata "ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), liml" :. 
ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), liml} + +{col 0}(Equivalence of LIML and CUE+homoskedasticity+independence) + +{p 8 12}{stata "ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), liml coviv" :. ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), liml coviv} + +{p 8 12}{stata "ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), cue" :. ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), cue} + +{col 0}(Fuller's modified LIML with alpha=1) + +{p 8 12}{stata "ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), fuller(1)" :. ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), fuller(1)} + +{col 0}(k-class estimation with Nagar's bias-adjusted IV, k=1+(L-K)/N=1+4/21=1.19) + +{p 8 12}{stata "ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), kclass(1.19)" :. ivreg29 consump L.profit (profit wages = govt taxes trend wagegovt capital1 L.demand), kclass(1.19)} + +{col 0}(Kernel-based covariance estimation using time-series data) + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/wooldridge/phillips.dta" :. use http://fmwww.bc.edu/ec-p/data/wooldridge/phillips.dta} + +{p 8 12}{stata "tsset year, yearly" :. tsset year, yearly} + +{col 0}(Autocorrelation-consistent (AC) inference in an OLS Regression) + +{p 8 12}{stata "ivreg29 cinf unem, bw(3)" :. ivreg29 cinf unem, bw(3)} + +{p 8 12}{stata "ivreg29 cinf unem, kernel(qs) bw(auto)" :. ivreg29 cinf unem, kernel(qs) bw(auto)} + +{col 0}(Heteroskedastic and autocorrelation-consistent (HAC) inference in an OLS regression) + +{p 8 12}{stata "ivreg29 cinf unem, bw(3) kernel(bartlett) robust small" :. ivreg29 cinf unem, bw(3) kernel(bartlett) robust small} + +{p 8 12}{stata "newey cinf unem, lag(2)" :. 
newey cinf unem, lag(2)} + +{col 0}(AC and HAC in IV and GMM estimation) + +{p 8 12}{stata "ivreg29 cinf (unem = l(1/3).unem), bw(3)" :. ivreg29 cinf (unem = l(1/3).unem), bw(3)} + +{p 8 12}{stata "ivreg29 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(thann)" :. ivreg29 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(thann)} + +{p 8 12}{stata "ivreg29 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(qs) robust orthog(l1.unem)" :. ivreg29 cinf (unem = l(1/3).unem), bw(3) gmm2s kernel(qs) robust orthog(l1.unem)} + +{col 0}(Examples using Large N, Small T Panel Data) + +{p 8 12}{stata "use http://fmwww.bc.edu/ec-p/data/macro/abdata.dta" : . use http://fmwww.bc.edu/ec-p/data/macro/abdata.dta }{p_end} + +{p 8 12}{stata "tsset id year" :. tsset id year} + +{col 0}(Autocorrelation-consistent inference in an IV regression) + +{p 8 12}{stata "ivreg29 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), bw(1) kernel(tru)": . ivreg29 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), bw(1) kernel(tru)} + +{col 0}(Two-step effic. GMM in the presence of arbitrary heteroskedasticity and autocorrelation) + +{p 8 12}{stata "ivreg29 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), bw(2) gmm2s kernel(tru) robust": . ivreg29 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), bw(2) gmm2s kernel(tru) robust} + +{col 0}(Two-step effic. GMM in the presence of arbitrary heterosked. and intra-group correlation) + +{p 8 12}{stata "ivreg29 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), gmm2s cluster(id)": . ivreg29 n (w k ys = d.w d.k d.ys d2.w d2.k d2.ys), gmm2s cluster(id)} + + +{marker s_refs}{title:References} + +{p 0 4}Ahn, Seung C. 1997. Orthogonality tests in linear models. Oxford Bulletin +of Economics and Statistics, Vol. 59, pp. 183-186. + +{p 0 4}Anderson, T.W. 1951. Estimating linear restrictions on regression coefficients +for multivariate normal distributions. Annals of Mathematical Statistics, Vol. 22, pp. 327-51. + +{p 0 4}Anderson, T. W. and H. Rubin. 1949. 
Estimation of the parameters of a single equation +in a complete system of stochastic equations. Annals of Mathematical Statistics, Vol. 20, +pp. 46-63. + +{p 0 4}Anderson, T. W. and H. Rubin. 1950. The asymptotic properties of estimates of the parameters of a single +equation in a complete system of stochastic equations. Annals of Mathematical Statistics, +Vol. 21, pp. 570-82. + +{p 0 4}Angrist, J.D. and Pischke, J.-S. 2009. Mostly Harmless Ecnometrics: An Empiricist's Companion. +Princeton: Princeton University Press. + +{p 0 4}Baum, C.F., Schaffer, M.E., and Stillman, S. 2003. Instrumental Variables and GMM: +Estimation and Testing. The Stata Journal, Vol. 3, No. 1, pp. 1-31. +{browse "http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html":http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html}. +Working paper version: Boston College Department of Economics Working Paper No. 545. +{browse "http://ideas.repec.org/p/boc/bocoec/545.html":http://ideas.repec.org/p/boc/bocoec/545.html}. + +{p 0 4}Baum, C. F., Schaffer, M.E., and Stillman, S. 2007. Enhanced routines for instrumental variables/GMM estimation and testing. +The Stata Journal, Vol. 7, No. 4, pp. 465-506. +{browse "http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html":http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html}. +Working paper version: Boston College Department of Economics Working Paper No. 667. +{browse "http://ideas.repec.org/p/boc/bocoec/667.html":http://ideas.repec.org/p/boc/bocoec/667.html}. + +{p 0 4}Breusch, T., Qian, H., Schmidt, P. and Wyhowski, D. 1999. +Redundancy of moment conditions. +Journal of Econometrics, Vol. 9, pp. 89-111. + +{p 0 4}Chernozhukov, V. and Hansen, C. 2005. The Reduced Form: +A Simple Approach to Inference with Weak Instruments. +Working paper, University of Chicago, Graduate School of Business. + +{p 0 4}Cragg, J.G. and Donald, S.G. 1993. Testing Identfiability and Specification in +Instrumental Variables Models. Econometric Theory, Vol. 9, pp. 
222-240. + +{p 0 4}Cushing, M.J. and McGarvey, M.G. 1999. Covariance Matrix Estimation. +In L. Matyas (ed.), Generalized Methods of Moments Estimation. +Cambridge: Cambridge University Press. + +{p 0 4}Davidson, R. and MacKinnon, J. 1993. Estimation and Inference in Econometrics. +1993. New York: Oxford University Press. + +{p 0 4}Dufour, J.M. 2003. Identification, Weak Instruments and Statistical Inference +in Econometrics. Canadian Journal of Economics, Vol. 36, No. 4, pp. 767-808. +Working paper version: CIRANO Working Paper 2003s-49. +{browse "http://www.cirano.qc.ca/pdf/publication/2003s-49.pdf":http://www.cirano.qc.ca/pdf/publication/2003s-49.pdf}. + +{p 0 4}Finlay, K., and Magnusson, L.M. 2009. Implementing Weak-Instrument Robust Tests +for a General Class of Instrumental-Variables Models. + +{p 0 4}Hall, A.R., Rudebusch, G.D. and Wilcox, D.W. 1996. Judging Instrument Relevance in +Instrumental Variables Estimation. International Economic Review, Vol. 37, No. 2, pp. 283-298. +The Stata Journal, Vol. 9, No. 3, pp. 398-421. +{browse "http://www.stata-journal.com/article.html?article=st0171":http://www.stata-journal.com/article.html?article=st0171}. + +{p 0 4}Hayashi, F. Econometrics. 2000. Princeton: Princeton University Press. + +{p 0 4}Hansen, L.P., Heaton, J., and Yaron, A. 1996. Finite Sample Properties +of Some Alternative GMM Estimators. Journal of Business and Economic Statistics, Vol. 14, No. 3, pp. 262-280. + +{p 0 4}Kleibergen, F. 2007. Generalizing Weak Instrument Robust Statistics Towards Multiple Parameters, Unrestricted Covariance Matrices and Identification Statistics. Journal of Econometrics, forthcoming. + +{p 0 4}Kleibergen, F. and Paap, R. 2006. Generalized Reduced Rank Tests Using the Singular Value Decomposition. +Journal of Econometrics, Vol. 133, pp. 97-126. + +{p 0 4}Kleibergen, F. and Schaffer, M.E. 2007. ranktest: Stata module for testing the rank +of a matrix using the Kleibergen-Paap rk statistic. 
+{browse "http://ideas.repec.org/c/boc/bocode/s456865.html":http://ideas.repec.org/c/boc/bocode/s456865.html}. + +{p 0 4}Mikusheva, A. and Poi, B.P. 2006. +Tests and Confidence Sets with Correct Size When Instruments are Potentially Weak. The Stata Journal, Vol. 6, No. 3, pp. 335-347. + +{p 0 4}Moreira, M.J. and Poi, B.P. 2003. Implementing Tests with the Correct Size in the Simultaneous Equations Model. The Stata Journal, Vol. 3, No. 1, pp. 57-70. + +{p 0 4}Newey, W.K. and K.D. West, 1994. Automatic Lag Selection in Covariance Matrix Estimation. Review of Economic Studies, Vol. 61, No. 4, pp. 631-653. + +{p 0 4}Shea, J. 1997. Instrument Relevance in Multivariate Linear Models: +A Simple Measure. +Review of Economics and Statistics, Vol. 49, No. 2, pp. 348-352. + +{p 0 4}Stock, J.H. and Wright, J.H. 2000. GMM with Weak Identification. +Econometrica, Vol. 68, No. 5, September, pp. 1055-1096. + +{p 0 4}Stock, J.H. and Yogo, M. 2005. Testing for Weak Instruments in Linear IV Regression. In D.W.K. Andrews and J.H. Stock, eds. Identification and Inference for Econometric Models: Essays in Honor of Thomas Rothenberg. Cambridge: Cambridge University Press, 2005, pp. 80�108. +Working paper version: NBER Technical Working Paper 284. +{browse "http://www.nber.org/papers/T0284":http://www.nber.org/papers/T0284}. + +{p 0 4}Wooldridge, J.M. 2002. Econometric Analysis of Cross Section and Panel Data. Cambridge, MA: MIT Press. + + +{marker s_acknow}{title:Acknowledgements} + +{p}We would like to thanks various colleagues who helped us along the way, including +David Drukker, +Frank Kleibergen, +Austin Nichols, +Brian Poi, +Vince Wiggins, +and, not least, the users of {cmd:ivreg29} +who have provided suggestions, +spotted bugs, +and helped test the package. +We are also grateful to Jim Stock and Moto Yogo for permission to reproduce +their critical values for the Cragg-Donald statistic. 
+ +{marker s_citation}{title:Citation of ivreg29} + +{p}{cmd:ivreg29} is not an official Stata command. It is a free contribution +to the research community, like a paper. Please cite it as such: {p_end} + +{phang}Baum, C.F., Schaffer, M.E., Stillman, S. 2010. +ivreg29: Stata module for extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression. +{browse "http://ideas.repec.org/c/boc/bocode/s425401.html":http://ideas.repec.org/c/boc/bocode/s425401.html}{p_end} + +{title:Authors} + + Christopher F Baum, Boston College, USA + baum@bc.edu + + Mark E Schaffer, Heriot-Watt University, UK + m.e.schaffer@hw.ac.uk + + Steven Stillman, Motu Economic and Public Policy Research + stillman@motu.org.nz + + +{title:Also see} + +{p 1 14}Articles:{it:Stata Journal}, volume 3, number 1: {browse "http://ideas.repec.org/a/tsj/stataj/v3y2003i1p1-31.html":st0030}{p_end} +{p 10 14}{it:Stata Journal}, volume 7, number 4: {browse "http://ideas.repec.org/a/tsj/stataj/v7y2007i4p465-506.html":st0030_3}{p_end} + +{p 1 14}Manual: {hi:[U] 23 Estimation and post-estimation commands}{p_end} +{p 10 14}{hi:[U] 29 Overview of model estimation in Stata}{p_end} +{p 10 14}{hi:[R] ivreg}{p_end} + +{p 1 10}On-line: help for {help ivregress}, {help ivreg}, {help newey}; +{help overid}, {help ivendog}, {help ivhettest}, {help ivreset}, +{help xtivreg2}, {help xtoverid}, {help ranktest}, +{help condivreg} (if installed); +{help rivtest} (if installed); +{help est}, {help postest}; +{help regress}{p_end} diff --git a/110/replication_package/replication/ado/plus/i/ivreg29_cue.ado b/110/replication_package/replication/ado/plus/i/ivreg29_cue.ado new file mode 100644 index 0000000000000000000000000000000000000000..e982e35a86e685ba031d45090cc66eb16c30464b --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg29_cue.ado @@ -0,0 +1,26 @@ +! 
1.0.6 13Nov2009 +* 1.0.1 cfb updated to v8.2 +* 1.0.2 mes fixed col and row names mismatch +* 1.0.3 added noid option to supress unnecessary identification stats +* 1.0.4 added local `ivreg2_cmd'. ref only to e(j); e(sargan) no longer needed. +* 1.0.5 added nocollin option to supress unnecessary checks for collinearity +* 1.0.6 slight rewrite of ivreg2_cue to ivreg29_cue + +program define ivreg29_cue + version 8.2 + args todo b lnf + local ivreg2_cmd "ivreg29" + tempname b1 J +* Need to make col and rownames match + mat `b1'=`b' +* Remove equation number from col names + local vn : colfullnames `b1' + local vn : subinstr local vn "eq1" "", all + mat colnames `b1' = `vn' +* Standard row name + mat rownames `b1' = y1 + qui `ivreg2_cmd' $IV_lhs $IV_inexog ($IV_endog=$IV_exexog) $IV_wt if $ML_samp==1, b0(`b1') $IV_opt noid nocollin + scalar `J'=e(j) + scalar `lnf' = -`J' +end + diff --git a/110/replication_package/replication/ado/plus/i/ivreg29_p.ado b/110/replication_package/replication/ado/plus/i/ivreg29_p.ado new file mode 100644 index 0000000000000000000000000000000000000000..2d7868661ba37237df759c116444799fef320fe6 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg29_p.ado @@ -0,0 +1,100 @@ +*! ivreg29_p 1.0.8 30Jan2011 +*! 
author mes +* 1.0.1: 25apr2002 original version +* 1.0.2: 28jun2005 version 8.2 +* 1.0.3: 1Aug2006 complete rewrite plus fwl option +* 1.0.4: 26Jan2007 eliminated double reporting of #MVs +* 1.0.5: 2Feb2007 small fix to allow fwl of just _cons +* 1.0.6: 19Aug2007 replacement of "fwl" with "partial" in conjuction with new ivreg2 syntax +* 1.0.7: 13Nov2009 slight rewrite of ivreg2_p to ivreg29_p +* 1.0.8: 30Jan2011 re-introduced stdp option (hadn't been supported after fwl/partial) +* and added labelling of created residual variable + +program define ivreg29_p + version 8.2 + syntax newvarname [if] [in] , [XB Residuals stdp] + marksample touse, novarlist + + local type "`xb'`residuals'`stdp'" + + if "`type'"=="" { + local type "xb" +di in gr "(option xb assumed; fitted values)" + } + +* e(partialcons) now always exists and is 1 or 0 + if e(partial_ct) { +* partial partial-out block + if "`type'" == "residuals" { + + tempvar esample + tempname ivres + gen byte `esample' = e(sample) + +* Need to strip out time series operators + local lhs "`e(depvar)'" + tsrevar `lhs', substitute + local lhs_t "`r(varlist)'" + + local rhs : colnames(e(b)) + tsrevar `rhs', substitute + local rhs_t "`r(varlist)'" + + if "`e(partial1)'" != "" { + local partial "`e(partial1)'" + } + else { + local partial "`e(partial)'" + } + tsrevar `partial', substitute + local partial_t "`r(varlist)'" + + if ~e(partialcons) { + local noconstant "noconstant" + } + + local allvars "`lhs_t' `rhs_t'" +* Partial-out block. 
Uses estimatation sample to get coeffs, markout sample for predict + _estimates hold `ivres', restore + foreach var of local allvars { + tempname `var'_partial + qui regress `var' `partial' if `esample', `noconstant' + qui predict double ``var'_partial' if `touse', resid + local allvars_partial "`allvars_partial' ``var'_partial'" + } + _estimates unhold `ivres' + + tokenize `allvars_partial' + local lhs_partial "`1'" + mac shift + local rhs_partial "`*'" + + tempname b + mat `b'=e(b) + mat colnames `b' = `rhs_partial' +* Use forcezero? + tempvar xb + mat score double `xb' = `b' if `touse' + gen `typlist' `varlist' = `lhs_partial' - `xb' + label var `varlist' "Residuals" + } + else { +di in red "Option `type' not supported with -partial- option" + error 198 + } + } + else if "`type'" == "residuals" { + tempname lhs lhs_t xb + local lhs "`e(depvar)'" + tsrevar `lhs', substitute + local lhs_t "`r(varlist)'" + qui _predict `typlist' `xb' if `touse' + gen `typlist' `varlist'=`lhs_t'-`xb' + label var `varlist' "Residuals" + } +* Must be either xb or stdp + else { + _predict `typlist' `varlist' if `touse', `type' + } + +end diff --git a/110/replication_package/replication/ado/plus/i/ivreg2_p.ado b/110/replication_package/replication/ado/plus/i/ivreg2_p.ado new file mode 100644 index 0000000000000000000000000000000000000000..cf7fec904a48556ff3837e6826cfe10b135cc593 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreg2_p.ado @@ -0,0 +1,142 @@ +*! ivreg2_p 1.0.9 25Jan2015 +*! 
author mes +* 1.0.1: 25apr2002 original version +* 1.0.2: 28jun2005 version 8.2 +* 1.0.3: 1Aug2006 complete rewrite plus fwl option +* 1.0.4: 26Jan2007 eliminated double reporting of #MVs +* 1.0.5: 2Feb2007 small fix to allow fwl of just _cons +* 1.0.6: 19Aug2007 replacement of "fwl" with "partial" in conjuction with new ivreg2 syntax +* 1.0.7: 4Feb2010 version check update +* 1.0.8: 30Jan2011 re-introduced stdp option (hadn't been supported after fwl/partial) +* and added labelling of created residual variable +* 1.0.9: 25Jan2015 Rewrite to accommodate new ivreg2 with legacy support; +* passes control to matching ivreg2x_p predict program. + +program define ivreg2_p + +* Minimum of version 8 required (earliest ivreg2 is ivreg28) + version 8 + +* If estimation used current ivreg2, pass control to ivreg211 subroutine below. +* If estimation used legacy ivreg2x, pass control to earlier version. + + if "`e(ivreg2cmd)'"=="ivreg2" { + ivreg211_p `0' + } + else if "`e(ivreg2cmd)'"=="ivreg210" { + ivreg210_p `0' + } + else if "`e(ivreg2cmd)'"=="ivreg29" { + ivreg29_p `0' + } + else if "`e(ivreg2cmd)'"=="ivreg28" { + ivreg28_p `0' + } + else { +di as err "Error - ivreg2 estimation missing e(ivreg2cmd) macro" + exit 601 + } + +end + + +* Main/current predict program +program define ivreg211_p + version 8.2 + syntax newvarname [if] [in] , [XB Residuals stdp] + marksample touse, novarlist + +* Check ivreg2 version is compatible. +* fwl becomes partial starting in ivreg2 02.2.07 + local vernum "`e(version)'" + if ("`vernum'" < "03.0.00") | ("`vernum'" > "09.9.99") { +di as err "Error: incompatible versions of ivreg2 and ivreg2_p." 
+di as err "Currently installed version of ivreg2 is `vernum'" +di as err "To update, from within Stata type " _c +di in smcl "{stata ssc install ivreg2, replace :ssc install ivreg2, replace}" + exit 601 + } + + local type "`xb'`residuals'`stdp'" + + if "`type'"=="" { + local type "xb" +di in gr "(option xb assumed; fitted values)" + } + +* e(partialcons) now always exists and is 1 or 0 + if e(partial_ct) { +* partial partial-out block + if "`type'" == "residuals" { + + tempvar esample + tempname ivres + gen byte `esample' = e(sample) + +* Need to strip out time series operators + local lhs "`e(depvar)'" + tsrevar `lhs', substitute + local lhs_t "`r(varlist)'" + + local rhs : colnames(e(b)) + tsrevar `rhs', substitute + local rhs_t "`r(varlist)'" + + if "`e(partial1)'" != "" { + local partial "`e(partial1)'" + } + else { + local partial "`e(partial)'" + } + tsrevar `partial', substitute + local partial_t "`r(varlist)'" + + if ~e(partialcons) { + local noconstant "noconstant" + } + + local allvars "`lhs_t' `rhs_t'" +* Partial-out block. Uses estimation sample to get coeffs, markout sample for predict + _estimates hold `ivres', restore + foreach var of local allvars { + tempname `var'_partial + qui regress `var' `partial' if `esample', `noconstant' + qui predict double ``var'_partial' if `touse', resid + local allvars_partial "`allvars_partial' ``var'_partial'" + } + _estimates unhold `ivres' + + tokenize `allvars_partial' + local lhs_partial "`1'" + mac shift + local rhs_partial "`*'" + + tempname b + mat `b'=e(b) + mat colnames `b' = `rhs_partial' +* Use forcezero?
+ tempvar xb + mat score double `xb' = `b' if `touse' + gen `typlist' `varlist' = `lhs_partial' - `xb' + label var `varlist' "Residuals" + } + else { +di in red "Option `type' not supported with -partial- option" + error 198 + } + } + else if "`type'" == "residuals" { + tempname lhs lhs_t xb + local lhs "`e(depvar)'" + tsrevar `lhs', substitute + local lhs_t "`r(varlist)'" + qui _predict `typlist' `xb' if `touse' + gen `typlist' `varlist'=`lhs_t'-`xb' + label var `varlist' "Residuals" + } +* Must be either xb or stdp + else { + _predict `typlist' `varlist' if `touse', `type' + } + +end diff --git a/110/replication_package/replication/ado/plus/i/ivreghdfe.ado b/110/replication_package/replication/ado/plus/i/ivreghdfe.ado new file mode 100644 index 0000000000000000000000000000000000000000..5de4d8c28a052836bd459370ef70315971c58d11 --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreghdfe.ado @@ -0,0 +1,6940 @@ +*! ivreghdfe 1.1.3 04Jan2023 (bugfix for github issue #48) +*! ivreghdfe 1.1.2 29Sep2022 (bugfix for github issue #44) +*! ivreghdfe 1.1.1 14Dec2021 (experimental -margins- support) +*! ivreghdfe 1.1.0 25Feb2021 +*! ivreg2 4.1.11 22Nov2019 +*! authors cfb & mes +*! see end of file for version comments + +* Variable naming: +* lhs = LHS endogenous +* endo = X1, RHS endogenous (instrumented) = #K1 +* inexog = X2 = Z2 = included exogenous (instruments) = #K2 = #L2 +* exexog = Z1 = excluded exogenous (instruments) = #L1 +* iv = {inexog exexog} = all instruments +* rhs = {endo inexog} = RHS regressors +* no 0 or 1 at end of varlist means original varlist but after expansion of FV and TS vars +* 0 at the end of the name means the varlist after duplicates removed and collinearities/omitteds marked +* 1 means the same as 0 but after omitted vars dropped and extraneous FV operators "o", "b" and "n" removed. +* 0, 1 etc. 
also apply to _ct variables that are counts of these varlists +* dofminus is large-sample adjustment (e.g., #fixed effects) +* sdofminus is small-sample adjustment (e.g., #partialled-out regressors) + +if c(version) < 12 & c(version) >= 9 { +* livreg2 Mata library. +* Ensure Mata library is indexed if new install. +* Not needed for Stata 12+ since ssc.ado does this when installing. + capture mata: mata drop m_calckw() + capture mata: mata drop m_omega() + capture mata: mata drop ms_vcvorthog() + capture mata: mata drop s_vkernel() + capture mata: mata drop s_cdsy() + mata: mata mlib index +} + +********************************************************************************* +***************************** PARENT IVREG2 ************************************* +****************** FORKS TO EXTERNAL IVREG2S IF CALLER < 11 ********************* +********************************************************************************* + +* Parent program, forks to versions as appropriate after version call +* Requires byable(onecall) +program define ivreghdfe, eclass byable(onecall) /* properties(svyj) */ sortpreserve + local lversion 04.1.11 + + ms_get_version ftools, min_version("2.48.0") + ms_get_version reghdfe, min_version("6.0.2") + +* local to store Stata version of calling program + local caller = _caller() + +* reghdfe 6 requires stata 13 + version 13 + +* Replay = no arguments before comma + if replay() { +* Call to ivreg2 will either be for version, in which case there should be no other arguments, +* or a postestimation call, in which case control should pass to main program. 
+ syntax [, VERsion * ] + if "`version'"~="" & "`options'"=="" { +* Call to ivreg2 is for version + di in gr "`lversion'" + ereturn clear + ereturn local version `lversion' + exit + } + else if "`version'"~="" & "`options'"~="" { +* Improper use of version option +di as err "invalid syntax - cannot combine version with other options" + exit 198 + } + else { +* Postestimation call, so put `options' macro (i.e. *) back into `0' macro with preceding comma + local 0 `", `options'"' + } + } + +* replay can't be combined with by + if replay() & _by() { +di as err "invalid syntax - cannot use by with replay" + exit 601 + } + +* Handling of by. ivreg2x programs are byable(recall), so must set prefix for them. + if _by() { + local BY `"by `_byvars'`_byrc0':"' + } + +* If calling version is < 11, pass control to earlier version +* Note that this means calls from version 11.0 will not go to legacy version +* but will fail requirement of version 11.2 in main code. + if `caller' < 11 { + local ver = round(`caller') + local ivreg2cmd ivreg2`ver' +* If replay, change e(cmd) macro to name of legacy ivreg2 before calling it, then change back +* Note by not allowed with replay; caught above so prefix not needed here. + if replay() { + ereturn local cmd "`ivreg2cmd'" + `ivreg2cmd' `0' + ereturn local cmd "ivreg2" + + } + else { +* If not replay, call legacy ivreg2 and then add macros + `BY' `ivreg2cmd' `0' + ereturn local cmd "ivreg2" + ereturn local ivreg2cmd "`ivreg2cmd'" + ereturn local version `lversion' + ereturn local predict ivreg2_p + } + exit + } + +// Version is 11 or above. +// Pass control to current estimation program ivreg211. + if replay() { + ivreg211 `0' + } +// If not replay, call ivreg211 and then add macros + else { + // use to separate main args from options + syntax [anything] [if] [in] [aw fw pw iw] [, * ] + // append caller(.) 
to options + `BY' ivreg211 `anything' `if' `in' [`weight' `exp'], `options' caller(`caller') +// `BY' ivreg211 `0' + ereturn local cmd "ivreghdfe" + ereturn local ivreg2cmd "ivreghdfe" + ereturn local version `lversion' + ereturn local predict reghdfe // ivreg2_p <- to enable -predict- and -margins- + ereturn local cmdline ivreghdfe `0' // `0' rather than `*' in case of any "s in string + if (e(N_hdfe)!= .) ereturn local predict reghdfe_p + cap mata: mata drop HDFE // prefix ivreg211 call with capture? + cap mata: mata drop hdfe_residuals + } + +end +********************************************************************************* +*************************** END PARENT IVREG2 *********************************** +********************************************************************************* + + +********************* EXIT IF STATA VERSION < 11 ******************************** + +* When do file is loaded, exit here if Stata version calling program is < 11. +* Prevents loading of rest of program file (could cause earlier Statas to crash). + +if c(stata_version) < 11 { + exit +} + +******************** END EXIT IF STATA VERSION < 11 ***************************** + + +********************************************************************************* +***************** BEGIN MAIN IVREG2 ESTIMATION CODE ***************************** +********************************************************************************* + +* Main estimation program +program define ivreg211, eclass byable(recall) sortpreserve + version 11.2 + + local ivreg2cmd "ivreg211" // actual command name + local ivreg2name "ivreg2" // name used in command line and for default naming of equations etc. 
+ + if replay() { + syntax [, /// + FIRST FFIRST RF SFIRST /// + dropfirst droprf dropsfirst /// + Level(integer $S_level) /// + NOHEader NOFOoter /// + EForm(string) PLUS /// + NOOMITTED vsquish noemptycells /// + baselevels allbaselevels /// + VERsion /// + caller(real 0) /// + ] + if "`version'" != "" & "`first'`ffirst'`rf'`noheader'`nofooter'`dropfirst'`droprf'`eform'`plus'" != "" { + di as err "option version not allowed" + error 198 + } + if "`version'" != "" { + di in gr "`lversion'" + ereturn clear + ereturn local version `lversion' + exit + } + if `"`e(cmd)'"' != "ivreghdfe" { + error 301 + } +// Set display options + local dispopt eform(`eform') `noomitted' `vsquish' `noemptycells' `baselevels' `allbaselevels' + +// On replay, set flag so saved eqns aren't dropped + if "`e(firsteqs)'" != "" & "`dropfirst'" == "" { + local savefirst "savefirst" + } + if "`e(rfeq)'" != "" & "`droprf'" == "" { + local saverf "saverf" + } + if "`e(sfirsteq)'" != "" & "`dropsfirst'" == "" { + local savesfirst "savesfirst" + } +// On replay, re-display collinearities and duplicates messages + DispCollinDups + } + else { +// MAIN CODE BLOCK + +// Start parsing + syntax [anything(name=0)] [if] [in] [aw fw pw iw/] [, /// + NOID NOCOLLIN /// + FIRST FFIRST SAVEFIRST SAVEFPrefix(name) /// + RF SAVERF SAVERFPrefix(name) /// + SFIRST SAVESFIRST SAVESFPrefix(name) /// + SMall NOConstant /// + Robust CLuster(varlist) kiefer dkraay(integer 0) /// + VCE(string) /// + BW(string) kernel(string) center /// + GMM GMM2s CUE /// + LIML COVIV FULLER(real 0) Kclass(real 0) /// + ORTHOG(string) ENDOGtest(string) REDundant(string) /// + PARTIAL(string) FWL(string) /// + Absorb(string) /// + RESiduals(name) RESiduals2 /* if no name, residuals saved as _reghdfe_resid */ /// + Level(integer $S_level) /// + NOHEader NOFOoter NOOUTput /// + bvclean NOOMITTED omitted vsquish noemptycells /// + baselevels allbaselevels /// + title(string) subtitle(string) /// + DEPname(string) EForm(string) PLUS /// + 
Tvar(varname) Ivar(varname) /// + B0(string) SMATRIX(string) WMATRIX(string) /// + sw psd0 psda useqr /// + dofminus(integer 0) sdofminus(integer 0) /// + NOPARTIALSMALL /// + fvall fvsep /// + caller(real 0) /// + * ] + + * Allow cluster(vars) as a shortcut for vce(cluster vars) + if ("`vce'"!="") { + _assert ("`cluster'"==""), msg("only one of cluster() and vce() can be specified") rc(198) + _assert ("`robust'"==""), msg("only one of robust() and vce() can be specified") rc(198) + gettoken vce_type vce_rest : vce + _assert inlist("`vce_type'", "cluster", "robust", "unadjusted"), msg("vce() only supports cluster, robust, and unadjusted") rc(198) + + if ("`vce_type'" == "cluster") { + loc cluster "`vce_rest'" + _assert ("`vce_rest'"!=""), msg("vce(cluster ...) requires variables") rc(198) + } + if ("`vce_type'" == "robust") { + _assert ("`vce_rest'"==""), msg("vce(robust) does not support varnames or other options") rc(198) + loc robust "robust" + } + loc vce // clear out local just to be sure (as it's used later in the code for other purposes) + } + + if (`"`absorb'"' != "") { + // absorb implies... + loc small small + loc noconstant noconstant + loc nopartialsmall + loc reghdfe_options `"absorb(`absorb') `options' nopartialout varlist_is_touse"' // (nopartialout implies keepmata) + + if ("`residuals2'" != "") { + cap drop _reghdfe_resid // destructive! + loc residuals _reghdfe_resid + } + else if ("`residuals'"!="") { + conf new var `residuals' + } + } + +// Confirm ranktest is installed (necessary component). 
+ checkversion_ranktest `caller' + local ranktestcmd `r(ranktestcmd)' + +// Parse after clearing any sreturn macros (can be left behind in Stata 11) + sreturn clear + ivparse `0', ivreg2name(`ivreg2name') /// needed for some options + partial(`partial') /// + fwl(`fwl') /// legacy option + orthog(`orthog') /// + endogtest(`endogtest') /// + redundant(`redundant') /// + depname(`depname') /// + `robust' /// + cluster(`cluster') /// + bw(`bw') /// + kernel(`kernel') /// + dkraay(`dkraay') /// + `center' /// + `kiefer' /// + `sw' /// + `noconstant' /// + tvar(`tvar') /// + ivar(`ivar') /// + `gmm2s' /// + `gmm' /// legacy option, produces error message + `cue' /// + `liml' /// + fuller(`fuller') /// + kclass(`kclass') /// + b0(`b0') /// + wmatrix(`wmatrix') /// + `noid' /// + `savefirst' /// + savefprefix(`savefprefix') /// + `saverf' /// + saverfprefix(`saverfprefix') /// + `savesfirst' /// + savesfprefix(`savesfprefix') /// + dofminus(`dofminus') /// + `psd0' /// + `psda' /// + `nocollin' /// + `useqr' /// + `bvclean' /// + eform(`eform') /// + `noomitted' /// + `vsquish' /// + `noemptycells' /// + `baselevels' /// + `allbaselevels' + +// varlists are unexpanded; may be empty + local lhs `s(lhs)' + local depname `s(depname)' + local endo `s(endo)' + local inexog `s(inexog)' + local exexog `s(exexog)' + local partial `s(partial)' + local cons =s(cons) + local partialcons =s(partialcons) + local tvar `s(tvar)' + local ivar `s(ivar)' + local tdelta `s(tdelta)' + local tsops =s(tsops) + local fvops =s(fvops) + local robust `s(robust)' + local cluster `s(cluster)' + local bw =`s(bw)' // arrives as string but return now as number + local bwopt `s(bwopt)' + local kernel `s(kernel)' // also used as flag for HAC estimation + local center =`s(center)' // arrives as string but now boolean + local kclassopt `s(kclassopt)' + local fulleropt `s(fulleropt)' + local liml `s(liml)' + local noid `s(noid)' // can also be triggered by b0(.) 
option + local useqr =`s(useqr)' // arrives as string but now boolean; nocollin=>useqr + local savefirst `s(savefirst)' + local savefprefix `s(savefprefix)' + local saverf `s(saverf)' + local saverfprefix `s(saverfprefix)' + local savesfirst `s(savesfirst)' + local savesfprefix `s(savesfprefix)' + local psd `s(psd)' // triggered by psd0 or psda + local dofmopt `s(dofmopt)' + local bvclean =`s(bvclean)' // arrives as string but return now as boolean + local dispopt `s(dispopt)' + +// Can now tsset; sortpreserve will restore sort after exit + if `tsops' | "`kernel'"~="" { + cap tsset // restores sort if tsset or xtset but sort disrupted + if _rc>0 { + tsset `ivar' `tvar' + } + } + +*********************************************************** + +// Weights +// fweight and aweight accepted as is +// iweight not allowed with robust or gmm and requires a trap below when used with summarize +// pweight is equivalent to aweight + robust +// Since we subsequently work with wvar, tsrevar of weight vars in weight `exp' not needed. 
+ + tempvar wvar + if "`weight'" == "fweight" | "`weight'"=="aweight" { + local wtexp `"[`weight'=`exp']"' + qui gen double `wvar'=`exp' + } + if "`weight'" == "fweight" & "`kernel'" !="" { + di in red "fweights not allowed (data are -tsset-)" + exit 101 + } + if "`weight'" == "fweight" & "`sw'" != "" { + di in red "fweights currently not supported with -sw- option" + exit 101 + } + if "`weight'" == "iweight" { + if "`robust'`cluster'`gmm2s'`kernel'" !="" { + di in red "iweights not allowed with robust or gmm" + exit 101 + } + else { + local wtexp `"[`weight'=`exp']"' + qui gen double `wvar'=`exp' + } + } + if "`weight'" == "pweight" { + local wtexp `"[aweight=`exp']"' + qui gen double `wvar'=`exp' + local robust "robust" + } + if "`weight'" == "" { +* If no weights, define neutral weight variable + qui gen byte `wvar'=1 + } + +******************************************************************************** +// markout sample +// include `tvar' to limit sample to where tvar is available, but only if TS operators used + marksample touse + if `tsops' { + markout `touse' `lhs' `inexog' `exexog' `endo' `cluster' `tvar', strok + } + else { + markout `touse' `lhs' `inexog' `exexog' `endo' `cluster', strok + } + +* Create HDFE object and update touse +if (`"`absorb'"' != "") { + if ("`weight'" != "") loc reghdfe_weight "[`weight'=`wvar']" + if (`"`cluster'"' != "") loc reghdfe_options `"`reghdfe_options' vce(cluster `cluster')"' + reghdfe `touse' `reghdfe_weight', `reghdfe_options' // create HDFE object +} + +******************************************************************************** +// weight factor and sample size +// Every time a weight is used, must multiply by scalar wf ("weight factor") +// wf=1 for no weights, fw and iw, wf = scalar that normalizes sum to be N if aw or pw + + sum `wvar' if `touse' `wtexp', meanonly +// Weight statement + if "`weight'" ~= "" { +di in gr "(sum of wgt is " %14.4e `r(sum_w)' ")" + } + if "`weight'"=="" | "`weight'"=="fweight" | 
"`weight'"=="iweight" { +// Effective number of observations is sum of weight variable. +// If weight is "", weight var must be column of ones and N is number of rows + local wf=1 + local N=r(sum_w) + } + else if "`weight'"=="aweight" | "`weight'"=="pweight" { + local wf=r(N)/r(sum_w) + local N=r(N) + } + else { +// Should never reach here +di as err "ivreg2 error - misspecified weights" + exit 198 + } + if `N'==0 { +di as err "no observations" + exit 2000 + } + +*************************************************************** +// Time-series data +// tindex used by Mata code so that ts operators work correctly + + tempvar tindex + qui gen `tindex'=1 if `touse' + qui replace `tindex'=sum(`tindex') if `touse' + + if `tsops' | "`kernel'"~="" { +// Report gaps in data + tsreport if `touse', panel + if `r(N_gaps)' != 0 { +di as text "Warning: time variable " as res "`tvar'" as text " has " /// + as res "`r(N_gaps)'" as text " gap(s) in relevant range" + } +// Set local macro T and check that bw < (T-1) + sum `tvar' if `touse', meanonly + local T = r(max)-r(min) + 1 + local T1 = `T' - 1 + if (`bw' > (`T1'/`tdelta')) { +di as err "invalid bandwidth in option bw() - cannot exceed timespan of data" + exit 198 + } + } + +// kiefer VCV = kernel(tru) bw(T) and no robust with tsset data + if "`kiefer'" ~= "" { + local bw =`T' + } + +*********** Column of ones for constant set up here ************** + + if "`noconstant'"=="" { +// If macro not created, automatically omitted. + tempvar ones + qui gen byte `ones' = 1 if `touse' + } + +************* Varlists, FV varlists, duplicates ***************** +// Varlists come in 4 versions, e.g., for inexog: +// (a) inexog = full list of original expanded vnames; may have duplicates +// (b) inexog0 = as with inexog with duplicates removed but RETAINING base/omitted/etc. varnames +// (c) inexog1 = as with inexog0 but WITHOUT base/omitted/etc. 
+// (d) fv_inexog1 = corresponding list with temp vars minus base/omitted/etc., duplicates, collinearities etc. +// Varlists (c) and (d) are definitive, i.e., have the variables actually used in the estimation. + +// Create consistent expanded varlists. +// "Consistent" means base vars for FVs must be consistent +// hence default rhs=endo+inexog is expanded as one. +// fvall: overrides, endo+inexog+exexog expanded as one +// fvsep: overrides, endo, inexog and exexog expanded separately +// NB: expanding endo+inexog+exexog is dangerous because +// fvexpand can zap a list in case of overlap +// e.g. fvexpand mpg + i(1/4).rep78 + i5.rep78 +// => mpg 1b.rep78 2.rep78 3.rep78 4.rep78 5.rep78 +// but fvexpand mpg + i.rep78 + i5.rep78 +// => mpg 5.rep78 + + CheckDupsCollin, /// + lhs(`lhs') /// + endo(`endo') /// + inexog(`inexog') /// + exexog(`exexog') /// + partial(`partial') /// + orthog(`orthog') /// + endogtest(`endogtest') /// + redundant(`redundant') /// + touse(`touse') /// + wvar(`wvar') /// + wf(`wf') /// + `noconstant' /// + `nocollin' /// + `fvall' /// + `fvsep' + +// Replace basic varlists and create "0" versions of varlists + foreach vl in lhs endo inexog exexog partial orthog endogtest redundant { + local `vl' `s(`vl')' + local `vl'0 `s(`vl'0)' + } + local dups `s(dups)' + local collin `s(collin)' + local ecollin `s(ecollin)' + +// Create "1" and fv versions of varlists + foreach vl in lhs endo inexog exexog partial orthog endogtest redundant { + foreach var of local `vl'0 { // var-by-var so that fvrevar doesn't decide on base etc. 
+ _ms_parse_parts `var' + if ~`r(omit)' { // create temp var only if not omitted + fvrevar `var' if `touse' + local `vl'1 ``vl'1' `var' + local fv_`vl'1 `fv_`vl'1' `r(varlist)' + } + } + local `vl'1 : list retokenize `vl'1 + local fv_`vl'1 : list retokenize fv_`vl'1 + } + +// Check that LHS expanded to a single variable + local wrongvars_ct : word count `lhs' + if `wrongvars_ct' > 1 { +di as err "multiple dependent variables specified: `lhs'" + error 198 + } + +// Check that option varlists are compatible with main varlists +// orthog() + local wrongvars : list orthog1 - inexog1 + local wrongvars : list wrongvars - exexog1 + local wrongvars_ct : word count `wrongvars' + if `wrongvars_ct' { +di as err "Error: `wrongvars' listed in orthog() but does not appear as exogenous." + error 198 + } +// endog() + local wrongvars : list endogtest1 - endo1 + local wrongvars_ct : word count `wrongvars' + if `wrongvars_ct' { +di as err "Error: `wrongvars' listed in endog() but does not appear as endogenous." + error 198 + } +// redundant() + local wrongvars : list redundant1 - exexog1 + local wrongvars_ct : word count `wrongvars' + if `wrongvars_ct' { +di as err "Error: `wrongvars' listed in redundant() but does not appear as exogenous." + error 198 + } + +// And create allnames macros + local allnames `lhs' `endo' `inexog' `exexog' + local allnames0 `lhs0' `endo0' `inexog0' `exexog0' + local allnames1 `lhs1' `endo1' `inexog1' `exexog1' + local fv_allnames1 `fv_lhs1' `fv_endo1' `fv_inexog1' `fv_exexog1' + + +// *************** Partial-out block ************** // + +// `partial' has all to be partialled out except for constant + if "`partial1'" != "" | `partialcons'==1 | "`absorb'" != "" { + preserve + +// Remove partial0 from inexog0. +// Remove partial1 from inexog1. 
+ local inexog0 : list inexog0 - partial0 + local inexog1 : list inexog1 - partial1 + local fv_inexog1 : list fv_inexog1 - fv_partial1 + +// Check that cluster, weight, tvar or ivar variables won't be transformed +// Use allnames1 (expanded varlist) + if "`cluster'"~="" { + local pvarcheck : list cluster in allnames1 + if `pvarcheck' { +di in r "Error: cannot use cluster variable `cluster' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`tvar'"~="" { + local pvarcheck : list tvar in allnames1 + if `pvarcheck' { +di in r "Error: cannot use time variable `tvar' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`ivar'"~="" { + local pvarcheck : list ivar in allnames1 + if `pvarcheck' { +di in r "Error: cannot use panel variable `ivar' as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } + if "`wtexp'"~="" { + tokenize `exp', parse("*/()+-^&|~") + local wvartokens `*' + local nwvarnames : list allnames1 - wvartokens + local wvarnames : list allnames1 - nwvarnames + if "`wvarnames'"~="" { +di in r "Error: cannot use weight variables as dependent variable, regressor or IV" +di in r " in combination with -partial- option." + error 198 + } + } +// Partial out +// But first replace everything with doubles + recast double `fv_lhs1' `fv_endo1' `fv_inexog1' `fv_exexog1' `fv_partial1' + +if ("`absorb'" != "") { + loc hdfe_varlist `fv_lhs1' `fv_endo1' `fv_inexog1' `fv_exexog1' `fv_partial1' + mata: HDFE.partial_out(tokens("`hdfe_varlist'"), 0, 1) // don't save TSS; standardize inputs + mata: st_store(HDFE.sample, tokens("`hdfe_varlist'"), HDFE.solution.data :* HDFE.solution.stdevs) + mata: HDFE.solution.data = . // save memory + mata: st_local("absorb_ct", strofreal(HDFE.df_a)) + assert `absorb_ct'`' != . 
+ if (`absorb_ct'==0) loc absorb_ct 1 // adjustment to match ivreg2 and old reghdfe (happens if absvar is nested in cluster) + loc partial_ct 0 + loc partialcons `absorb_ct' +} + +if ("`partial1'" != "" | `partialcons'==1) { + mata: s_partial ("`fv_lhs1'", /// + "`fv_endo1'", /// + "`fv_inexog1'", /// + "`fv_exexog1'", /// + "`fv_partial1'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + `cons') + local partial_ct : word count `partial1' +} + +// Constant is partialled out, unless nocons already specified in the first place + capture drop `ones' + local ones "" + if "`noconstant'" == "" { +// partial_ct used for small-sample adjustment to regression F-stat + local partial_ct = `partial_ct' + 1 + local noconstant "noconstant" + local cons 0 + } + + if ("`absorb'" != "") { + local partial_ct = `partial_ct' + `absorb_ct' + } + } + else { +// Set count of partial vars to zero if option not used + local partial_ct 0 + local partialcons 0 + } +// Add partial_ct to small dof adjustment sdofminus + if "`nopartialsmall'"=="" { + local sdofminus = `sdofminus'+`partial_ct' + } + +********************************************* + + local rhs0 `endo0' `inexog0' // needed for display of omitted/base/etc. + local rhs1 `endo1' `inexog1' + local insts1 `exexog1' `inexog1' + local fv_insts1 `fv_exexog1' `fv_inexog1' + local fv_rhs1 `fv_endo1' `fv_inexog1' + local rhs0_ct : word count `rhs0' // needed for display of omitted/base/etc. + local rhs1_ct : word count `fv_rhs1' + local iv1_ct : word count `fv_insts1' + local endo1_ct : word count `fv_endo1' + local exex1_ct : word count `fv_exexog1' + local endoexex1_c : word count `fv_endo1' `fv_exexog1' + local inexog1_ct : word count `fv_inexog1' + +// Counts modified to include constant if appropriate + local rhs1_ct = `rhs1_ct' + `cons' + local rhs0_ct = `rhs0_ct' + `cons' // needed for display of omitted/base/etc. + local iv1_ct = `iv1_ct' + `cons' + +// Column/row names for matrices b, V, S, etc. 
+ local cnb0 `endo0' `inexog0' // including omitted + local cnb1 `endo1' `inexog1' // excluding omitted + local cnZ0 `exexog0' `inexog0' // excluding omitted + local cnZ1 `exexog1' `inexog1' // excluding omitted + if `cons' { + local cnb0 "`cnb0' _cons" + local cnb1 "`cnb1' _cons" + local cnZ0 "`cnZ0' _cons" + local cnZ1 "`cnZ1' _cons" + } + +********************************************* +// Remaining checks: variable counts, col/row names of b0, smatrix, wmatrix + CheckMisc, /// + rhs1_ct(`rhs1_ct') /// + iv1_ct(`iv1_ct') /// + bvector(`b0') /// + smatrix(`smatrix') /// + wmatrix(`wmatrix') /// + cnb1(`cnb1') /// + cnZ1(`cnZ1') + + if "`b0'"~="" { + tempname b0 // so we can overwrite without changing original user matrix + mat `b0' = r(b0) + } + if "`smatrix'"~="" { + tempname S0 + mat `S0' = r(S0) + } + if "`wmatrix'"~="" { + tempname wmatrix // so we can overwrite without changing original user matrix + mat `wmatrix' = r(W0) + } + +*************** Commonly used matrices **************** + tempname YY yy yyc + tempname XX X1X1 X2X2 X1Z X1Z1 XZ Xy + tempname ZZ Z1Z1 Z2Z2 Z1Z2 Z1X2 Zy ZY Z2y Z2Y + tempname XXinv X2X2inv ZZinv XPZXinv + tempname rankxx rankzz condxx condzz + +// use fv_ varlists + mata: s_crossprods ("`fv_lhs1'", /// + "`fv_endo1'", /// + "`fv_inexog1' `ones'", /// + "`fv_exexog1'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N') + mat `XX' =r(XX) + mat `X1X1' =r(X1X1) + mat `X1Z' =r(X1Z) + mat `ZZ' =r(ZZ) + mat `Z2Z2' =r(Z2Z2) + mat `Z1Z2' =r(Z1Z2) + mat `XZ' =r(XZ) + mat `Xy' =r(Xy) + mat `Zy' =r(Zy) + mat `YY' =r(YY) + scalar `yy' =r(yy) + scalar `yyc' =r(yyc) + mat `ZY' =r(ZY) + mat `Z2y' =r(Z2y) + mat `Z2Y' =r(Z2Y) + mat `XXinv' =r(XXinv) + mat `ZZinv' =r(ZZinv) + mat `XPZXinv' =r(XPZXinv) + scalar `condxx' =r(condxx) + scalar `condzz' =r(condzz) + + scalar `rankzz' = rowsof(`ZZinv') - diag0cnt(`ZZinv') + scalar `rankxx' = rowsof(`XXinv') - diag0cnt(`XXinv') + local overid = `rankzz' - `rankxx' + +********** CLUSTER 
SETUP ********************************************** + +* Mata code requires data are sorted on (1) the first var cluster if there +* is only one cluster var; (2) on the 3rd and then 1st if two-way clustering, +* unless (3) two-way clustering is combined with kernel option, in which case +* the data are tsset and sorted on panel id (first cluster variable) and time +* id (second cluster variable). +* Second cluster var is optional and requires an identifier numbered 1..N_clust2, +* unless combined with kernel option, in which case it's the time variable. +* Third cluster var is the intersection of 1 and 2, unless combined with kernel +* opt, in which case it's unnecessary. +* Sorting on "cluster3 cluster1" means that in Mata, panelsetup works for +* both, since cluster1 nests cluster3. +* Note that it is possible to cluster on time but not panel, in which case +* cluster1 is time, cluster2 is empty and data are sorted on panel-time. +* Note also that if data are sorted here but happen to be tsset, will need +* to be re-tsset after estimation code concludes. + + +// No cluster options or only 1-way clustering +// but for Mata and other purposes, set N_clust vars =0 + local N_clust=0 + local N_clust1=0 + local N_clust2=0 + if "`cluster'"!="" { + local clopt "cluster(`cluster')" + tokenize `cluster' + local cluster1 "`1'" + local cluster2 "`2'" + if "`kernel'"~="" { +* kernel requires either that cluster1 is time var and cluster2 is empty +* or that cluster1 is panel var and cluster2 is time var. +* Either way, data must be tsset and sorted for panel data. + if "`cluster2'"~="" { +* Allow backwards order + if "`cluster1'"=="`tvar'" & "`cluster2'"=="`ivar'" { + local cluster1 "`2'" + local cluster2 "`1'" + } + if "`cluster1'"~="`ivar'" | "`cluster2'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset panel & time vars." 
+di as err " tsset panel var=`ivar'; tsset time var=`tvar'; cluster vars=`cluster1',`cluster2'" + exit 198 + } + } + else { + if "`cluster1'"~="`tvar'" { +di as err "Error: cluster kernel-robust requires clustering on tsset time variable." +di as err " tsset time var=`tvar'; cluster var=`cluster1'" + exit 198 + } + } + } +* Simple way to get quick count of 1st cluster variable without disrupting sort +* clusterid1 is numbered 1.._Nclust1. + tempvar clusterid1 + qui egen `clusterid1'=group(`cluster1') if `touse' + sum `clusterid1' if `touse', meanonly + if "`cluster2'"=="" { + local N_clust=r(max) + local N_clust1=`N_clust' + if "`kernel'"=="" { +* Single level of clustering and no kernel-robust, so sort on single cluster var. +* kernel-robust already sorted via tsset. + sort `cluster1' + } + } + else { + local N_clust1=r(max) + if "`kernel'"=="" { + tempvar clusterid2 clusterid3 +* New cluster id vars are numbered 1..N_clust2 and 1..N_clust3 + qui egen `clusterid2'=group(`cluster2') if `touse' + qui egen `clusterid3'=group(`cluster1' `cluster2') if `touse' +* Two levels of clustering and no kernel-robust, so sort on cluster3/nested in/cluster1 +* kernel-robust already sorted via tsset. + sort `clusterid3' `cluster1' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) + } + else { +* Need to create this only to count the number of clusters + tempvar clusterid2 + qui egen `clusterid2'=group(`cluster2') if `touse' + sum `clusterid2' if `touse', meanonly + local N_clust2=r(max) +* Now replace with original variable + local clusterid2 `cluster2' + } + + local N_clust=min(`N_clust1',`N_clust2') + + } // end 2-way cluster block + } // end cluster block + + +************************************************************************************************ + + tempname b W S V beta lambda j jp rss mss rmse sigmasq rankV rankS + tempname arubin arubinp arubin_lin arubin_linp + tempname r2 r2_a r2u r2c F Fp Fdf2 ivest + + tempvar resid + qui gen double `resid'=. 
+ +******************************************************************************************* +* LIML +******************************************************************************************* + + if "`liml'`kclassopt'"~="" { + + mata: s_liml( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`Zy'", /// + "`Z2Z2'", /// + "`YY'", /// + "`ZY'", /// + "`Z2Y'", /// + "`Xy'", /// + "`ZZinv'", /// + "`fv_lhs1'", /// + "`fv_lhs1' `fv_endo1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_endo1'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`fv_exexog1'", /// + "`fv_inexog1' `ones'", /// + `fuller', /// + `kclass', /// + "`coviv'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`robust'", /// + "`clusterid1'", /// + "`clusterid2'", /// + "`clusterid3'", /// + `bw', /// + "`kernel'", /// + "`sw'", /// + "`psd'", /// + "`ivar'", /// + "`tvar'", /// + "`tindex'", /// + `tdelta', /// + `center', /// + `dofminus', /// + `useqr') + + mat `b'=r(beta) + mat `S'=r(S) + mat `V'=r(V) + scalar `lambda'=r(lambda) + local kclass=r(kclass) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + scalar `rankV'=r(rankV) + scalar `rankS'=r(rankS) + + scalar `arubin'=(`N'-`dofminus')*ln(`lambda') + scalar `arubin_lin'=(`N'-`dofminus')*(`lambda'-1) + +// collinearities can cause LIML to generate (spurious) OLS results + if "`nocollin'"~="" & `kclass'<1e-8 { +di as err "warning: k=1 in LIML estimation; results equivalent to OLS;" +di as err " may be caused by collinearities" + } + } + +******************************************************************************************* +* OLS, IV and 2SGMM. Also enter to get CUE starting values. +************************************************************************************************ + + if "`liml'`kclassopt'`b0'"=="" { + +* Call to s_gmm1s to do 1st-step GMM. 
+* If W or S supplied, calculates GMM beta and residuals +* If none of the above supplied, calculates GMM beta using default IV weighting matrix and residuals +* Block not entered if b0 is provided. + +* 1-step GMM is efficient and V/J/Sargan can be returned if: +* - estimator is IV, W is known and S can be calculated from 1st-step residuals +* - S is provided (and W is NOT) so W=inv(S) and beta can be calculated using W +* 1-step GMM is inefficient if: +* - non-iid VCE is requested +* - W is provided + + local effic1s = ( /// + "`gmm2s'`robust'`cluster'`kernel'"=="" /// + | ("`smatrix'"~="" & "`wmatrix'"=="") /// + ) + +// use fv_ varlists + mata: s_gmm1s( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`Zy'", /// + "`ZZinv'", /// + "`fv_lhs1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`wmatrix'", /// + "`S0'", /// + `dofminus', /// + `effic1s', /// + `overid', /// + `useqr') + mat `b'=r(beta) + mat `W'=r(W) + +* If 1st-step is efficient, save remaining results and we're done + if `effic1s' { + mat `V'=r(V) + mat `S'=r(S) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + scalar `rankV'=r(rankV) + scalar `rankS'=r(rankS) + } + else { +* ...we're not done - do inefficient or 2-step efficient GMM + +* Pick up matrix left by s_gmm1s(.) + tempname QXZ_W_QZX + mat `QXZ_W_QZX'=r(QXZ_W_QZX) + +* Block calls s_omega to get cov matrix of orthog conditions, if not supplied + if "`smatrix'"~="" { + mat `S'=`S0' + } + else { + +* NB: xtivreg2 calls ivreg2 with data sorted on ivar and optionally tvar. +* Stock-Watson adjustment -sw- assumes data are sorted on ivar. Checked at start of ivreg2. + +* call abw code if bw() is defined and bw(auto) selected + if `bw' != 0 { + if `bw' == -1 { + tempvar abwtouse + gen byte `abwtouse' = (`resid' < .) 
+ abw `resid' `exexog1' `inexog1' `abwtouse', /* + */ tindex(`tindex') nobs(`N') tobs(`T') noconstant kernel(`kernel') + local bw `r(abw)' + local bwopt "bw(`bw')" + local bwchoice "`r(bwchoice)'" + } + } +* S covariance matrix of orthogonality conditions +// use fv_ varlists + mata: s_omega( "`ZZ'", /// + "`resid'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`robust'", /// + "`clusterid1'", /// + "`clusterid2'", /// + "`clusterid3'", /// + `bw', /// + "`kernel'", /// + "`sw'", /// + "`psd'", /// + "`ivar'", /// + "`tvar'", /// + "`tindex'", /// + `tdelta', /// + `center', /// + `dofminus') + mat `S'=r(S) + } + +* By this point: `b' has 1st-step inefficient beta +* `resid' has resids from the above beta +* `S' has vcv of orthog conditions using either `resid' or user-supplied `S0' +* `QXZ_W_QZX' was calculated in s_gmm1s(.) for use in s_iegmm(.) + +* Inefficient IV. S, W and b were already calculated above. + if "`gmm2s'"=="" & "`robust'`cluster'`kernel'"~="" { + mata: s_iegmm( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`Zy'", /// + "`QXZ_W_QZX'", /// + "`fv_lhs1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`W'", /// + "`S'", /// + "`b'", /// + `dofminus', /// + `overid', /// + `useqr') + } + +* 2-step efficient GMM. S calculated above, b and W will be updated. 
+ if "`gmm2s'"~="" { + mata: s_egmm( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`Zy'", /// + "`ZZinv'", /// + "`fv_lhs1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`S'", /// + `dofminus', /// + `overid', /// + `useqr') + mat `b'=r(beta) + mat `W'=r(W) + } + + mat `V'=r(V) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + scalar `rankV'=r(rankV) + scalar `rankS'=r(rankS) + } +* Finished with non-CUE/LIML block + } + +*************************************************************************************** +* Block for cue gmm +******************************************************************************************* + if "`cue'`b0'" != "" { + +* s_gmmcue is passed initial b from IV/2-step GMM block above +* OR user-supplied b0 for evaluation of CUE obj function at b0 + mata: s_gmmcue( "`ZZ'", /// + "`XZ'", /// + "`fv_lhs1'", /// + "`resid'", /// + "`fv_endo1' `fv_inexog1' `ones'", /// + "`fv_exexog1' `fv_inexog1' `ones'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + "`robust'", /// + "`clusterid1'", /// + "`clusterid2'", /// + "`clusterid3'", /// + `bw', /// + "`kernel'", /// + "`sw'", /// + "`psd'", /// + "`ivar'", /// + "`tvar'", /// + "`tindex'", /// + `tdelta', /// + "`b'", /// + "`b0'", /// + `center', /// + `dofminus', /// + `useqr') + + mat `b'=r(beta) + mat `S'=r(S) + mat `W'=r(W) + mat `V'=r(V) + scalar `j'=r(j) + scalar `rss'=r(rss) + scalar `sigmasq'=r(sigmasq) + scalar `rankV'=r(rankV) + scalar `rankS'=r(rankS) + + } + +**************************************************************** +* Done with estimation blocks +**************************************************************** + + mat colnames `b' = `cnb1' + mat colnames `V' = `cnb1' + mat rownames `V' = `cnb1' + mat colnames `S' = `cnZ1' + mat rownames `S' = `cnZ1' +* No W matrix for LIML or kclass + capture 
mat colnames `W' = `cnZ1' + capture mat rownames `W' = `cnZ1' + +* Store residuals if requested +if (`"`absorb'"' != "") { + * Need to save resids if saving FEs, even if temporarily + mata: st_local("save_any_fe", strofreal(HDFE.save_any_fe)) + if ("`residuals'" == "" & `save_any_fe') { + loc residuals "__temp_reghdfe_resid__" + } + mata: HDFE.solution.resid = st_data(., "`resid'", "`touse'") +} + + +******************************************************************************************* +* RSS, counts, dofs, F-stat, small-sample corrections +******************************************************************************************* + +// rankxx = rhs1_ct except if nocollin +// rankzz = iv1_ct except if nocollin +// nocollin means count may exceed rank (because of dropped vars), so rank #s foolproof + scalar `rmse'=sqrt(`sigmasq') + if "`noconstant'"=="" { + scalar `mss'=`yyc' - `rss' + } + else { + scalar `mss'=`yy' - `rss' + } + + local Fdf1 = `rankxx' - `cons' + local df_m = `rankxx' - `cons' + (`sdofminus'-`partialcons') + +* Residual dof + if "`cluster'"=="" { +* Use int(`N') because of non-integer N with iweights, and also because of +* possible numeric imprecision with N returned by above. + local df_r = int(`N') - `rankxx' - `dofminus' - `sdofminus' + } + else { +* To match Stata, subtract 1 + local df_r = `N_clust' - 1 + } + +* Sargan-Hansen J dof and p-value +* df=0 doesn't guarantee j=0 since can be call to get value of CUE obj fn + local jdf = `rankzz' - `rankxx' + if `jdf' == 0 & "`b0'"=="" { + scalar `j' = 0 + } + else { + scalar `jp' = chiprob(`jdf',`j') + } + if "`liml'"~="" { + scalar `arubinp' = chiprob(`jdf',`arubin') + scalar `arubin_linp' = chiprob(`jdf',`arubin_lin') + } + +* Small sample corrections for var-cov matrix. +* If robust, the finite sample correction is N/(N-K), and with no small +* we change this to 1 (a la Davidson & MacKinnon 1993, p. 554, HC0). 
+* If cluster, the finite sample correction is (N-1)/(N-K)*M/(M-1), and with no small +* we change this to 1 (a la Wooldridge 2002, p. 193), where M=number of clusters. + + if "`small'" != "" { + if "`cluster'"=="" { + matrix `V'=`V'*(`N'-`dofminus')/(`N'-`rankxx'-`dofminus'-`sdofminus') + } + else { + matrix `V'=`V'*(`N'-1)/(`N'-`rankxx'-`sdofminus') /// + * `N_clust'/(`N_clust'-1) + } + scalar `sigmasq'=`rss'/(`N'-`rankxx'-`dofminus'-`sdofminus') + scalar `rmse'=sqrt(`sigmasq') + } + + scalar `r2u'=1-`rss'/`yy' + scalar `r2c'=1-`rss'/`yyc' + if "`noconstant'"=="" { + scalar `r2'=`r2c' + scalar `r2_a'=1-(1-`r2')*(`N'-1)/(`N'-`rankxx'-`dofminus'-`sdofminus') + } + else { + scalar `r2'=`r2u' + scalar `r2_a'=1-(1-`r2')*`N'/(`N'-`rankxx'-`dofminus'-`sdofminus') + } +* `N' is rounded down to nearest integer if iweights are used. +* If aw, pw or fw, should already be integer but use round in case of numerical imprecision. + local N=int(`N') + +* Fstat +* To get it to match Stata's, must post separately with dofs and then do F stat by hand +* in case weights generate non-integer obs and dofs +* Create copies so they can be posted + tempname FB FV + mat `FB'=`b' + mat `FV'=`V' + capture ereturn post `FB' `FV' +* If the cov matrix wasn't positive definite, the post fails with error code 506 + local rc = _rc + if `rc' != 506 { +* Strip out omitted/base/etc. vars from RHS list + ivreg2_fvstrip `rhs1', dropomit + capture test `r(varlist)' + if "`small'" == "" { + if "`cluster'"=="" { + capture scalar `F' = r(chi2)/`Fdf1' * `df_r'/(`N'-`dofminus') + } + else { +* sdofminus used here so that F-stat matches test stat from regression with no partial and small + capture scalar `F' = r(chi2)/`Fdf1' * /// + (`N_clust'-1)/`N_clust' * /// + (`N'-`rankxx'-`sdofminus')/(`N'-1) + } + } + else { + capture scalar `F' = r(chi2)/`Fdf1' + } + capture scalar `Fp'=Ftail(`Fdf1',`df_r',`F') + capture scalar `Fdf2'=`df_r' + } + +* If j==. 
or vcv wasn't full rank, then vcv problems and F is meaningless + if `j' == . | `rc'==506 { + scalar `F' = . + scalar `Fp' = . + } + +* End of counts, dofs, F-stat, small sample corrections + +******************************************************************************************** +* Reduced form and first stage regression options +******************************************************************************************* +* Relies on proper count of (non-collinear) IVs generated earlier. +* Note that nocons option + constant in instrument list means first-stage +* regressions are reported with nocons option. First-stage F-stat therefore +* correctly includes the constant as an explanatory variable. + + if "`sfirst'`savesfirst'`rf'`saverf'`first'`ffirst'`savefirst'" != "" & (`endo1_ct' > 0) { + +* Restore original order if changed for mata code above + capture tsset + + local sdofmopt = "sdofminus(`sdofminus')" +// Need to create Stata placeholders for Mata code so that Stata time-series operators can work on them +// fres1 is Nx1 +// endo1_hat is NxK1 +// fsresall is Nx(K1+1) (used for full system) + tempname fsres1 + qui gen double `fsres1'=. + local fsresall `fsres1' + foreach x of local fv_endo1 { + tempname fsres + qui gen double `fsres'=. 
+ local fsresall "`fsresall' `fsres'" + } + +// mata code requires sorting on cluster 3 / cluster 1 (if 2-way) or cluster 1 (if one-way) + if "`cluster'"!="" { + sort `clusterid3' `cluster1' + } + mata: s_ffirst( "`ZZ'", /// + "`XX'", /// + "`XZ'", /// + "`ZY'", /// + "`ZZinv'", /// + "`XXinv'", /// + "`XPZXinv'", /// + "`Z2Z2'", /// + "`Z1Z2'", /// + "`Z2y'", /// + "`fsres1'", /// Nx1 + "`fsresall'", /// Nx(K1+1) + "`fv_lhs1'", /// + "`fv_endo1'", /// + "`fv_inexog1' `ones'", /// + "`fv_exexog1'", /// + "`touse'", /// + "`weight'", /// + "`wvar'", /// + `wf', /// + `N', /// + `N_clust', /// + "`robust'", /// + "`clusterid1'", /// + "`clusterid2'", /// + "`clusterid3'", /// + `bw', /// + "`kernel'", /// + "`sw'", /// + "`psd'", /// + "`ivar'", /// + "`tvar'", /// + "`tindex'", /// + `tdelta', /// + `center', /// + `dofminus', /// + `sdofminus') + + tempname firstmat firstb firstv firsts + mat `firstmat' = r(firstmat) + mat rowname `firstmat' = rmse sheapr2 pr2 F df df_r pvalue /// + SWF SWFdf1 SWFdf2 SWFp SWchi2 SWchi2p SWr2 /// + APF APFdf1 APFdf2 APFp APchi2 APchi2p APr2 + mat colname `firstmat' = `endo1' + mat `firstb' = r(b) + mat `firstv' = r(V) + mat `firsts' = r(S) + local archi2 =r(archi2) + local archi2p =r(archi2p) + local arf =r(arf) + local arfp =r(arfp) + local ardf =r(ardf) + local ardf_r =r(ardf_r) + local sstat =r(sstat) + local sstatdf =r(sstatdf) + local sstatp =r(sstatp) + local rmse_rf =r(rmse_rf) + +* Restore original order if changed for mata code above + capture tsset +// System of first-stage/reduced form eqns + if "`sfirst'`savesfirst'" ~= "" { + PostFirstRF if `touse', /// + bmat(`firstb') /// + vmat(`firstv') /// + smat(`firsts') /// + firstmat(`firstmat') /// + lhs1(`lhs1') /// + endo1(`endo1') /// + znames0(`cnZ0') /// + znames1(`cnZ1') /// + bvclean(`bvclean') /// + fvops(`fvops') /// + partial_ct(`partial_ct') /// + `robust' /// + cluster(`cluster') /// + cluster1(`cluster1') /// + cluster2(`cluster2') /// + nc(`N_clust') /// + 
nc1(`N_clust1') /// + nc2(`N_clust2') /// + kernel(`kernel') /// + bw(`bw') /// + ivar(`ivar') /// + tvar(`tvar') /// + obs(`N') /// + iv1_ct(`iv1_ct') /// + cons(`cons') /// + partialcons(`partialcons') /// + dofminus(`dofminus') /// + sdofminus(`sdofminus') + local sfirsteq "`savesfprefix'sfirst_`lhs1'" + local sfirsteq : subinstr local sfirsteq "." "_" + capture est store `sfirsteq', title("System of first-stage/reduced form regressions") + if _rc > 0 { +di +di in ye "Unable to store system of first-stage reduced form regressions." +di + } + } + +// RF regression + if "`rf'`saverf'" ~= "" { + PostFirstRF if `touse', /// + rf /// extract RF regression as saved result + rmse_rf(`rmse_rf') /// provide RMSE for posting + bmat(`firstb') /// + vmat(`firstv') /// + smat(`firsts') /// + firstmat(`firstmat') /// + lhs1(`lhs1') /// + endo1(`endo1') /// + znames0(`cnZ0') /// + znames1(`cnZ1') /// + bvclean(`bvclean') /// + fvops(`fvops') /// + partial_ct(`partial_ct') /// + `robust' /// + cluster(`cluster') /// + cluster1(`cluster1') /// + cluster2(`cluster2') /// + nc(`N_clust') /// + nc1(`N_clust1') /// + nc2(`N_clust2') /// + kernel(`kernel') /// + bw(`bw') /// + ivar(`ivar') /// + tvar(`tvar') /// + obs(`N') /// + iv1_ct(`iv1_ct') /// + cons(`cons') /// + partialcons(`partialcons') /// + dofminus(`dofminus') /// + sdofminus(`sdofminus') + local rfeq "`saverfprefix'`lhs1'" + local rfeq : subinstr local rfeq "." "_" + capture est store `rfeq', title("Reduced-form regression: `lhs'") + if _rc > 0 { +di +di in ye "Unable to store reduced form regression of `lhs1'." 
+di + } + } + +// Individual first-stage equations + if "`first'`savefirst'" ~= "" { + foreach vn in `endo1' { + + PostFirstRF if `touse', /// + first(`vn') /// extract first-stage regression + bmat(`firstb') /// + vmat(`firstv') /// + smat(`firsts') /// + firstmat(`firstmat') /// + lhs1(`lhs1') /// + endo1(`endo1') /// + znames0(`cnZ0') /// + znames1(`cnZ1') /// + bvclean(`bvclean') /// + fvops(`fvops') /// + partial_ct(`partial_ct') /// + `robust' /// + cluster(`cluster') /// + cluster1(`cluster1') /// + cluster2(`cluster2') /// + nc(`N_clust') /// + nc1(`N_clust1') /// + nc2(`N_clust2') /// + kernel(`kernel') /// + bw(`bw') /// + ivar(`ivar') /// + tvar(`tvar') /// + obs(`N') /// + iv1_ct(`iv1_ct') /// + cons(`cons') /// + partialcons(`partialcons') /// + dofminus(`dofminus') /// + sdofminus(`sdofminus') + local eqname "`savefprefix'`vn'" + local eqname : subinstr local eqname "." "_" + capture est store `eqname', title("First-stage regression: `vn'") + if _rc == 0 { + local firsteqs "`firsteqs' `eqname'" + } + else { +di +di in ye "Unable to store first-stage regression of `vn'." 
+di + } + } + } + } +* End of RF and first-stage regression code + +******************************************************************************************* +* Re-tsset if necessary +************************************************************************************************ + + capture tsset + +******************************************************************************************* +* orthog option: C statistic (difference of Sargan statistics) +******************************************************************************************* +* Requires j dof from above + if "`orthog'"!="" { + tempname cj cstat cstatp +* Initialize cstat + scalar `cstat' = 0 +* Remove orthog from inexog and put in endo +* Remove orthog from exexog + local cexexog1 : list fv_exexog1 - fv_orthog1 + local cinexog1 : list fv_inexog1 - fv_orthog1 + local cendo1 : list fv_inexog1 - cinexog1 + local cendo1 `fv_endo1' `cendo1' + local clist_ct : word count `orthog1' + +* If robust, HAC/AC or GMM (but not LIML or IV), create optimal weighting matrix to pass to ivreg2 +* by extracting the submatrix from the full S and then inverting. +* This guarantees the C stat will be non-negative. See Hayashi (2000), p. 220. +* Calculate C statistic with recursive call to ivreg2 +* Collinearities may cause problems, hence -capture-. 
+* smatrix works generally, including homoskedastic case with Sargan stat + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } +* clopt is omitted because it requires calculation of numbers of clusters, which is done +* only when S matrix is calculated +* S matrix has final varnames, but need to call ivreg2 with temp vars +* so must rename cols/rows of S + tempname fv_S + mat `fv_S'=`S' + if `cons' { + mat colnames `fv_S' = `fv_exexog1' `fv_inexog1' _cons + mat rownames `fv_S' = `fv_exexog1' `fv_inexog1' _cons + } + else { + mat colnames `fv_S' = `fv_exexog1' `fv_inexog1' + mat rownames `fv_S' = `fv_exexog1' `fv_inexog1' + } + capture `ivreg2cmd' `fv_lhs1' /// + `cinexog1' /// + (`cendo1'=`cexexog1') /// + if `touse' /// + `wtexp', /// + `noconstant' /// + `options' /// + `small' /// + `robust' /// + `gmm2s' /// + `bwopt' /// + `kernopt' /// + `dofmopt' /// + `sw' /// + `psd' /// + smatrix("`fv_S'") /// + noid /// + nocollin + local rc = _rc + if `rc' == 481 { + scalar `cstat' = 0 + local cstatdf = 0 + } + else { + scalar `cj'=e(j) + local cjdf=e(jdf) + scalar `cstat' = `j' - `cj' + local cstatdf = `jdf' - `cjdf' + } + _estimates unhold `ivest' + scalar `cstatp'= chiprob(`cstatdf',`cstat') +* Collinearities may cause C-stat dof to differ from the number of variables in orthog() +* If so, set cstat=0 + if `cstatdf' != `clist_ct' { + scalar `cstat' = 0 + } + } +* End of orthog block + +******************************************************************************************* +* Endog option +******************************************************************************************* +* Uses recursive call with orthog + if "`endogtest'"!="" { + tempname estat estatp +* Initialize estat + scalar `estat' = 0 +* Remove endogtest vars from endo and put in inexog + local eendo1 : list 
fv_endo1 - fv_endogtest1 + local einexog1 `fv_inexog1' `fv_endogtest1' + local elist_ct : word count `endogtest1' + +* Recursive call to ivreg2 using orthog option to obtain endogeneity test statistic +* Collinearities may cause problems, hence -capture-. + capture { + capture _estimates hold `ivest', restore + if _rc==1000 { +di as err "ivreg2 internal error - no room to save temporary estimation results" +di as err "Please drop one or more estimation results using -estimates drop-" + exit 1000 + } + capture `ivreg2cmd' `fv_lhs1' /// + `einexog1' /// + (`eendo1'=`fv_exexog1') /// + if `touse' /// + `wtexp', /// + `noconstant' /// + `robust' /// + `clopt' /// + `gmm2s' /// + `liml' /// + `bwopt' /// + `kernopt' /// + `small' /// + `dofmopt' /// + `sw' /// + `psd' /// + `options' /// + orthog(`fv_endogtest1') /// + noid /// + nocollin + local rc = _rc + if `rc' == 481 { + scalar `estat' = 0 + local estatdf = 0 + } + else { + scalar `estat'=e(cstat) + local estatdf=e(cstatdf) + scalar `estatp'=e(cstatp) + } + _estimates unhold `ivest' +* Collinearities may cause endog stat dof to differ from the number of variables in endog() +* If so, set estat=0 + if `estatdf' != `elist_ct' { + scalar `estat' = 0 + } + } +* End of endogeneity test block + } + +******************************************************************************************* +* Rank identification and redundancy block +******************************************************************************************* + if `endo1_ct' > 0 & "`noid'"=="" { + +// id=underidentification statistic, wid=weak identification statistic + tempname idrkstat widrkstat iddf idp + tempname ccf cdf rkf cceval cdeval cd + tempname idstat widstat + +// UNDERIDENTIFICATION +// Anderson canon corr underidentification statistic if homo, rk stat if not +// Need only id stat for testing full rank=(#cols-1) +// ranktest can exit with error if not full rank +// May not exit with error if e.g. ranktest (x y) (x w), +// i.e. 
collinearity across lists, so need to catch that. +// If no collinearity, can use iv1_ct and rhs1_ct etc. + cap `ranktestcmd' /// + (`fv_endo1') /// + (`fv_exexog1') /// + `wtexp' /// + if `touse', /// + partial(`fv_inexog1') /// + full /// + `noconstant' /// + `robust' /// + `clopt' /// + `bwopt' /// + `kernopt' +// Returned in e(.) macro: + local rkcmd `r(ranktestcmd)' + +// Canonical correlations returned in r(ccorr), sorted in descending order. +// If largest = 1, collinearities so enter error block. + local rkerror = _rc>0 | r(chi2)==. + if ~`rkerror' { + local rkerror = el(r(ccorr),1,1)==1 + } + if `rkerror' { +di as err "warning: -ranktest- error in calculating underidentification test statistics;" +di as err " may be caused by collinearities" + scalar `idstat' = . + local iddf = . + scalar `idp' = . + scalar `cd' = . + scalar `cdf' = . + } + else { + if "`cluster'"=="" { + scalar `idstat'=r(chi2)/r(N)*(`N'-`dofminus') + } + else { +// No dofminus adjustment needed for cluster-robust + scalar `idstat'=r(chi2) + } + mat `cceval'=r(ccorr) + mat `cdeval' = J(1,`endo1_ct',.) + forval i=1/`endo1_ct' { + mat `cceval'[1,`i'] = (`cceval'[1,`i'])^2 + mat `cdeval'[1,`i'] = `cceval'[1,`i'] / (1 - `cceval'[1,`i']) + } + local iddf = `iv1_ct' - (`rhs1_ct'-1) + scalar `idp' = chiprob(`iddf',`idstat') +// Cragg-Donald F statistic. +// Under homoskedasticity, Wald cd eigenvalue = cc/(1-cc) Anderson canon corr eigenvalue. + scalar `cd'=`cdeval'[1,`endo1_ct'] + scalar `cdf'=`cd'*(`N'-`sdofminus'-`iv1_ct'-`dofminus')/`exex1_ct' + } // end underidentification stat + +// WEAK IDENTIFICATION +// Weak id statistic is Cragg-Donald F stat, rk Wald F stat if not +// ranktest exits with error if not full rank so can use iv1_ct and rhs1_ct etc. 
+ if "`robust'`cluster'`kernel'"=="" { + scalar `widstat'=`cdf' + } + else { +// Need only test of full rank + cap `ranktestcmd' /// + (`fv_endo1') /// + (`fv_exexog1') /// + `wtexp' /// + if `touse', /// + partial(`fv_inexog1') /// + full /// + wald /// + `noconstant' /// + `robust' /// + `clopt' /// + `bwopt' /// + `kernopt' +// Canonical correlations returned in r(ccorr), sorted in descending order. +// If largest = 1, collinearities so enter error block. + local rkerror = _rc>0 | r(chi2)==. + if ~`rkerror' { + local rkerror = el(r(ccorr),1,1)==1 + } + if `rkerror' { +di as err "warning: -ranktest- error in calculating weak identification test statistics;" +di as err " may be caused by collinearities" + scalar `rkf' = . + scalar `widstat' = . + } + else { +// sdofminus used here so that F-stat matches test stat from regression with no partial + if "`cluster'"=="" { + scalar `rkf'=r(chi2)/r(N)*(`N'-`iv1_ct'-`sdofminus'-`dofminus')/`exex1_ct' + } + else { + scalar `rkf' = r(chi2)/(`N'-1) * /// + (`N'-`iv1_ct'-`sdofminus') * /// + (`N_clust'-1)/`N_clust' / /// + `exex1_ct' + } + scalar `widstat'=`rkf' + } + } // end weak-identification stat + } // end under- and weak-identification stats + +* LM redundancy test + if `endo1_ct' > 0 & "`redundant'" ~= "" & "`noid'"=="" { +* Use K-P rk statistics and LM version of test +* Statistic is the rank of the matrix of Z_1B*X_2, where Z_1B are the possibly redundant +* instruments and X_1 are the endogenous regressors; both have X_2 (exogenous regressors) +* and Z_1A (maintained excluded instruments) partialled out. LM test of rank is +* is numerically equivalent to estimation of set of RF regressions and performing +* standard LM test of possibly redundant instruments. 
+ + local rexexog1 : list fv_exexog1 - fv_redundant1 + local redlist_ct : word count `redundant1' +* LM version requires only -nullrank- rk statistics so would not need -all- option + tempname rkmatrix + qui `ranktestcmd' /// + (`fv_endo1') /// + (`fv_redundant1') /// + `wtexp' /// + if `touse', /// + partial(`fv_inexog1' `rexexog1') /// + null /// + `noconstant' /// + `robust' /// + `clopt' /// + `bwopt' /// + `kernopt' + mat `rkmatrix'=r(rkmatrix) + tempname redstat redp +* dof adjustment needed because it doesn't use the adjusted S + if "`cluster'"=="" { + scalar `redstat' = `rkmatrix'[1,1]/r(N)*(`N'-`dofminus') + } + else { +* No dofminus adjustment needed for cluster-robust + scalar `redstat' = `rkmatrix'[1,1] + } + local reddf = `endo1_ct'*`redlist_ct' + scalar `redp' = chiprob(`reddf',`redstat') + } + +* End of identification stats block + +******************************************************************************************* +* Error-checking block +******************************************************************************************* + +* Check if adequate number of observations + if `N' <= `iv1_ct' { +di in r "Error: number of observations must be greater than number of instruments" +di in r " including constant." + error 2001 + } + +* Check if robust VCV matrix is of full rank + if ("`gmm2s'`robust'`cluster'`kernel'" != "") & (`rankS' < `iv1_ct') { +* Robust covariance matrix not of full rank means either a singleton dummy or too few +* clusters (in which case the indiv SEs are OK but no F stat or 2-step GMM is possible), +* or there are too many AC/HAC-lags, or the HAC covariance estimator +* isn't positive definite (possible with truncated and Tukey-Hanning kernels) +* or nocollin option has been used. +* Previous versions of ivreg2 exited if 2-step GMM but beta and VCV may be OK. +* Continue but J, F, and C stat (if present) all possibly meaningless. +* Set j = missing so that problem can be reported in output. + scalar `j' = . 
+ if "`orthog'"!="" { + scalar `cstat' = . + } + if "`endogtest'"!="" { + scalar `estat' = . + } + } + +* End of error-checking block + +********************************************************************************************** +* Post and display results. +******************************************************************************************* + +// rankV = rhs1_ct except if nocollin +// rankS = iv1_ct except if nocollin +// nocollin means count may exceed rank (because of dropped vars), so rank #s foolproof + +// Add back in omitted vars from "0" varlists unless bvclean requested +// or unless there are no omitted regressors that need adding back in. + if ~`bvclean' & (`rhs0_ct' > `rhs1_ct') { + AddOmitted, bmat(`b') vmat(`V') cnb0(`cnb0') cnb1(`cnb1') + mat `b' = r(b) + mat `V' = r(V) +// build fv info (base, empty, etc.) unless there was partialling out + if `fvops' & ~`partial_ct' { + local bfv "buildfvinfo" + } + } + +******************************************************************************************* + +// restore data if preserved for partial option + if (`partial_ct' | "`absorb'"!="") { + restore + } + + if "`small'"!="" { + local NminusK = `N'-`rankxx'-`sdofminus' + capture ereturn post `b' `V', dep(`depname') obs(`N') esample(`touse') dof(`NminusK') `bfv' + } + else { + capture ereturn post `b' `V', dep(`depname') obs(`N') esample(`touse') `bfv' + } + + local rc = _rc + if `rc' == 504 { +di in red "Error: estimated variance-covariance matrix has missing values" + exit 504 + } + if `rc' == 506 { +di in red "Error: estimated variance-covariance matrix not positive-definite" + exit 506 + } + if `rc' > 0 { +di in red "Error: estimation failed - could not post estimation results" + exit `rc' + } + + local mok =1 // default - margins OK + * ivreghdfe 1.1.1: Override to enable -margins-; dangerous + *local mok = `mok' & ~`partial_ct' // but not if partialling out + *local mok = `mok' & ~(`fvops' & `bvclean') // nor if there are FVs and the base 
vars are not in e(b) + if `mok' & `endo1_ct' { // margins can be used, endog regressors + ereturn local marginsnotok "Residuals SCores" // same as official -ivregress- + ereturn local marginsok "XB default" + } + else if `mok' & ~`endo1_ct' { // margins can be used, no endog regressors + ereturn local marginsok "XB default" // same as official -regress' + } + else { // don't allow margins + ereturn local marginsnotok "Residuals SCores XB default" + } + +// Original varlists without removed duplicates, collinears, etc. +// "0" varlists after removing duplicates and reclassifying vars, and including omitteds, FV base vars, etc. +// "1" varlists without omitted, FV base vars, and partialled-out vars + ereturn local ecollin `ecollin' + ereturn local collin `collin' + ereturn local dups `dups' + ereturn local partial1 `partial1' + ereturn local partial `partial' + ereturn local inexog1 `inexog1' + ereturn local inexog0 `inexog0' + ereturn local inexog `inexog' + ereturn local exexog1 `exexog1' + ereturn local exexog0 `exexog0' + ereturn local exexog `exexog' + ereturn local insts1 `exexog1' `inexog1' + ereturn local insts0 `exexog0' `inexog0' + ereturn local insts `exexog' `inexog' + ereturn local instd1 `endo1' + ereturn local instd0 `endo0' + ereturn local instd `endo' + ereturn local depvar1 `lhs1' + ereturn local depvar0 `lhs0' + ereturn local depvar `lhs' + + ereturn scalar inexog_ct =`inexog1_ct' + ereturn scalar exexog_ct =`exex1_ct' + ereturn scalar endog_ct =`endo1_ct' + ereturn scalar partial_ct =`partial_ct' + + if "`smatrix'" == "" { + ereturn matrix S `S' + } + else { + ereturn matrix S `S0' // it's a copy so original won't be zapped + } + +* No weighting matrix defined for LIML and kclass + if "`wmatrix'"=="" & "`liml'`kclassopt'"=="" { + ereturn matrix W `W' + } + else if "`liml'`kclassopt'"=="" { + ereturn matrix W `wmatrix' // it's a copy so original won't be zapped + } + + if "`kernel'"!="" { + ereturn local kernel "`kernel'" + ereturn scalar bw=`bw' + 
ereturn local tvar "`tvar'" + if "`ivar'" ~= "" { + ereturn local ivar "`ivar'" + } + if "`bwchoice'" ~= "" { + ereturn local bwchoice "`bwchoice'" + } + } + + if "`small'"!="" { + ereturn scalar df_r=`df_r' + ereturn local small "small" + } + if "`nopartialsmall'"=="" { + ereturn local partialsmall "small" + } + + + if "`robust'" != "" { + local vce "robust" + } + if "`cluster1'" != "" { + if "`cluster2'"=="" { + local vce "`vce' cluster" + } + else { + local vce "`vce' two-way cluster" + } + } + if "`kernel'" != "" { + if "`robust'" != "" { + local vce "`vce' hac" + } + else { + local vce "`vce' ac" + } + local vce "`vce' `kernel' bw=`bw'" + } + if "`sw'" != "" { + local vce "`vce' sw" + } + if "`psd'" != "" { + local vce "`vce' `psd'" + } + local vce : list clean vce + local vce = lower("`vce'") + ereturn local vce `vce' + + if "`cluster'"!="" { + ereturn scalar N_clust=`N_clust' + ereturn local clustvar `cluster' + } + if "`cluster2'"!="" { + ereturn scalar N_clust1=`N_clust1' + ereturn scalar N_clust2=`N_clust2' + ereturn local clustvar1 `cluster1' + ereturn local clustvar2 `cluster2' + } + + if "`robust'`cluster'" != "" { + ereturn local vcetype "Robust" + } + + ereturn scalar df_m=`df_m' + ereturn scalar sdofminus=`sdofminus' + ereturn scalar dofminus=`dofminus' + ereturn scalar center=`center' + ereturn scalar r2=`r2' + ereturn scalar rmse=`rmse' + ereturn scalar rss=`rss' + ereturn scalar mss=`mss' + ereturn scalar r2_a=`r2_a' + ereturn scalar F=`F' + ereturn scalar Fp=`Fp' + ereturn scalar Fdf1=`Fdf1' + ereturn scalar Fdf2=`Fdf2' + ereturn scalar yy=`yy' + ereturn scalar yyc=`yyc' + ereturn scalar r2u=`r2u' + ereturn scalar r2c=`r2c' + ereturn scalar condzz=`condzz' + ereturn scalar condxx=`condxx' + ereturn scalar rankzz=`rankzz' + ereturn scalar rankxx=`rankxx' + ereturn scalar rankS=`rankS' + ereturn scalar rankV=`rankV' + ereturn scalar ll = -0.5 * (`N'*ln(2*_pi) + `N'*ln(`rss'/`N') + `N') + +* Always save J. 
Also save as Sargan if homoskedastic; save A-R if LIML. + ereturn scalar j=`j' + ereturn scalar jdf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar jp=`jp' + } + if ("`robust'`cluster'"=="") { + ereturn scalar sargan=`j' + ereturn scalar sargandf=`jdf' + if `j' != 0 & `j' != . { + ereturn scalar sarganp=`jp' + } + } + if "`liml'"!="" { + ereturn scalar arubin=`arubin' + ereturn scalar arubin_lin=`arubin_lin' + if `j' != 0 & `j' != . { + ereturn scalar arubinp=`arubinp' + ereturn scalar arubin_linp=`arubin_linp' + } + ereturn scalar arubindf=`jdf' + } + + if "`orthog'"!="" { + ereturn scalar cstat=`cstat' + if `cstat'!=0 & `cstat' != . { + ereturn scalar cstatp=`cstatp' + ereturn scalar cstatdf=`cstatdf' + ereturn local clist `orthog1' + } + } + + if "`endogtest'"!="" { + ereturn scalar estat=`estat' + if `estat'!=0 & `estat' != . { + ereturn scalar estatp=`estatp' + ereturn scalar estatdf=`estatdf' + ereturn local elist `endogtest1' + } + } + + if `endo1_ct' > 0 & "`noid'"=="" { + ereturn scalar idstat=`idstat' + ereturn scalar iddf=`iddf' + ereturn scalar idp=`idp' + ereturn scalar cd=`cd' + ereturn scalar widstat=`widstat' + ereturn scalar cdf=`cdf' + capture ereturn matrix ccev=`cceval' + capture ereturn matrix cdev `cdeval' + capture ereturn scalar rkf=`rkf' + } + + if "`redundant'"!="" & "`noid'"=="" { + ereturn scalar redstat=`redstat' + ereturn scalar redp=`redp' + ereturn scalar reddf=`reddf' + ereturn local redlist `redundant1' + } + + if "`first'`ffirst'`savefirst'`sfirst'`savesfirst'" != "" & `endo1_ct'>0 { +// Capture here because firstmat may be empty if mvs encountered in 1st stage regressions + capture ereturn matrix first `firstmat' + ereturn scalar arf=`arf' + ereturn scalar arfp=`arfp' + ereturn scalar archi2=`archi2' + ereturn scalar archi2p=`archi2p' + ereturn scalar ardf=`ardf' + ereturn scalar ardf_r=`ardf_r' + ereturn scalar sstat=`sstat' + ereturn scalar sstatp=`sstatp' + ereturn scalar sstatdf=`sstatdf' + } +// not saved if empty + 
ereturn local firsteqs `firsteqs' + ereturn local rfeq `rfeq' + ereturn local sfirsteq `sfirsteq' + + if "`liml'"!="" { + ereturn local model "liml" + ereturn scalar kclass=`kclass' + ereturn scalar lambda=`lambda' + if `fuller' > 0 & `fuller' < . { + ereturn scalar fuller=`fuller' + } + } + else if "`kclassopt'" != "" { + ereturn local model "kclass" + ereturn scalar kclass=`kclass' + } + else if "`gmm2s'`cue'`b0'`wmatrix'"=="" { + if "`endo1'" == "" { + ereturn local model "ols" + } + else { + ereturn local model "iv" + } + } + else if "`cue'`b0'"~="" { + ereturn local model "cue" + } + else if "`gmm2s'"~="" { + ereturn local model "gmm2s" + } + else if "`wmatrix'"~="" { + ereturn local model "gmmw" + } + else { +* Should never enter here + ereturn local model "unknown" + } + + if "`weight'" != "" { + ereturn local wexp "=`exp'" + ereturn local wtype `weight' + } + ereturn local cmd `ivreg2cmd' + ereturn local ranktestcmd `rkcmd' + ereturn local version `lversion' + ereturn scalar nocollin =("`nocollin'"~="") + ereturn scalar partialcons =`partialcons' + ereturn scalar cons =`cons' + + ereturn local predict "`ivreg2cmd'_p" + + if "`e(model)'"=="gmm2s" & "`wmatrix'"=="" { + local title2 "2-Step GMM estimation" + } + else if "`e(model)'"=="gmm2s" & "`wmatrix'"~="" { + local title2 "2-Step GMM estimation with user-supplied first-step weighting matrix" + } + else if "`e(model)'"=="gmmw" { + local title2 "GMM estimation with user-supplied weighting matrix" + } + else if "`e(model)'"=="cue" & "`b0'"=="" { + local title2 "CUE estimation" + } + else if "`e(model)'"=="cue" & "`b0'"~="" { + local title2 "CUE evaluated at user-supplied parameter vector" + } + else if "`e(model)'"=="ols" { + local title2 "OLS estimation" + } + else if "`e(model)'"=="iv" { + local title2 "IV (2SLS) estimation" + } + else if "`e(model)'"=="liml" { + local title2 "LIML estimation" + } + else if "`e(model)'"=="kclass" { + local title2 "k-class estimation" + } + else { +* Should never reach here 
+ local title2 "unknown estimation" + } + if "`e(vcetype)'" == "Robust" { + local hacsubtitle1 "heteroskedasticity" + } + if "`e(kernel)'"!="" & "`e(clustvar)'"=="" { + local hacsubtitle3 "autocorrelation" + } + if "`kiefer'"!="" { + local hacsubtitle3 "within-cluster autocorrelation (Kiefer)" + } + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local hacsubtitle3 "clustering on `e(clustvar)'" + } + else { + local hacsubtitle3 "clustering on `e(clustvar1)' and `e(clustvar2)'" + } + if "`e(kernel)'" != "" { + local hacsubtitle4 "and kernel-robust to common correlated disturbances (Driscoll-Kraay)" + } + } + if "`hacsubtitle1'"~="" & "`hacsubtitle3'" ~= "" { + local hacsubtitle2 " and " + } + if "`title'"=="" { + ereturn local title "`title1'`title2'" + } + else { + ereturn local title "`title'" + } + if "`subtitle'"~="" { + ereturn local subtitle "`subtitle'" + } + local hacsubtitle "`hacsubtitle1'`hacsubtitle2'`hacsubtitle3'" + if "`b0'"~="" { + ereturn local hacsubtitleB "Estimates based on supplied parameter vector" + } + else if "`hacsubtitle'"~="" & "`gmm2s'`cue'"~="" { + ereturn local hacsubtitleB "Estimates efficient for arbitrary `hacsubtitle'" + } + else if "`wmatrix'"~="" { + ereturn local hacsubtitleB "Efficiency of estimates dependent on weighting matrix" + } + else { + ereturn local hacsubtitleB "Estimates efficient for homoskedasticity only" + } + if "`hacsubtitle'"~="" { + ereturn local hacsubtitleV "Statistics robust to `hacsubtitle'" + } + else { + ereturn local hacsubtitleV "Statistics consistent for homoskedasticity only" + } + if "`hacsubtitle4'"~="" { + ereturn local hacsubtitleV2 "`hacsubtitle4'" + } + if "`sw'"~="" { + ereturn local hacsubtitleV "Stock-Watson heteroskedastic-robust statistics (BETA VERSION)" + } + + if ("`absorb'" != "") { + mata: HDFE.post_footnote() + assert e(N_hdfe) != . 
+ + if ("`residuals'" != "") { + mata: HDFE.save_variable("`residuals'", HDFE.solution.resid, "Residuals") // do we need hdfe_residuals if we have HDFE.solution.resid ? + mata: st_global("e(resid)", "`residuals'") + reghdfe, store_alphas + } + } + } + +******************************************************************************************* +* Display results unless ivreg2 called just to generate stats or nooutput option + + if "`nooutput'" == "" { + +// Display supplementary first-stage/RF results + if "`savesfirst'`saverf'`savefirst'" != "" { + DispStored `"`savesfirst'"' `"`saverf'"' `"`savefirst'"' + } + if "`rf'" != "" { + local eqname "`e(rfeq)'" + tempname ivest + _estimates hold `ivest', copy + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to display stored reduced form estimation." +di + } + else { + DispSFirst "rf" `"`plus'"' `"`level'"' `"`nofooter'"' `"`ivreg2name'"' "`dispopt'" + } + _estimates unhold `ivest' + } + if "`first'" != "" { + DispFirst `"`ivreg2name'"' + } + if "`sfirst'"!="" { + local eqname "`e(sfirsteq)'" + tempname ivest + _estimates hold `ivest', copy + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to display stored first-stage/reduced form estimations." +di + } + else { + DispSFirst "sfirst" `"`plus'"' `"`level'"' `"`nofooter'"' `"`ivreg2name'"' "`dispopt'" + } + _estimates unhold `ivest' + } + if "`first'`ffirst'`sfirst'" != "" { + DispFFirst `"`ivreg2name'"' + } + +// Display main output. 
Can be standard ivreg2, or first-stage-type results + if "`e(model)'"=="first" | "`e(model)'"=="rf" | "`e(model)'"=="sfirst" { + DispSFirst "`e(model)'" `"`plus'"' `"`level'"' `"`nofooter'"' `"`ivreg2name'"' "`dispopt'" + } + else { + DispMain `"`noheader'"' `"`plus'"' `"`level'"' `"`nofooter'"' `"`ivreg2name'"' "`dispopt'" + } + } + +// Drop first stage estimations unless explicitly saved or if replay + if "`savefirst'" == "" { + local firsteqs "`e(firsteqs)'" + foreach eqname of local firsteqs { + capture estimates drop `eqname' + } + ereturn local firsteqs + } +// Drop reduced form estimation unless explicitly saved or if replay + if "`saverf'" == "" { + local eqname "`e(rfeq)'" + capture estimates drop `eqname' + ereturn local rfeq + } +// Drop first stage/reduced form estimation unless explicitly saved or if replay + if "`savesfirst'" == "" { + local eqname "`e(sfirsteq)'" + capture estimates drop `eqname' + ereturn local sfirsteq + } + +end + +******************************************************************************************* +* SUBROUTINES +******************************************************************************************* + +// ************* Display system of or single first-stage and/or RF estimations ************ // + +program define DispSFirst, eclass + args model plus level nofooter helpfile dispopt + version 11.2 + +di + if "`model'"=="first" { +di in gr "First-stage regression of `e(depvar)':" + } + else if "`model'"=="rf" { + local strlen = length("`e(depvar)'")+25 +di in gr "Reduced-form regression: `e(depvar)'" +di in smcl in gr "{hline `strlen'}" + } + else if "`model'"=="sfirst" { +di in gr "System of first-stage/reduced-form regressions:" +di in smcl in gr "{hline 47}" + } + +// Display coefficients etc. 
+// Header info + if "`e(hacsubtitleV)'" ~= "" { +di in gr _n "`e(hacsubtitleV)'" + } + if "`e(hacsubtitleV2)'" ~= "" { +di in gr "`e(hacsubtitleV2)'" + } +di in gr "Number of obs = " _col(31) in ye %8.0f e(N) + if "`e(kernel)'"!="" { +di in gr " kernel=`e(kernel)'; bandwidth=" `e(bw)' + if "`e(bwchoice)'"!="" { +di in gr " `e(bwchoice)'" + } +di in gr " time variable (t): " in ye e(tvar) + if "`e(ivar)'" != "" { +di in gr " group variable (i): " in ye e(ivar) + } + } + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local N_clust `e(N_clust)' + local clustvar `e(clustvar)' + } + else { + local N_clust `e(N_clust1)' + local clustvar `e(clustvar1)' + } +di in gr "Number of clusters (`clustvar') = " _col(33) in ye %6.0f `N_clust' + } + if "`e(clustvar2)'"!="" { +di in gr "Number of clusters (" "`e(clustvar2)'" ") = " _col(33) in ye %6.0f e(N_clust2) + } + +// Unfortunate but necessary hack here: to suppress message about cluster adjustment of +// standard error, clear e(clustvar) and then reset it after display + local cluster `e(clustvar)' + ereturn local clustvar + +// Display output + ereturn display, `plus' level(`level') `dispopt' + ereturn local clustvar `cluster' + +end + +// ************* Display main estimation outpout ************** // + +program define DispMain, rclass + args noheader plus level nofooter helpfile dispopt + version 11.2 +* Prepare for problem resulting from rank(S) being insufficient +* Results from insuff number of clusters, too many lags in HAC, +* to calculate robust S matrix, HAC matrix not PD, singleton dummy, +* and indicated by missing value for j stat +* Macro `rprob' is either 1 (problem) or 0 (no problem) + capture local rprob ("`e(j)'"==".") + + if "`noheader'"=="" { + if "`e(title)'" ~= "" { +di in gr _n "`e(title)'" + local tlen=length("`e(title)'") +di in gr "{hline `tlen'}" + } + if "`e(subtitle)'" ~= "" { +di in gr "`e(subtitle)'" + } + if "`e(model)'"=="liml" | "`e(model)'"=="kclass" { +di in gr "k =" %7.5f 
`e(kclass)' + } + if "`e(model)'"=="liml" { +di in gr "lambda =" %7.5f `e(lambda)' + } + if e(fuller) > 0 & e(fuller) < . { +di in gr "Fuller parameter=" %-5.0f `e(fuller)' + } + if "`e(hacsubtitleB)'" ~= "" { +di in gr _n "`e(hacsubtitleB)'" _c + } + if "`e(hacsubtitleV)'" ~= "" { +di in gr _n "`e(hacsubtitleV)'" + } + if "`e(hacsubtitleV2)'" ~= "" { +di in gr "`e(hacsubtitleV2)'" + } + if "`e(kernel)'"!="" { +di in gr " kernel=`e(kernel)'; bandwidth=" `e(bw)' + if "`e(bwchoice)'"!="" { +di in gr " `e(bwchoice)'" + } +di in gr " time variable (t): " in ye e(tvar) + if "`e(ivar)'" != "" { +di in gr " group variable (i): " in ye e(ivar) + } + } + di + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local N_clust `e(N_clust)' + local clustvar `e(clustvar)' + } + else { + local N_clust `e(N_clust1)' + local clustvar `e(clustvar1)' + } +di in gr "Number of clusters (`clustvar') = " _col(33) in ye %6.0f `N_clust' _continue + } +di in gr _col(55) "Number of obs = " in ye %8.0f e(N) + if "`e(clustvar2)'"!="" { +di in gr "Number of clusters (" "`e(clustvar2)'" ") = " _col(33) in ye %6.0f e(N_clust2) _continue + } +di in gr _c _col(55) "F(" %3.0f e(Fdf1) "," %6.0f e(Fdf2) ") = " + if e(F) < 99999 { +di in ye %8.2f e(F) + } + else { +di in ye %8.2e e(F) + } +di in gr _col(55) "Prob > F = " in ye %8.4f e(Fp) + +di in gr "Total (centered) SS = " in ye %12.0g e(yyc) _continue +di in gr _col(55) "Centered R2 = " in ye %8.4f e(r2c) +di in gr "Total (uncentered) SS = " in ye %12.0g e(yy) _continue +di in gr _col(55) "Uncentered R2 = " in ye %8.4f e(r2u) +di in gr "Residual SS = " in ye %12.0g e(rss) _continue +di in gr _col(55) "Root MSE = " in ye %8.4g e(rmse) +di + } + +* Display coefficients etc. 
+* Unfortunate but necessary hack here: to suppress message about cluster adjustment of +* standard error, clear e(clustvar) and then reset it after display + local cluster `e(clustvar)' + * ereturn local clustvar + * ereturn display, `plus' level(`level') `dispopt' + * ereturn local clustvar `cluster' +* Sergio: workaround but not 100% sure that it will always work as wanted +* Trick: _coef_table.ado just calls a Mata function that reads locals + local noclustreport noclustreport // undocumented in _coef_table.ado + mata: _coef_table() + return add // adds r(level), r(table), etc. to ereturn (before the footnote deletes them) + + + +* Display 1st footer with identification stats +* Footer not displayed if -nofooter- option or if pure OLS, i.e., model="ols" and Sargan-Hansen=0 + if ~("`nofooter'"~="" | (e(model)=="ols" & (e(sargan)==0 | e(j)==0))) { + +* Under ID test + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##idtest:Underidentification test}" + if "`e(vcetype)'`e(kernel)'"=="" { +di in gr _c " (Anderson canon. corr. 
LM statistic):" + } + else { +di in gr _c " (Kleibergen-Paap rk LM statistic):" + } +di in ye _col(71) %8.3f e(idstat) +di in gr _col(52) "Chi-sq(" in ye e(iddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(idp) +* IV redundancy statistic + if "`e(redlist)'"!="" { +di in gr "-redundant- option:" +di in smcl _c "{help `helpfile'##redtest:IV redundancy test}" +di in gr _c " (LM test of redundancy of specified instruments):" +di in ye _col(71) %8.3f e(redstat) +di in gr _col(52) "Chi-sq(" in ye e(reddf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(redp) +di in gr "Instruments tested: " _c + Disp `e(redlist)', _col(23) + } +di in smcl in gr "{hline 78}" + } +* Report Cragg-Donald statistic + if "`e(instd)'"~="" & "`e(idstat)'"~="" { +di in smcl _c "{help `helpfile'##widtest:Weak identification test}" +di in gr " (Cragg-Donald Wald F statistic):" in ye _col(71) %8.3f e(cdf) + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr " (Kleibergen-Paap rk Wald F statistic):" in ye _col(71) %8.3f e(widstat) + } +di in gr _c "Stock-Yogo weak ID test critical values:" + Disp_cdsy, model(`e(model)') k2(`e(exexog_ct)') nendog(`e(endog_ct)') fuller("`e(fuller)'") col1(42) col2(73) + if `r(cdmissing)' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." 
+ } + } + di in smcl in gr "{hline 78}" + } + +* Report either (a) Sargan-Hansen-C stats, or (b) robust covariance matrix problem +* e(model)="gmmw" means user-supplied weighting matrix and Hansen J using 2nd-step resids reported + if `rprob' == 0 { +* Display overid statistic + if "`e(vcetype)'" == "Robust" | "`e(model)'" == "gmmw" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Hansen J statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } + else { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##overidtests:Sargan statistic}" +di in gr _c " (Lagrange multiplier test of excluded instruments):" + } + } +di in ye _col(71) %8.3f e(j) + if e(jdf) { +di in gr _col(52) "Chi-sq(" in ye e(jdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(jp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + +* Display orthog option: C statistic (difference of Sargan statistics) + if e(cstat) != . { +* If C-stat = 0 then warn, otherwise output + if e(cstat) > 0 { +di in gr "-orthog- option:" + if "`e(vcetype)'" == "Robust" { +di in gr _c "Hansen J statistic (eqn. excluding suspect orthog. conditions): " + } + else { +di in gr _c "Sargan statistic (eqn. 
excluding suspect orthogonality conditions):" + } +di in ye _col(71) %8.3f e(j)-e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(jdf)-e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f chiprob(e(jdf)-e(cstatdf),e(j)-e(cstat)) +di in smcl _c "{help `helpfile'##ctest:C statistic}" +di in gr _c " (exogeneity/orthogonality of suspect instruments): " +di in ye _col(71) %8.3f e(cstat) +di in gr _col(52) "Chi-sq(" in ye e(cstatdf) in gr ") P-val = " /* + */ in ye _col(73) %6.4f e(cstatp) +di in gr "Instruments tested: " _c + Disp `e(clist)', _col(23) + } + if e(cstat) == 0 { +di in gr _n "Collinearity/identification problems in eqn. excl. suspect orthog. conditions:" +di in gr " C statistic not calculated for -orthog- option" + } + } + } + else { +* Problem exists with robust VCV - notify and list possible causes +di in r "Warning: estimated covariance matrix of moment conditions not of full rank." + if e(j)==. { +di in r " overidentification statistic not reported, and standard errors and" + } +di in r " model tests should be interpreted with caution." +di in r "Possible causes:" + if e(nocollin) { +di in r " collinearities in regressors or instruments (with -nocollin- option)" + } + if "`e(N_clust)'" != "" { +di in r " number of clusters insufficient to calculate robust covariance matrix" + } + if "`e(kernel)'" != "" { +di in r " covariance matrix of moment conditions not positive definite" +di in r " covariance matrix uses too many lags" + } +di in r " singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)" +di in r in smcl _c "{help `helpfile'##partial:partial}" +di in r " option may address problem." + } + +* Display endog option: endogeneity test statistic + if e(estat) != . 
{ +* If stat = 0 then warn, otherwise output + if e(estat) > 0 { +di in gr "-endog- option:" +di in smcl _c "{help `helpfile'##endogtest:Endogeneity test}" +di in gr _c " of endogenous regressors: " +di in ye _col(71) %8.3f e(estat) +di in gr _col(52) "Chi-sq(" in ye e(estatdf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(estatp) +di in gr "Regressors tested: " _c + Disp `e(elist)', _col(23) + } + if e(estat) == 0 { +di in gr _n "Collinearity/identification problems in restricted equation:" +di in gr " Endogeneity test statistic not calculated for -endog- option" + } + } + + di in smcl in gr "{hline 78}" +* Display AR overid statistic if LIML and not robust + if "`e(model)'" == "liml" & "`e(vcetype)'" ~= "Robust" & "`e(kernel)'" == "" { + if "`e(instd)'" != "" { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (overidentification test of all instruments):" + } + else { +di in smcl _c "{help `helpfile'##liml:Anderson-Rubin statistic}" +di in gr _c " (LR test of excluded instruments):" + } +di in ye _col(72) %7.3f e(arubin) + if e(arubindf) { +di in gr _col(52) "Chi-sq(" in ye e(arubindf) /* + */ in gr ") P-val = " in ye _col(73) %6.4f e(arubinp) + } + else { +di in gr _col(50) "(equation exactly identified)" + } + di in smcl in gr "{hline 78}" + } + } + +* Display 2nd footer with variable lists + if "`nofooter'"=="" { + +* Warn about dropped instruments if any +* Can happen with nocollin option and rank(S) < cols(S) + if colsof(e(S)) > e(rankzz) { +di in gr "Collinearities detected among instruments: " _c +di in gr colsof(e(S))-e(rankzz) " instrument(s) dropped" + } + + if "`e(collin)'`e(dups)'" != "" | e(partial_ct) { +* If collinearities, duplicates or partial, abbreviated varlists saved with a 1 at the end + local one "1" + } + if e(endog_ct) { + di in gr "Instrumented:" _c + Disp `e(instd`one')', _col(23) + } + if e(inexog_ct) { + di in gr "Included instruments:" _c + Disp `e(inexog`one')', _col(23) + } + if e(exexog_ct) { + 
di in gr "Excluded instruments:" _c + Disp `e(exexog`one')', _col(23) + } + if e(partial_ct) { + if e(partialcons) { + local partial "`e(partial`one')' _cons" + } + else { + local partial "`e(partial`one')'" + } +di in smcl _c "{help `helpfile'##partial:Partialled-out}" + di in gr ":" _c + Disp `partial', _col(23) + if "`e(partialsmall)'"=="" { +di in gr _col(23) "nb: total SS, model F and R2s are after partialling-out;" +di in gr _col(23) " any {help `helpfile'##s_small:small-sample adjustments} do not include" +di in gr _col(23) " partialled-out variables in regressor count K" + } + else { +di in gr _col(23) "nb: total SS, model F and R2s are after partialling-out;" +di in gr _col(23) " any {help `helpfile'##s_small:small-sample adjustments} include partialled-out" +di in gr _col(23) " variables in regressor count K" + } + } + if "`e(dups)'" != "" { + di in gr "Duplicates:" _c + Disp `e(dups)', _col(23) + } + if "`e(collin)'" != "" { + di in gr "Dropped collinear:" _c + Disp `e(collin)', _col(23) + } + if "`e(ecollin)'" != "" { + di in gr "Reclassified as exog:" _c + Disp `e(ecollin)', _col(23) + } + di in smcl in gr "{hline 78}" + + if (e(N_hdfe)!= .) reghdfe_footnote + } +end + +************************************************************************************** + +// ************ Display collinearity and duplicates warning messages ************ // + +program define DispCollinDups + version 11.2 + if "`e(dups)'" != "" { +di in gr "Warning - duplicate variables detected" +di in gr "Duplicates:" _c + Disp `e(dups)', _col(16) + } + if "`e(collin)'" != "" { +di in gr "Warning - collinearities detected" +di in gr "Vars dropped:" _c + Disp `e(collin)', _col(16) + } +end + +// ************* Display all first-stage estimations ************ // + +program define DispFirst + version 11.2 + args helpfile + tempname firstmat ivest sheapr2 pr2 F df df_r pvalue + tempname SWF SWFdf1 SWFdf2 SWFp SWr2 + + mat `firstmat'=e(first) + if `firstmat'[1,1] == . 
{ +di +di in ye "Unable to display first-stage estimates; macro e(first) is missing" + exit + } +di in gr _newline "First-stage regressions" +di in smcl in gr "{hline 23}" +di + local endo1 : colnames(`firstmat') + local nrvars : word count `endo1' + local firsteqs "`e(firsteqs)'" + local nreqs : word count `firsteqs' + if `nreqs' < `nrvars' { +di in ye "Unable to display all first-stage regressions." +di in ye "There may be insufficient room to store results using -estimates store-," +di in ye "or names of endogenous regressors may be too long to store the results." +di in ye "Try dropping one or more estimation results using -estimates drop-," +di in ye "using the -savefprefix- option, or using shorter variable names." +di + } + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + foreach eqname of local firsteqs { + _estimates hold `ivest' + capture estimates restore `eqname' + if _rc != 0 { +di +di in ye "Unable to list stored estimation `eqname'." +di in ye "There may be insufficient room to store results using -estimates store-," +di in ye "or names of endogenous regressors may be too long to store the results." +di in ye "Try dropping one or more estimation results using -estimates drop-," +di in ye "using the -savefprefix- option, or using shorter variable names." 
+di + } + else { + local vn "`e(depvar)'" + estimates replay `eqname', noheader + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] + mat `SWF' =`firstmat'["SWF","`vn'"] + mat `SWFdf1' =`firstmat'["SWFdf1","`vn'"] + mat `SWFdf2' =`firstmat'["SWFdf2","`vn'"] + mat `SWFp' =`firstmat'["SWFp","`vn'"] + mat `SWr2' =`firstmat'["SWr2","`vn'"] + +di in gr "F test of excluded instruments:" +di in gr " F(" %3.0f `df'[1,1] "," %6.0f `df_r'[1,1] ") = " in ye %8.2f `F'[1,1] +di in gr " Prob > F = " in ye %8.4f `pvalue'[1,1] + +di in smcl "{help `helpfile'##swstats:Sanderson-Windmeijer multivariate F test of excluded instruments:}" +di in gr " F(" %3.0f `SWFdf1'[1,1] "," %6.0f `SWFdf2'[1,1] ") = " in ye %8.2f `SWF'[1,1] +di in gr " Prob > F = " in ye %8.4f `SWFp'[1,1] + +di + } + _estimates unhold `ivest' + } +end + +// ************* Display list of stored first-stage and RF estimations ************ // + +program define DispStored + args savesfirst saverf savefirst + version 11.2 + + if "`savesfirst'" != "" { + local eqlist "`e(sfirsteq)'" + } + if "`saverf'" != "" { + local eqlist "`eqlist' `e(rfeq)'" + } + if "`savefirst'" != "" { + local eqlist "`eqlist' `e(firsteqs)'" + } + local eqlist : list retokenize eqlist + +di in gr _newline "Stored estimation results" +di in smcl in gr "{hline 25}" _c + capture estimates dir `eqlist' + if "`eqlist'" != "" & _rc == 0 { +// Estimates exist and can be listed + estimates dir `eqlist' + } + else if "`eqlist'" != "" & _rc != 0 { +di +di in ye "Unable to list stored estimations." 
+di + } +end + +// ************* Display summary first-stage and ID test results ************ // + +program define DispFFirst + version 11.2 + args helpfile + tempname firstmat + tempname sheapr2 pr2 F df df_r pvalue + tempname SWF SWFdf1 SWFdf2 SWFp SWchi2 SWchi2p SWr2 + mat `firstmat'=e(first) + if `firstmat'[1,1] == . { +di +di in ye "Unable to display summary of first-stage estimates; macro e(first) is missing" + exit + } + local endo : colnames(`firstmat') + local nrvars : word count `endo' + local robust "`e(vcetype)'" + local cluster "`e(clustvar)'" + local kernel "`e(kernel)'" + local efirsteqs "`e(firsteqs)'" + + mat `df' =`firstmat'["df",1] + mat `df_r' =`firstmat'["df_r",1] + mat `SWFdf1' =`firstmat'["SWFdf1",1] + mat `SWFdf2' =`firstmat'["SWFdf2",1] + +di +di in gr _newline "Summary results for first-stage regressions" +di in smcl in gr "{hline 43}" +di + +di _c in smcl _col(44) "{help `helpfile'##swstats:(Underid)}" +di in smcl _col(65) "{help `helpfile'##swstats:(Weak id)}" + +di _c in gr "Variable |" +di _c in smcl _col(16) "{help `helpfile'##swstats:F}" in gr "(" +di _c in ye _col(17) %3.0f `df'[1,1] in gr "," in ye %6.0f `df_r'[1,1] in gr ") P-val" +di _c in gr _col(37) "|" +di _c in smcl _col(39) "{help `helpfile'##swstats:SW Chi-sq}" in gr "(" +di _c in ye %3.0f `SWFdf1'[1,1] in gr ") P-val" +di _c in gr _col(60) "|" +di _c in smcl _col(62) "{help `helpfile'##swstats:SW F}" in gr "(" +di in ye _col(67) %3.0f `SWFdf1'[1,1] in gr "," in ye %6.0f `SWFdf2'[1,1] in gr ")" + + local i = 1 + foreach vn of local endo { + + mat `sheapr2' =`firstmat'["sheapr2","`vn'"] + mat `pr2' =`firstmat'["pr2","`vn'"] + mat `F' =`firstmat'["F","`vn'"] + mat `df' =`firstmat'["df","`vn'"] + mat `df_r' =`firstmat'["df_r","`vn'"] + mat `pvalue' =`firstmat'["pvalue","`vn'"] + mat `SWF' =`firstmat'["SWF","`vn'"] + mat `SWFdf1' =`firstmat'["SWFdf1","`vn'"] + mat `SWFdf2' =`firstmat'["SWFdf2","`vn'"] + mat `SWFp' =`firstmat'["SWFp","`vn'"] + mat `SWchi2' 
=`firstmat'["SWchi2","`vn'"] + mat `SWchi2p' =`firstmat'["SWchi2p","`vn'"] + mat `SWr2' =`firstmat'["SWr2","`vn'"] + + local vnlen : length local vn + if `vnlen' > 12 { + local vn : piece 1 12 of "`vn'" + } +di _c in y %-12s "`vn'" _col(14) in gr "|" _col(18) in y %8.2f `F'[1,1] +di _c _col(28) in y %8.4f `pvalue'[1,1] +di _c _col(37) in g "|" _col(42) in y %8.2f `SWchi2'[1,1] _col(51) in y %8.4f `SWchi2p'[1,1] +di _col(60) in g "|" _col(65) in y %8.2f `SWF'[1,1] + local i = `i' + 1 + } +di + + if "`robust'`cluster'" != "" { + if "`cluster'" != "" { + local rtype "cluster-robust" + } + else if "`kernel'" != "" { + local rtype "heteroskedasticity and autocorrelation-robust" + } + else { + local rtype "heteroskedasticity-robust" + } + } + else if "`kernel'" != "" { + local rtype "autocorrelation-robust" + } + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: first-stage test statistics `rtype'" +di + } + + local k2 = `SWFdf1'[1,1] +di in gr "Stock-Yogo weak ID F test critical values for single endogenous regressor:" + Disp_cdsy, model(`e(model)') k2(`e(exexog_ct)') nendog(1) fuller("`e(fuller)'") col1(36) col2(67) + if `r(cdmissing)' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(model)'"=="iv" & "`e(vcetype)'`e(kernel)'"=="" { +di in gr "NB: Critical values are for Sanderson-Windmeijer F statistic." + } + else { +di in gr "NB: Critical values are for i.i.d. errors only." + } +di + } + +* Check that SW chi-sq and F denominator are correct and = underid test dof + if e(iddf)~=`SWFdf1'[1,1] { +di in red "Warning: Error in calculating first-stage id statistics above;" +di in red " dof of SW statistics is " `SWFdf1'[1,1] ", should be L-(K-1)=`e(iddf)'." 
+ } + + tempname iddf idstat idp widstat cdf rkf + scalar `iddf'=e(iddf) + scalar `idstat'=e(idstat) + scalar `idp'=e(idp) + scalar `widstat'=e(widstat) + scalar `cdf'=e(cdf) + capture scalar `rkf'=e(rkf) +di in smcl "{help `helpfile'##idtest:Underidentification test}" +di in gr "Ho: matrix of reduced form coefficients has rank=K1-1 (underidentified)" +di in gr "Ha: matrix has rank=K1 (identified)" + if "`robust'`kernel'"=="" { +di in ye "Anderson canon. corr. LM statistic" _c + } + else { +di in ye "Kleibergen-Paap rk LM statistic" _c + } +di in gr _col(42) "Chi-sq(" in ye `iddf' in gr ")=" %-7.2f in ye `idstat' /* + */ _col(61) in gr "P-val=" %6.4f in ye `idp' + +di +di in smcl "{help `helpfile'##widtest:Weak identification test}" +di in gr "Ho: equation is weakly identified" +di in ye "Cragg-Donald Wald F statistic" _col(65) %8.2f `cdf' + if "`robust'`kernel'"~="" { +di in ye "Kleibergen-Paap Wald rk F statistic" _col(65) %8.2f `rkf' + } +di + +di in gr "Stock-Yogo weak ID test critical values for K1=`e(endog_ct)' and L1=`e(exexog_ct)':" + Disp_cdsy, model(`e(model)') k2(`e(exexog_ct)') nendog(`e(endog_ct)') fuller("`e(fuller)'") col1(36) col2(67) + if `r(cdmissing)' { + di in gr _col(64) "" + } + else { + di in gr "Source: Stock-Yogo (2005). Reproduced by permission." + if "`e(vcetype)'`e(kernel)'"~="" { +di in gr "NB: Critical values are for Cragg-Donald F statistic and i.i.d. errors." 
+ } + } +di + + tempname arf arfp archi2 archi2p ardf ardf_r + tempname sstat sstatp sstatdf +di in smcl "{help `helpfile'##wirobust:Weak-instrument-robust inference}" +di in gr "Tests of joint significance of endogenous regressors B1 in main equation" +di in gr "Ho: B1=0 and orthogonality conditions are valid" +* Needs to be small so that adjusted dof is reflected in F stat + scalar `arf'=e(arf) + scalar `arfp'=e(arfp) + scalar `archi2'=e(archi2) + scalar `archi2p'=e(archi2p) + scalar `ardf'=e(ardf) + scalar `ardf_r'=e(ardf_r) + scalar `sstat'=e(sstat) + scalar `sstatp'=e(sstatp) + scalar `sstatdf'=e(sstatdf) +di in ye _c "Anderson-Rubin Wald test" +di in gr _col(36) "F(" in ye `ardf' in gr "," in ye `ardf_r' in gr ")=" /* + */ _col(49) in ye %7.2f `arf' _col(61) in gr "P-val=" in ye %6.4f `arfp' +di in ye _c "Anderson-Rubin Wald test" +di in gr _col(36) "Chi-sq(" in ye `ardf' in gr ")=" /* + */ _col(49) in ye %7.2f `archi2' _col(61) in gr "P-val=" in ye %6.4f `archi2p' +di in ye _c "Stock-Wright LM S statistic" +di in gr _col(36) "Chi-sq(" in ye `sstatdf' in gr ")=" /* + */ _col(49) in ye %7.2f `sstat' _col(61) in gr "P-val=" in ye %6.4f `sstatp' +di + if "`robust'`cluster'`kernel'" != "" { +di in gr "NB: Underidentification, weak identification and weak-identification-robust" +di in gr " test statistics `rtype'" +di + } + + if "`cluster'" != "" & "`e(clustvar2)'"=="" { +di in gr "Number of clusters N_clust = " in ye %10.0f e(N_clust) + } + else if "`e(clustvar2)'" ~= "" { +di in gr "Number of clusters (1) N_clust1 = " in ye %10.0f e(N_clust1) +di in gr "Number of clusters (2) N_clust2 = " in ye %10.0f e(N_clust2) + } +di in gr "Number of observations N = " in ye %10.0f e(N) +di in gr "Number of regressors K = " in ye %10.0f e(rankxx) +di in gr "Number of endogenous regressors K1 = " in ye %10.0f e(endog_ct) +di in gr "Number of instruments L = " in ye %10.0f e(rankzz) +di in gr "Number of excluded instruments L1 = " in ye %10.0f e(ardf) + if "`e(partial)'" != "" 
{ +di in gr "Number of partialled-out regressors/IVs = " in ye %10.0f e(partial_ct) +di in gr "NB: K & L do not included partialled-out variables" + } + +end + +// ************* Post first-stage and/or RF estimations ************ // + +program define PostFirstRF, eclass + version 11.2 + syntax [if] /// + [ , /// + first(string) /// can be fv + rf /// omit first(.) and rf => post system of eqns + rmse_rf(real 0) /// + bmat(name) /// + vmat(name) /// + smat(name) /// + firstmat(name) /// + lhs1(string) /// can be fv + endo1(string) /// + znames0(string) /// + znames1(string) /// + bvclean(integer 0) /// + fvops(integer 0) /// + partial_ct(integer 0) /// + robust /// + cluster(string) /// + cluster1(string) /// + cluster2(string) /// + nc(integer 0) /// + nc1(integer 0) /// + nc2(integer 0) /// + kernel(string) /// + bw(real 0) /// + ivar(name) /// + tvar(name) /// + obs(integer 0) /// + iv1_ct(integer 0) /// + cons(integer 0) /// + partialcons(integer 0) /// + dofminus(integer 0) /// + sdofminus(integer 0) /// + ] + +// renaming/copying + local N = `obs' + local N_clust = `nc' + local N_clust1 = `nc1' + local N_clust2 = `nc2' + tempname b V S + mat `b' = `bmat' + mat `V' = `vmat' + mat `S' = `smat' + + marksample touse + + mat colname `b' = `lhs1' `endo1' + mat rowname `b' = `znames1' + mat `b' = vec(`b') + mat `b' = `b'' + mat colname `V' = `: colfullnames `b'' + mat rowname `V' = `: colfullnames `b'' + mat colname `S' = `: colfullnames `b'' + mat rowname `S' = `: colfullnames `b'' + + if "`cluster'"=="" { + matrix `V'=`V'*(`N'-`dofminus')/(`N'-`iv1_ct'-`dofminus'-`sdofminus') + } + else { + matrix `V'=`V'*(`N'-1)/(`N'-`iv1_ct'-`sdofminus') /// + * `N_clust'/(`N_clust'-1) + } + +// If RF or first-stage estimation required, extract it +// also set macros for model and depvar + if "`rf'`first'"~="" { + if "`rf'"~="" { // RF + local vnum = 0 + local model rf + local depvar `lhs1' + local rmse = `rmse_rf' + } + else { // first-stage + local vnum : list posof "`first'" 
in endo1 + local vnum = `vnum' + local model first + local depvar `first' + local rmse = el(`firstmat', rownumb(`firstmat',"rmse"), colnumb(`firstmat',"`first'")) + } + local c0 = 1 + `vnum'*`iv1_ct' + local c1 = (`vnum'+1)*`iv1_ct' + mat `b' = `b'[1,`c0'..`c1'] + mat `V' = `V'[`c0'..`c1',`c0'..`c1'] + mat `S' = `S'[`c0'..`c1',`c0'..`c1'] + mat coleq `b' = "" + mat coleq `V' = "" + mat roweq `V' = "" + mat coleq `S' = "" + mat roweq `S' = "" + } + else { + local model sfirst + local eqlist `lhs1' `endo1' + } + +// reinsert omitteds etc. unless requested not to +// eqlist empty unless first-stage/rf system + if ~`bvclean' { + AddOmitted, bmat(`b') vmat(`V') cnb0(`znames0') cnb1(`znames1') eqlist(`eqlist') + mat `b' = r(b) + mat `V' = r(V) +// build fv info (base, empty, etc.) unless there was partialling out + if `fvops' & ~`partial_ct' { + local bfv "buildfvinfo" + } + } + + local dof = `N' - `iv1_ct' - `dofminus' - `sdofminus' + ereturn post `b' `V', obs(`obs') esample(`touse') dof(`dof') depname(`depvar') `bfv' + +// saved RF/first-stage equation scalars + if "`rf'`first'"~="" { + ereturn scalar rmse = `rmse' + ereturn scalar df_r = `dof' + ereturn scalar df_m = `iv1_ct' - `cons' + `sdofminus' - `partialcons' + } + ereturn scalar k_eq = `: word count `endo1'' + ereturn local cmd ivreg2 + ereturn local model `model' + ereturn matrix S `S' + + if "`kernel'"!="" { + ereturn local kernel "`kernel'" + ereturn scalar bw=`bw' + ereturn local tvar "`tvar'" + if "`ivar'" ~= "" { + ereturn local ivar "`ivar'" + } + } + + if "`robust'" != "" { + local vce "robust" + } + if "`cluster1'" != "" { + if "`cluster2'"=="" { + local vce "`vce' cluster" + } + else { + local vce "`vce' two-way cluster" + } + } + if "`kernel'" != "" { + if "`robust'" != "" { + local vce "`vce' hac" + } + else { + local vce "`vce' ac" + } + local vce "`vce' `kernel' bw=`bw'" + } + + local vce : list clean vce + local vce = lower("`vce'") + ereturn local vce `vce' + + if "`cluster'"!="" { + ereturn 
scalar N_clust=`N_clust' + ereturn local clustvar `cluster' + } + if "`cluster2'"!="" { + ereturn scalar N_clust1=`N_clust1' + ereturn scalar N_clust2=`N_clust2' + ereturn local clustvar1 `cluster1' + ereturn local clustvar2 `cluster2' + } + + if "`robust'`cluster'" != "" { + ereturn local vcetype "Robust" + } + +// Assemble output titles + if "`e(vcetype)'" == "Robust" { + local hacsubtitle1 "heteroskedasticity" + } + if "`e(kernel)'"!="" & "`e(clustvar)'"=="" { + local hacsubtitle3 "autocorrelation" + } + if "`kiefer'"!="" { + local hacsubtitle3 "within-cluster autocorrelation (Kiefer)" + } + if "`e(clustvar)'"!="" { + if "`e(clustvar2)'"=="" { + local hacsubtitle3 "clustering on `e(clustvar)'" + } + else { + local hacsubtitle3 "clustering on `e(clustvar1)' and `e(clustvar2)'" + } + if "`e(kernel)'" != "" { + local hacsubtitle4 "and kernel-robust to common correlated disturbances (Driscoll-Kraay)" + } + } + if "`hacsubtitle1'"~="" & "`hacsubtitle3'" ~= "" { + local hacsubtitle2 " and " + } + local hacsubtitle "`hacsubtitle1'`hacsubtitle2'`hacsubtitle3'" + if "`hacsubtitle'"~="" { + ereturn local hacsubtitleV "Statistics robust to `hacsubtitle'" + } + else { + ereturn local hacsubtitleV "Statistics consistent for homoskedasticity only" + } + if "`hacsubtitle4'"~="" { + ereturn local hacsubtitleV2 "`hacsubtitle4'" + } + if "`sw'"~="" { + ereturn local hacsubtitleV "Stock-Watson heteroskedastic-robust statistics (BETA VERSION)" + } + + if ("`absorb'" != "") { + mata: HDFE.post_footnote() + assert e(N_hdfe) != . 
+ } + +end + + + +************************************************************************************** +program define IsStop, sclass + /* sic, must do tests one-at-a-time, + * 0, may be very large */ + version 11.2 + if `"`0'"' == "[" { + sret local stop 1 + exit + } + if `"`0'"' == "," { + sret local stop 1 + exit + } + if `"`0'"' == "if" { + sret local stop 1 + exit + } +* per official ivreg 5.1.3 + if substr(`"`0'"',1,3) == "if(" { + sret local stop 1 + exit + } + if `"`0'"' == "in" { + sret local stop 1 + exit + } + if `"`0'"' == "" { + sret local stop 1 + exit + } + else sret local stop 0 +end + +// ************* Display list of variables ************ // + +program define Disp + version 11.2 + syntax [anything] [, _col(integer 15) ] + local maxlen = 80-`_col' + local len = 0 + local first = 1 + foreach vn in `anything' { +* Don't display if base or omitted variable + _ms_parse_parts `vn' + if ~`r(omit)' { + local vnlen : length local vn + if `len'+`vnlen' > `maxlen' { + di + local first = 1 + local len = `vnlen' + } + else { + local len = `len'+`vnlen'+1 + } + if `first' { + local first = 0 + di in gr _col(`_col') "`vn'" _c + } + else { + di in gr " `vn'" _c + } + } + } +* Finish with a newline + di +end + +// *********** Display Cragg-Donald/Stock-Yogo critical values etc. ******** // + +program define Disp_cdsy, rclass + version 11.2 + syntax , col1(integer) col2(integer) model(string) k2(integer) nendog(integer) [ fuller(string) ] + local cdmissing=1 + if "`model'"=="iv" | "`model'"=="gmm2s" | "`model'"=="gmmw" { + cdsy, type(ivbias5) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') " 5% maximal IV relative bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% maximal IV relative bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." 
{ + di in gr _col(`col1') "20% maximal IV relative bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivbias30) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "30% maximal IV relative bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% maximal IV size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize15) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "15% maximal IV size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% maximal IV size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(ivsize25) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "25% maximal IV size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`model'"=="liml" & "`fuller'"=="") | "`model'"=="cue" { + cdsy, type(limlsize10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% maximal LIML size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize15) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "15% maximal LIML size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% maximal LIML size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(limlsize25) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "25% maximal LIML size" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + } + else if ("`model'"=="liml" & "`fuller'"~="") { + cdsy, type(fullrel5) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') " 5% maximal Fuller rel. 
bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% maximal Fuller rel. bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% maximal Fuller rel. bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullrel30) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "30% maximal Fuller rel. bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax5) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') " 5% Fuller maximum bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax10) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "10% Fuller maximum bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax20) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." { + di in gr _col(`col1') "20% Fuller maximum bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + cdsy, type(fullmax30) k2(`k2') nendog(`nendog') + if "`r(cv)'"~="." 
{ + di in gr _col(`col1') "30% Fuller maximum bias" in ye _col(`col2') %6.2f r(cv) + local cdmissing=0 + } + di in gr "NB: Critical values based on Fuller parameter=1" + } + return scalar cdmissing =`cdmissing' +end + +program define cdsy, rclass + version 11.2 + syntax , type(string) k2(integer) nendog(integer) + +* type() can be ivbias5 (k2<=100, nendog<=3) +* ivbias10 (ditto) +* ivbias20 (ditto) +* ivbias30 (ditto) +* ivsize10 (k2<=100, nendog<=2) +* ivsize15 (ditto) +* ivsize20 (ditto) +* ivsize25 (ditto) +* fullrel5 (ditto) +* fullrel10 (ditto) +* fullrel20 (ditto) +* fullrel30 (ditto) +* fullmax5 (ditto) +* fullmax10 (ditto) +* fullmax20 (ditto) +* fullmax30 (ditto) +* limlsize10 (ditto) +* limlsize15 (ditto) +* limlsize20 (ditto) +* limlsize25 (ditto) + + tempname temp cv + +* Initialize critical value as MV + scalar `cv'=. + + if "`type'"=="ivbias5" { + mata: s_cdsy("`temp'", 1) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias10" { + mata: s_cdsy("`temp'", 2) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias20" { + mata: s_cdsy("`temp'", 3) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivbias30" { + mata: s_cdsy("`temp'", 4) + if `k2'<=100 & `nendog'<=3 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + + if "`type'"=="ivsize10" { + mata: s_cdsy("`temp'", 5) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize15" { + mata: s_cdsy("`temp'", 6) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize20" { + mata: s_cdsy("`temp'", 7) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="ivsize25" { + mata: s_cdsy("`temp'", 8) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel5" { + mata: s_cdsy("`temp'", 9) + if `k2'<=100 & `nendog'<=2 { + 
scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel10" { + mata: s_cdsy("`temp'", 10) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel20" { + mata: s_cdsy("`temp'", 11) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullrel30" { + mata: s_cdsy("`temp'", 12) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax5" { + mata: s_cdsy("`temp'", 13) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax10" { + mata: s_cdsy("`temp'", 14) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax20" { + mata: s_cdsy("`temp'", 15) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="fullmax30" { + mata: s_cdsy("`temp'", 16) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize10" { + mata: s_cdsy("`temp'", 17) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize15" { + mata: s_cdsy("`temp'", 18) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize20" { + mata: s_cdsy("`temp'", 19) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + + if "`type'"=="limlsize25" { + mata: s_cdsy("`temp'", 20) + if `k2'<=100 & `nendog'<=2 { + scalar `cv'=`temp'[`k2',`nendog'] + } + } + return scalar cv=`cv' +end + +// ***************************** Parse ivreg2 arguments **************** // + +program define ivparse, sclass + version 11.2 + syntax [anything(name=0)] /// + [ , /// + ivreg2name(name) /// + partial(string) /// as string because may have nonvariable in list + fwl(string) /// legacy option + orthog(varlist fv ts) /// + endogtest(varlist fv ts) /// + redundant(varlist fv ts) /// + depname(string) /// + robust /// + cluster(varlist fv 
ts) /// + bw(string) /// as string because may have noninteger option "auto" + kernel(string) /// + dkraay(integer 0) /// + sw /// + kiefer /// + center /// + NOCONSTANT /// + tvar(varname) /// + ivar(varname) /// + gmm2s /// + gmm /// + cue /// + liml /// + fuller(real 0) /// + kclass(real 0) /// + b0(string) /// + wmatrix(string) /// + NOID /// + savefirst /// + savefprefix(name) /// + saverf /// + saverfprefix(name) /// + savesfirst /// + savesfprefix(name) /// + psd0 /// + psda /// + dofminus(integer 0) /// + NOCOLLIN /// + useqr /// + bvclean /// + eform(string) /// + NOOMITTED /// + vsquish /// + noemptycells /// + baselevels /// + allbaselevels /// + ] + +// TS and FV opts based on option varlists + local tsops = ("`s(tsops)'"=="true") + local fvops = ("`s(fvops)'"=="true") +// useful boolean + local cons =("`noconstant'"=="") + + local n 0 + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + while `s(stop)'==0 { + if "`paren'"=="(" { + local ++n + if `n'>1 { +di as err `"syntax is "(all instrumented variables = instrument variables)""' + exit 198 + } + gettoken p lhs : lhs, parse(" =") + while "`p'"!="=" { + if "`p'"=="" { +di as err `"syntax is "(all instrumented variables = instrument variables)""' +di as er `"the equal sign "=" is required"' + exit 198 + } + local endo `endo' `p' + gettoken p lhs : lhs, parse(" =") + } + local exexog `lhs' + } + else { + local inexog `inexog' `lhs' + } + gettoken lhs 0 : 0, parse(" ,[") match(paren) + IsStop `lhs' + } +// lhs attached to front of inexog + gettoken lhs inexog : inexog + local endo : list retokenize endo + local inexog : list retokenize inexog + local exexog : list retokenize exexog +// If depname not provided (default) name is lhs variable + if "`depname'"=="" { + local depname `lhs' + } + +// partial, including legacy FWL option + local partial `partial' `fwl' +// Need to nonvars "_cons" from list if present +// Also set `partialcons' local to 0/1 +// Need word option so that varnames with 
cons in them aren't zapped + local partial : subinstr local partial "_cons" "", all count(local partialcons) word + local partial : list retokenize partial + if "`partial'"=="_all" { + local partial `inexog' + } +// constant always partialled out if present in regression and other inexog are being partialled out +// (incompatibilities caught in error-check section below) + if "`partial'"~="" { + local partialcons = (`cons' | `partialcons') + } + +// detect if TS or FV operators used in main varlists +// clear any extraneous sreturn macros first + sreturn clear + local 0 `lhs' `inexog' `endo' `exexog' `partial' + syntax varlist(fv ts) + local tsops = ("`s(tsops)'"=="true") | `tsops' + local fvops = ("`s(fvops)'"=="true") | `fvops' + +// TS operators not allowed with cluster, ivar or tvar. Captured in -syntax-. + if "`tvar'" == "" { + local tvar `_dta[_TStvar]' + } + if "`ivar'" == "" { + local ivar `_dta[_TSpanel]' + } + if "`_dta[_TSdelta]'" == "" { + local tdelta 1 + } + else { // use evaluator since _dta[_TSdelta] can + local tdelta = `_dta[_TSdelta]' // be stored as e.g. +1.0000000000000X+000 + } + + sreturn local lhs `lhs' + sreturn local depname `depname' + sreturn local endo `endo' + sreturn local inexog `inexog' + sreturn local exexog `exexog' + sreturn local partial `partial' + sreturn local cons =`cons' + sreturn local partialcons =`partialcons' + sreturn local tsops =`tsops' + sreturn local fvops =`fvops' + sreturn local tvar `tvar' + sreturn local ivar `ivar' + sreturn local tdelta `tdelta' + sreturn local noid `noid' // can be overriden below + sreturn local liml `liml' // can be overriden below + +//convert to boolean + sreturn local useqr =("`useqr'" ~= "") + +// Cluster and SW imply robust + if "`cluster'`sw'"~="" { + local robust "robust" + } + +// HAC estimation. 
+ +// First dkraay(bw): special case of HAC with clustering +// on time-series var in a panel + kernel-robust + if `dkraay' { + if "`bw'" == "" { + local bw `dkraay' + } + if "`cluster'" == "" { + local cluster `tvar' + } + } +// If bw is omitted, default `bw' is 0. +// bw(.) can be number or "auto" hence arrives as string, but is returned as number +// bw=-1 returned if "auto" +// If bw or kernel supplied, check/set `kernel'. +// Macro `kernel' is also used for indicating HAC in use. +// If bw or kernel not supplied, set bw=0 + if "`bw'" == "" & "`kernel'" == "" { + local bw 0 + } + else { +// Check it's a valid kernel and replace with unabbreviated kernel name; check bw. +// s_vkernel is in livreg2 mlib. + mata: s_vkernel("`kernel'", "`bw'", "`ivar'") + local kernel `r(kernel)' + local bw `r(bw)' // = -1 if bw(auto) option chosen + local tsops = 1 + } +// kiefer = kernel(tru) bw(T) and no robust + if "`kiefer'" ~= "" & "`kernel'" == "" { + local kernel "Truncated" + } + +// Done parsing VCE opts + sreturn local bw `bw' + sreturn local kernel `kernel' + sreturn local robust `robust' + sreturn local cluster `cluster' + if `bw' { + sreturn local bwopt "bw(`bw')" + sreturn local kernopt "kernel(`kernel')" + } +// center arrives as string but is returned as boolean + sreturn local center =("`center'"=="center") + +// Fuller implies LIML + if `fuller' != 0 { + sreturn local liml "liml" + sreturn local fulleropt "fuller(`fuller')" + } + + if `kclass' != 0 { + sreturn local kclassopt "kclass(`kclass')" + } + +// b0 implies noid. 
+ if "`b0'" ~= "" { + sreturn local noid "noid" + } + +// save first, rf + if "`savefprefix'" != "" { // savefprefix implies savefirst + local savefirst "savefirst" + } + else { // default savefprefix is _ivreg2_ + local savefprefix "_`ivreg2name'_" + } + sreturn local savefirst `savefirst' + sreturn local savefprefix `savefprefix' + if "`saverfprefix'" != "" { // saverfprefix implies saverf + local saverf "saverf" + } + else { // default saverfprefix is _ivreg2_ + local saverfprefix "_`ivreg2name'_" + } + sreturn local saverf `saverf' + sreturn local saverfprefix `saverfprefix' + if "`savesfprefix'" != "" { // savesfprefix implies savesfirst + local savesfirst "savesfirst" + } + else { // default saverfprefix is _ivreg2_ + local savesfprefix "_`ivreg2name'_" + } + sreturn local savesfirst `savesfirst' + sreturn local savesfprefix `savesfprefix' + +// Macro psd has either psd0, psda or is empty + sreturn local psd "`psd0'`psda'" + +// dofminus + if `dofminus' { + sreturn local dofmopt dofminus(`dofminus') + } + +// display options + local dispopt eform(`eform') `vsquish' `noomitted' `noemptycells' `baselevels' `allbaselevels' +// now boolean - indicates that omitted and/or base vars should NOT be added to VCV +// automatically triggered by partial + local bvclean = wordcount("`bvclean'") | wordcount("`partial'") | `partialcons' + sreturn local bvclean `bvclean' + sreturn local dispopt `dispopt' + +// ************ ERROR CHECKS ************* // + + if `partialcons' & ~`cons' { +di in r "Error: _cons listed in partial() but equation specifies -noconstant-." + exit 198 + } + if `partialcons' > 1 { +// Just in case of multiple _cons +di in r "Error: _cons listed more than once in partial()." + exit 198 + } + +// User-supplied tvar and ivar checked if consistent with tsset. 
+ if "`tvar'"!="`_dta[_TStvar]'" { +di as err "invalid tvar() option - data already -tsset-" + exit 5 + } + if "`ivar'"!="`_dta[_TSpanel]'" { +di as err "invalid ivar() option - data already -xtset-" + exit 5 + } + +// dkraay + if `dkraay' { + if "`ivar'" == "" | "`tvar'" == "" { +di as err "invalid use of dkraay option - must use tsset panel data" + exit 5 + } + if "`dkraay'" ~= "`bw'" { +di as err "cannot use dkraay(.) and bw(.) options together" + exit 198 + } + if "`cluster'" ~= "`tvar'" { +di as err "invalid use of dkraay option - must cluster on `tvar' (or omit cluster option)" + exit 198 + } + } + +// kiefer VCV = kernel(tru) bw(T) and no robust with tsset data + if "`kiefer'" ~= "" { + if "`ivar'" == "" | "`tvar'" == "" { +di as err "invalid use of kiefer option - must use tsset panel data" + exit 5 + } + if "`robust'" ~= "" { +di as err "incompatible options: kiefer and robust" + exit 198 + } + if "`kernel'" ~= "" & "`kernel'" ~= "Truncated" { +di as err "incompatible options: kiefer and kernel(`kernel')" + exit 198 + } + if (`bw'~=0) { +di as err "incompatible options: kiefer and bw" + exit 198 + } + } + +// sw=Stock-Watson robust SEs + if "`sw'" ~= "" & "`cluster'" ~= "" { +di as err "Stock-Watson robust SEs not supported with -cluster- option" + exit 198 + } + if "`sw'" ~= "" & "`kernel'" ~= "" { +di as err "Stock-Watson robust SEs not supported with -kernel- option" + exit 198 + } + if "`sw'" ~= "" & "`ivar'"=="" { +di as err "Must -xtset- or -tsset- data or specify -ivar- with -sw- option" + exit 198 + } + +// LIML/kclass incompatibilities + if "`liml'`kclassopt'" != "" { + if "`gmm2s'`cue'" != "" { +di as err "GMM estimation not available with LIML or k-class estimators" + exit 198 + } + if `fuller' < 0 { +di as err "invalid Fuller option" + exit 198 + } + if "`liml'" != "" & "`kclassopt'" != "" { +di as err "cannot use liml and kclass options together" + exit 198 + } + if `kclass' < 0 { +di as err "invalid k-class option" + exit 198 + } + } + + if 
"`gmm2s'" != "" & "`cue'" != "" { +di as err "incompatible options: 2-step efficient gmm and cue gmm" + exit 198 + } + + if "`gmm2s'`cue'" != "" & "`exexog'" == "" { +di as err "option `gmm2s'`cue' invalid: no excluded instruments specified" + exit 102 + } + +// Legacy gmm option + if "`gmm'" ~= "" { +di as err "-gmm- is no longer a supported option; use -gmm2s- with the appropriate option" +di as res " gmm = gmm2s robust" +di as res " gmm robust = gmm2s robust" +di as res " gmm bw() = gmm2s bw()" +di as res " gmm robust bw() = gmm2s robust bw()" +di as res " gmm cluster() = gmm2s cluster()" + exit 198 + } + +// b0 incompatible options. + if "`b0'" ~= "" & "`gmm2s'`cue'`liml'`wmatrix'" ~= "" { +di as err "incompatible options: -b0- and `gmm2s' `cue' `liml' `wmatrix'" + exit 198 + } + if "`b0'" ~= "" & `kclass' ~= 0 { +di as err "incompatible options: -b0- and kclass(`kclass')" + exit 198 + } + + if "`psd0'"~="" & "`psda'"~="" { +di as err "cannot use psd0 and psda options together" + exit 198 + } +end + +// *************** Check varlists for for duplicates and collinearities ***************** // + +program define CheckDupsCollin, sclass + version 11.2 + syntax /// + [ , /// + lhs(string) /// + endo(string) /// + inexog(string) /// + exexog(string) /// + partial(string) /// + orthog(string) /// + endogtest(string) /// + redundant(string) /// + touse(string) /// + wvar(string) /// + wf(real 0) /// + NOCONSTANT /// + NOCOLLIN /// + fvall /// + fvsep /// + ] + + if "`fvall'`fvsep'"=="" { // default, expand RHS and exexog separately + local rhs `endo' `inexog' + foreach vl in lhs rhs exexog { + fvexpand ``vl'' if `touse' + local `vl' `r(varlist)' + } + local allvars `rhs' `exexog' + } + else if "`fvall'"~="" { // expand all 3 varlists as one + fvexpand `lhs' if `touse' + local lhs `r(varlist)' + fvexpand `endo' `inexog' `exexog' if `touse' + local allvars `r(varlist)' + } + else if "`fvsep'"~="" { // expand 3 varlists separately + foreach vl in lhs endo inexog exexog { 
+ fvexpand ``vl'' if `touse' + local `vl' `r(varlist)' + } + local allvars `endo' `inexog' `exexog' + } + else { // shouldn't reach here +di as err "internal ivreg2 err: CheckDupsCollin" + exit 198 + } + +// Create dictionary: `allvars' is list with b/n/o etc., sallvars is stripped version +// NB: lhs is not in dictionary and won't need to recreate it + ivreg2_fvstrip `allvars' + local sallvars `r(varlist)' + +// Create consistent expanded varlists +// (1) expand; (2) strip (since base etc. may be wrong); (3) recreate using dictionary +// NB: matchnames will return unmatched original name if not found in 2nd arg varlist + foreach vl in endo inexog exexog partial orthog endogtest redundant { + fvexpand ``vl'' if `touse' + ivreg2_fvstrip `r(varlist)' + local stripped `r(varlist)' // create stripped version of varlist + matchnames "`stripped'" "`sallvars'" "`allvars'" // match using dictionary + local `vl' `r(names)' // new consistent varlist with correct b/n/o etc. + } + +// Check for duplicates of variables +// (1) inexog > endo +// (2) inexog > exexog +// (3) endo + exexog = inexog, as if it were "perfectly predicted" + local lhs0 `lhs' // create here + local dupsen1 : list dups endo + local dupsin1 : list dups inexog + local dupsex1 : list dups exexog + foreach vl in endo inexog exexog partial orthog endogtest redundant { + local `vl'0 : list uniq `vl' + } +// Remove inexog from endo + local dupsen2 : list endo0 & inexog0 + local endo0 : list endo0 - inexog0 +// Remove inexog from exexog + local dupsex2 : list exexog0 & inexog0 + local exexog0 : list exexog0 - inexog0 +// Remove endo from exexog + local dupsex3 : list exexog0 & endo0 + local exexog0 : list exexog0 - endo0 + local dups "`dupsen1' `dupsex1' `dupsin1' `dupsen2' `dupsex2' `dupsex3'" + local dups : list uniq dups + +// Collinearity checks + +// Need variable counts for "0" varlists +// These do NOT include the constant + local endo0_ct : word count `endo0' + local inexog0_ct : word count `inexog0' + 
local rhs0_ct : word count `inexog0' `exexog0' + local exexog0_ct : word count `exexog' + + if "`nocollin'" == "" { + +// Needed for ivreg2_rmcollright2 + tempvar normwt + qui gen double `normwt' = `wf' * `wvar' if `touse' + +// Simple case: no endogenous regressors, only included and excluded exogenous + if `endo0_ct'==0 { +// Call ivreg2_rmcollright2 on "0" versions of inexog and exexog +// noexpand since already expanded and don't want inconsistant expansion +// newonly since don't want base vars in collinear list + qui ivreg2_rmcollright2 `inexog0' `exexog0' if `touse', /// + normwt(`normwt') `noconstant' noexpand newonly +// ivreg2_rmcollright2 returns fulll varlist with omitteds marked as omitted, +// so just need to separate the inexog and exexog lists + if `r(k_omitted)' { + local collin `collin' `r(omitted)' + local inexog0 "" + local exexog0 "" + local nvarlist `r(varlist)' + local i 1 + while `i' <= `rhs0_ct' { + local nvar : word `i' of `nvarlist' + if `i' <= `inexog0_ct' { + local inexog0 `inexog0' `nvar' // first batch go into inexog0 + } + else { + local exexog0 `exexog0' `nvar' // remainder go into exexog0 + } + local ++i + } + local inexog0 : list retokenize inexog0 + local exexog0 : list retokenize exexog0 + } + } +// Not-simple case: endogenous regressors + else { + +// 1st pass through - remove intra-endo collinears + qui ivreg2_rmcollright2 `endo0' if `touse', /// + normwt(`normwt') `noconstant' noexpand newonly +// ivreg2_rmcollright2 returns fulll varlist with omitteds marked as omitted, +// so just need to separate the inexog and exexog lists + if `r(k_omitted)' { + local collin `collin' `r(omitted)' + local endo0 `r(varlist)' + } + +// 2nd pass through - good enough unless endog appear as colllinear +// noexpand since already expanded and don't want inconsistent expansion +// newonly since don't want base vars in collinear list + qui ivreg2_rmcollright2 `inexog0' `exexog0' `endo0' if `touse', /// + normwt(`normwt') `noconstant' noexpand 
newonly + if `r(k_omitted)' { +// Check if any endo are in the collinears. +// If yes, reclassify as inexog, then +// 3rd pass through - and then proceed to process inexog and exexog as above + local ecollin `r(omitted)' + local ecollin : list ecollin - inexog0 + local ecollin : list ecollin - exexog0 + if wordcount("`ecollin'") { +// Collinears in endo, so reclassify as inexog, redo counts and call ivreg2_rmcollright2 again + local endo0 : list endo0 - ecollin + local inexog0 `ecollin' `inexog0' + local inexog0 : list retokenize inexog0 + local endo0_ct : word count `endo0' + local inexog0_ct : word count `inexog0' + local rhs0_ct : word count `inexog0' `exexog0' +// noexpand since already expanded and don't want inconsistant expansion +// newonly since don't want base vars in collinear list + qui ivreg2_rmcollright2 `inexog0' `exexog0' `endo0' if `touse', /// + normwt(`normwt') `noconstant' noexpand newonly + } +// Collinears in inexog or exexog + local collin `collin' `r(omitted)' + local inexog0 "" + local exexog0 "" + local nvarlist `r(varlist)' + local i 1 + while `i' <= `rhs0_ct' { + local nvar : word `i' of `nvarlist' + if `i' <= `inexog0_ct' { + local inexog0 `inexog0' `nvar' + } + else { + local exexog0 `exexog0' `nvar' + } + local ++i + } + local inexog0 : list retokenize inexog0 + local exexog0 : list retokenize exexog0 + } + } + +// Collinearity and duplicates warning messages, if necessary + if "`dups'" != "" { +di in gr "Warning - duplicate variables detected" +di in gr "Duplicates:" _c + Disp `dups', _col(21) + } + if "`ecollin'" != "" { +di in gr "Warning - endogenous variable(s) collinear with instruments" +di in gr "Vars now exogenous:" _c + Disp `ecollin', _col(21) + } + if "`collin'" != "" { +di in gr "Warning - collinearities detected" +di in gr "Vars dropped:" _c + Disp `collin', _col(21) + } + } + +// Last step: process partial0 so that names with o/b/n etc. 
match inexog0 + if wordcount("`partial0'") { + ivreg2_fvstrip `inexog0' if `touse' + local sinexog0 `r(varlist)' // for inexog dictionary + ivreg2_fvstrip `partial0' if `touse' + local spartial0 `r(varlist)' // for partial dictionary + matchnames "`spartial0'" "`sinexog0'" "`inexog0'" // match using dictionary + local partial0 `r(names)' // new partial0 with matches + local partialcheck : list partial0 - inexog0 // unmatched are still in partial0 + if ("`partialcheck'"~="") { // so catch them +di in r "Error: `partialcheck' listed in partial() but not in list of regressors." + error 198 + } + } +// Completed duplicates and collinearity checks + + foreach vl in lhs endo inexog exexog partial orthog endogtest redundant { + sreturn local `vl' ``vl'' + sreturn local `vl'0 ``vl'0' + } + sreturn local dups `dups' + sreturn local collin `collin' + sreturn local ecollin `ecollin' + +end + +// ******************* Misc error checks *************************** // + +program define CheckMisc, rclass + version 11.2 + syntax /// + [ , /// + rhs1_ct(integer 0) /// + iv1_ct(integer 0) /// + bvector(name) /// + smatrix(name) /// + wmatrix(name) /// + cnb1(string) /// + cnZ1(string) /// + ] + +// Check variable counts + if `rhs1_ct' == 0 { +di as err "error: no regressors specified" + exit 102 + } + if `rhs1_ct' > `iv1_ct' { +di as err "equation not identified; must have at least as many instruments" +di as err "not in the regression as there are instrumented variables" + exit 481 + } + +// Check user-supplied b vector + if "`bvector'" != "" { + tempname b0 +// Rearrange/select columns to mat IV matrix + cap matsort `bvector' "`cnb1'" + matrix `b0'=r(sorted) + local scols = colsof(`b0') + local bcols : word count `cnb1' + if _rc ~= 0 | (`scols'~=`bcols') { +di as err "-b0- option error: supplied b0 columns do not match regressor list" +exit 198 + } + return mat b0 = `b0' + } + +// Check user-supplied S matrix + if "`smatrix'" != "" { + tempname S0 +// Check that smatrix is indeed a 
matrix + cap mat S0 = `smatrix' + if _rc ~= 0 { +di as err "invalid matrix `smatrix' in smatrix option" +exit _rc + } +// Rearrange/select columns to mat IV matrix + cap matsort `smatrix' "`cnZ1'" + matrix `S0'=r(sorted) + local srows = rowsof(`S0') + local scols = colsof(`S0') + local zcols : word count `cnZ1' + if _rc ~= 0 | (`srows'~=`zcols') | (`scols'~=`zcols') { +di as err "-smatrix- option error: supplied matrix columns/rows do not match IV list" +exit 198 + } + if issymmetric(`S0')==0 { +di as err "-smatrix- option error: supplied matrix is not symmetric" +exit 198 + } + return mat S0 = `S0' + } + +// Check user-supplied W matrix + if "`wmatrix'" != "" { + tempname W0 +// Check that wmatrix is indeed a matrix + cap mat W0 = `wmatrix' + if _rc ~= 0 { +di as err "invalid matrix `wmatrix' in wmatrix option" +exit _rc + } +// Rearrange/select columns to mat IV matrix + cap matsort `wmatrix' "`cnZ1'" + matrix `W0'=r(sorted) + local srows = rowsof(`W0') + local scols = colsof(`W0') + local zcols : word count `cnZ1' + if _rc ~= 0 | (`srows'~=`zcols') | (`scols'~=`zcols') { +di as err "-wmatrix- option error: supplied matrix columns/rows do not match IV list" +exit 198 + } + if issymmetric(`W0')==0 { +di as err "-smatrix- option error: supplied matrix is not symmetric" +exit 198 + } + return mat W0 = `W0' + } +end + + +******************************************************************************* +************************* misc utilities ************************************** +******************************************************************************* + +// internal version of ivreg2_fvstrip 1.01 ms 24march2015 +// takes varlist with possible FVs and strips out b/n/o notation +// returns results in r(varnames) +// optionally also omits omittable FVs +// expand calls fvexpand either on full varlist +// or (with onebyone option) on elements of varlist + +program define ivreg2_fvstrip, rclass + version 11.2 + syntax [anything] [if] , [ dropomit expand onebyone 
NOIsily ] + if "`expand'"~="" { // force call to fvexpand + if "`onebyone'"=="" { + fvexpand `anything' `if' // single call to fvexpand + local anything `r(varlist)' + } + else { + foreach vn of local anything { + fvexpand `vn' `if' // call fvexpand on items one-by-one + local newlist `newlist' `r(varlist)' + } + local anything : list clean newlist + } + } + foreach vn of local anything { // loop through varnames + if "`dropomit'"~="" { // check & include only if + _ms_parse_parts `vn' // not omitted (b. or o.) + if ~`r(omit)' { + local unstripped `unstripped' `vn' // add to list only if not omitted + } + } + else { // add varname to list even if + local unstripped `unstripped' `vn' // could be omitted (b. or o.) + } + } +// Now create list with b/n/o stripped out + foreach vn of local unstripped { + local svn "" // initialize + _ms_parse_parts `vn' + if "`r(type)'"=="variable" & "`r(op)'"=="" { // simplest case - no change + local svn `vn' + } + else if "`r(type)'"=="variable" & "`r(op)'"=="o" { // next simplest case - o.varname => varname + local svn `r(name)' + } + else if "`r(type)'"=="variable" { // has other operators so strip o but leave . + local op `r(op)' + local op : subinstr local op "o" "", all + local svn `op'.`r(name)' + } + else if "`r(type)'"=="factor" { // simple factor variable + local op `r(op)' + local op : subinstr local op "b" "", all + local op : subinstr local op "n" "", all + local op : subinstr local op "o" "", all + local svn `op'.`r(name)' // operator + . + varname + } + else if"`r(type)'"=="interaction" { // multiple variables + forvalues i=1/`r(k_names)' { + local op `r(op`i')' + local op : subinstr local op "b" "", all + local op : subinstr local op "n" "", all + local op : subinstr local op "o" "", all + local opv `op'.`r(name`i')' // operator + . 
+ varname + if `i'==1 { + local svn `opv' + } + else { + local svn `svn'#`opv' + } + } + } + else if "`r(type)'"=="product" { + di as err "ivreg2_fvstrip error - type=product for `vn'" + exit 198 + } + else if "`r(type)'"=="error" { + di as err "ivreg2_fvstrip error - type=error for `vn'" + exit 198 + } + else { + di as err "ivreg2_fvstrip error - unknown type for `vn'" + exit 198 + } + local stripped `stripped' `svn' + } + local stripped : list retokenize stripped // clean any extra spaces + + if "`noisily'"~="" { // for debugging etc. +di as result "`stripped'" + } + + return local varlist `stripped' // return results in r(varlist) +end + +// **************** Add omitted vars to b and V matrices ****************** // + +program define AddOmitted, rclass + version 11.2 + syntax /// + [ , /// + bmat(name) /// + vmat(name) /// + cnb0(string) /// + cnb1(string) /// + eqlist(string) /// if empty, single-equation b and V + ] + + tempname newb newV + local eq_ct =max(1,wordcount("`eqlist'")) + local rhs0_ct : word count `cnb0' + local rhs1_ct : word count `cnb1' + + foreach vn in `cnb1' { + local cnum : list posof "`vn'" in cnb0 + local cnumlist "`cnumlist' `cnum'" + } +// cnumlist is the list of columns in the single-equation new big matrix in which +// the non-zero entries from the reduced matrix (bmat or vmat) will appear. +// E.g., if newb will be [mpg o.mpg2 _cons] then cnum = [1 3]. 
+ + mata: s_AddOmitted( /// + "`bmat'", /// + "`vmat'", /// + "`cnumlist'", /// + `eq_ct', /// + `rhs0_ct', /// + `rhs1_ct') + mat `newb' = r(b) + mat `newV' = r(V) + + if `eq_ct'==1 { + local allnames `cnb0' // simple single-eqn case + } + else { + foreach eqname in `eqlist' { + foreach vname in `cnb0' { + local allnames "`allnames' `eqname':`vname'" + } + } + } + mat colnames `newb' = `allnames' + mat rownames `newb' = y1 + mat colnames `newV' = `allnames' + mat rownames `newV' = `allnames' + + return matrix b =`newb' + return matrix V =`newV' +end + +// ************* More misc utilities ************** // + +program define matsort, rclass + version 11.2 + args bvmat names + tempname m1 m2 + foreach vn in `names' { + mat `m1'=nullmat(`m1'), `bvmat'[1...,"`vn'"] + } + if rowsof(`m1')>1 { + foreach vn in `names' { + mat `m2'=nullmat(`m2') \ `m1'["`vn'",1...] + } + return matrix sorted =`m2' + } + else { + return matrix sorted =`m1' + } +end + + +program define matchnames, rclass + version 11.2 + args varnames namelist1 namelist2 + + local k1 : word count `namelist1' + local k2 : word count `namelist2' + + if `k1' ~= `k2' { + di as err "namelist error" + exit 198 + } + foreach vn in `varnames' { + local i : list posof `"`vn'"' in namelist1 + if `i' > 0 { + local newname : word `i' of `namelist2' + } + else { +* Keep old name if not found in list + local newname "`vn'" + } + local names "`names' `newname'" + } + local names : list clean names + return local names "`names'" +end + + +program define checkversion_ranktest, rclass + version 11.2 + args caller + +* Check that -ranktest- is installed + capture ranktest, version + if _rc != 0 { +di as err "Error: must have ranktest version 01.3.02 or greater installed" +di as err "To install, from within Stata type " _c +di in smcl "{stata ssc install ranktest :ssc install ranktest}" + exit 601 + } + local vernum "`r(version)'" + if ("`vernum'" < "01.3.02") | ("`vernum'" > "09.9.99") { +di as err "Error: must have ranktest 
version 01.3.02 or greater installed" +di as err "Currently installed version is `vernum'" +di as err "To update, from within Stata type " _c +di in smcl "{stata ssc install ranktest, replace :ssc install ranktest, replace}" + exit 601 + } + +* Minimum Stata version required for ranktest ver 2.0 or higher is Stata 16. +* If calling version is <16 then forks to ranktest ver 1.4 (aka ranktest11). + if `caller' >= 16 { + return local ranktestcmd version `caller': ranktest + } + else { + return local ranktestcmd version 11.2: ranktest + } +end + +// ************ Replacement _rmcollright with tweaks ****************** // + +program define ivreg2_rmcollright2, rclass + version 11.2 + syntax [ anything ] /// anything so that FVs aren't reordered + [if] [in] /// + [, /// + NORMWT(varname) /// + NOCONStant /// + NOEXPAND /// + newonly /// + lindep /// + ] + +// Empty varlist, leave early + if "`anything'"=="" { + return scalar k_omitted =0 + exit + } + + marksample touse + markout `touse' `anything' + + local cons = ("`noconstant'"=="") + local expand = ("`noexpand'"=="") + local newonly = ("`newonly'"~="") + local forcedrop = ("`forcedrop'"~="") + local lindep = ("`lindep'"~="") + local 0 `anything' + sreturn clear // clear any extraneous sreturn macros + syntax varlist(ts fv) + local tsops = ("`s(tsops)'"=="true") + local fvops = ("`s(fvops)'"=="true") + + if `tsops' | `fvops' { + if `expand' { + fvexpand `anything' if `touse' + local anything `r(varlist)' + fvrevar `anything' if `touse' + local fv_anything `r(varlist)' + } + else { +// already expanded and in set order +// loop through fvrevar so that it doesn't rebase or reorder + foreach var in `anything' { + fvrevar `var' if `touse' + local fv_anything `fv_anything' `r(varlist)' + } + } + } + else { + local fv_anything `anything' + } + + tempname wname + if "`normwt'"=="" { + qui gen byte `wname'=1 if `touse' + } + else { + qui gen double `wname' = `normwt' if `touse' + } + + mata: s_rmcoll2("`fv_anything'", 
"`anything'", "`wname'", "`touse'", `cons', `lindep') + + foreach var in `r(omitted)' { + di as text "note: `var' omitted because of collinearity" + } + + local omitted "`r(omitted)'" // has all omitted, both newly and previously omitted + local k_omitted =r(k_omitted) // but newly omitted not marked with omit operator o + if `lindep' { + tempname lindepmat + mat `lindepmat' = r(lindep) + mat rownames `lindepmat' = `anything' + mat colnames `lindepmat' = `anything' + } + +// Modern Stata version, add omitted notation to newly-missing vars + if `k_omitted' { + foreach var in `omitted' { + _ms_parse_parts `var' // check if already omitted + if r(omit) { // already omitted + local alreadyomitted `alreadyomitted' `var' + } + else { // not already omitted + ivreg2_rmc2_ms_put_omit `var' // add omitted omit operator o and replace in main varlist + local ovar `s(ospec)' + local anything : subinstr local anything "`var'" "`ovar'", word + } + } + if `newonly' { // omitted list should contain only newly omitted + local omitted : list omitted - alreadyomitted + local k_omitted : word count `omitted' + } + } + +// Return results + return scalar k_omitted =`k_omitted' + return local omitted `omitted' + return local varlist `anything' + if `lindep' { + return mat lindep `lindepmat' + } + +end + +// Used by ivreg2_rmcollright2 +// taken from later Stata - not available in Stata 11 +// version 1.0.0 28apr2011 +program ivreg2_rmc2_ms_put_omit, sclass + version 11.2 // added by MS + args vn + _ms_parse_parts `vn' + if r(type) =="variable" { + local name `r(name)' + local ovar o.`name' + } + if r(type) == "factor" { + if !r(base) { + local name `r(name)' + if "`r(ts_op)'" != "" { + local name `r(ts_op)'.`name' + } + local ovar `r(level)'o.`name' + } + else { + local ovar `vn' + } + } + else if r(type) == "interaction" { + local k = r(k_names) + + forval i = 1/`k' { + local name = r(name`i') + if "`r(ts_op`i')'" != "" { + local name `r(ts_op`i')'.`name' + } + if "`r(level`i')'" != "" 
{ + if r(base`i') { + local name `r(level`i')'b.`name' + } + else { + local name `r(level`i')'o.`name' + } + } + else { + local name o.`name' + } + local spec `spec'`sharp'`name' + local sharp "#" + } + local ovar `spec' + + } + _msparse `ovar' + sreturn local ospec `r(stripe)' +end + + +******************************************************************************* +**************** SUBROUTINES FOR KERNEL-ROBUST ******************************** +******************************************************************************* + +// abw wants a varlist of [ eps | Z | touse] +// where Z includes all instruments, included and excluded, with constant if +// present as the last column; eps are a suitable set of residuals; and touse +// marks the observations in the data matrix used to generate the residuals +// (e.g. e(sample) of the appropriate model). +// The Noconstant option indicates that no constant term exists in the Z matrix. +// kern is the name of the HAC kernel. -ivregress- only provides definitions +// for Bartlett (default), Parzen, quadratic spectral. 
+ +// returns the optimal bandwidth as local abw + +// abw 1.0.1 CFB 30jun2007 +// 1.0.1 : redefine kernel names (3 instances) to match ivreg2 +// 1.1.0 : pass nobs and tobs to s_abw; abw bug fix and also handles gaps in data correctly + +prog def abw, rclass + version 11.2 + syntax varlist(ts), [ tindex(varname) nobs(integer 0) tobs(integer 0) NOConstant Kernel(string)] +// validate kernel + if "`kernel'" == "" { + local kernel = "Bartlett" + } +// cfb B102 + if !inlist("`kernel'", "Bartlett", "Parzen", "Quadratic Spectral") { + di as err "Error: kernel `kernel' not compatible with bw(auto)" + return scalar abw = 1 + return local bwchoice "Kernel `kernel' not compatible with bw(auto); bw=1 (default)" + exit + } + else { +// set constant + local cons 1 + if "`noconstant'" != "" { + local cons 0 + } +// deal with ts ops + tsrevar `varlist' + local varlist1 `r(varlist)' + mata: s_abw("`varlist1'", "`tindex'", `nobs', `tobs', `cons', "`kernel'") + return scalar abw = `abw' + return local bwchoice "Automatic bw selection according to Newey-West (1994)" + } +end + + +******************************************************************************* +************** END SUBROUTINES FOR KERNEL-ROBUST ****************************** +******************************************************************************* + +******************************************************************************* +*************************** BEGIN MATA CODE *********************************** +******************************************************************************* + +// capture in case calling under version < 11.2 +capture version 11.2 + +mata: + +// For reference: +// struct ms_vcvorthog { +// string scalar ename, Znames, touse, weight, wvarname +// string scalar robust, clustvarname, clustvarname2, clustvarname3, kernel +// string scalar sw, psd, ivarname, tvarname, tindexname +// real scalar wf, N, bw, tdelta, dofminus +// real scalar center +// real matrix ZZ +// pointer matrix e +// 
pointer matrix Z +// pointer matrix wvar +// } + + +void s_abw (string scalar Zulist, + string scalar tindexname, + real scalar nobs, + real scalar tobs, + real scalar cons, + string scalar kernel + ) +{ + +// nobs = number of observations = number of data points available = rows(uZ) +// tobs = time span of data = t_N - t_1 + 1 +// nobs = tobs if no gaps in data +// nobs < tobs if there are gaps +// nobs used below when calculating means, e.g., covariances in sigmahat. +// tobs used below when time span of data is needed, e.g., mstar. + + string rowvector Zunames, tov + string scalar v, v2 + real matrix uZ + real rowvector h + real scalar lenzu, abw + +// access the Stata variables in Zulist, honoring touse stored as last column + Zunames = tokens(Zulist) + lenzu=cols(Zunames)-1 + v = Zunames[|1\lenzu|] + v2 = Zunames[lenzu+1] + st_view(uZ,.,v,v2) + tnow=st_data(., tindexname) + +// assume constant in last col of uZ if it exists +// account for eps as the first column of uZ + if (cons) { + nrows1=cols(uZ)-2 + nrows2=1 + } + else { + nrows1=cols(uZ)-1 + nrows2=0 + } +// [R] ivregress p.42: referencing Newey-West 1994 REStud 61(4):631-653 +// define h indicator rowvector + h = J(nrows1,1,1) \ J(nrows2,1,0) + +// calc mstar per p.43 +// Hannan (1971, 296) & Priestley (1981, 58) per Newey-West p. 
633 +// corrected per Alistair Hall msg to Brian Poi 17jul2008 +// T = rows(uZ) +// oneT = 1/T + expo = 2/9 + q = 1 +// cgamma = 1.4117 + cgamma = 1.1447 + if(kernel == "Parzen") { + expo = 4/25 + q = 2 + cgamma = 2.6614 + } +// cfb B102 + if(kernel == "Quadratic Spectral") { + expo = 2/25 + q = 2 + cgamma = 1.3221 + } +// per Newey-West p.639, Anderson (1971), Priestley (1981) may provide +// guidance on setting expo for other kernels +// mstar = trunc(20 *(T/100)^expo) +// use time span of data (not number of obs) + mstar = trunc(20 *(tobs/100)^expo) + +// calc uZ matrix + u = uZ[.,1] + Z = uZ[|1,2 \.,.|] + +// calc f vector: (u_i Z_i) * h + f = (u :* Z) * h + +// approach allows for gaps in time series + sigmahat = J(mstar+1,1,0) + for(j=0;j<=mstar;j++) { + lsj = "L"+strofreal(j) + tlag=st_data(., lsj+"."+tindexname) + tmatrix = tnow, tlag + svar=(tnow:<.):*(tlag:<.) // multiply column vectors of 1s and 0s + tmatrix=select(tmatrix,svar) // to get intersection, and replace tmatrix + // now calculate autocovariance; divide by nobs + sigmahat[j+1] = quadcross(f[tmatrix[.,1],.], f[tmatrix[.,2],.]) / nobs + } + +// calc shat(q), shat(0) + shatq = 0 + shat0 = sigmahat[1] + for(j=1;j<=mstar;j++) { + shatq = shatq + 2 * sigmahat[j+1] * j^q + shat0 = shat0 + 2 * sigmahat[j+1] + } + +// calc gammahat + expon = 1/(2*q+1) + gammahat = cgamma*( (shatq/shat0)^2 )^expon +// use time span of data tobs (not number of obs T) + m = gammahat * tobs^expon + +// calc opt lag + if(kernel == "Bartlett" | kernel == "Parzen") { + optlag = min((trunc(m),mstar)) + } + else if(kernel == "Quadratic Spectral") { + optlag = min((m,mstar)) + } + +// if optlag is the optimal lag to be used, we need to add one to +// specify bandwidth in ivreg2 terms + abw = optlag + 1 + st_local("abw",strofreal(abw)) +} // end program s_abw + + +// *********** s_rmcoll2 (replacement for Stata _rmcollright etc. 
********** + +void s_rmcoll2( string scalar fv_vnames, + string scalar vnames, + string scalar wname, + string scalar touse, + scalar cons, + scalar lindep) +{ + st_view(X=., ., tokens(fv_vnames), touse) + st_view(w=., ., tokens(wname), touse) + st_view(mtouse=., ., tokens(touse), touse) + + if (cons) { + Xmean=mean(X,w) + XX=quadcrossdev(X,Xmean, w, X,Xmean) + } + else { + XX=quadcross(X, w, X) + } + + XXinv=invsym(XX, range(1,cols(X),1)) + + st_numscalar("r(k_omitted)", diag0cnt(XXinv)) + if (lindep) { + st_matrix("r(lindep)", XX*XXinv) + } + smat = (diagonal(XXinv) :== 0)' + vl=tokens(vnames) + vl_drop = select(vl, smat) + vl_keep = select(vl, (1 :- smat)) + + if (cols(vl_keep)>0) { + st_global("r(varlist)", invtokens(vl_keep)) + } + if (cols(vl_drop)>0) { + st_global("r(omitted)", invtokens(vl_drop)) + } +} // end program s_rmcoll2 + + +// ************** Add omitted Mata utility ************************ + +void s_AddOmitted( string scalar bname, + string scalar vname, + string scalar cnumlist, + scalar eq_ct, + scalar rhs0_ct, + scalar rhs1_ct) + +{ + b = st_matrix(bname) + V = st_matrix(vname) + cn = strtoreal(tokens(cnumlist)) +// cnumlist is the list of columns in the single-equation new big matrix in which +// the non-zero entries from the reduced matrix (bmat or vmat) will appear. +// E.g., if newb will be [mpg o.mpg2 _cons] then cnum = [1 3]. + col_ct = eq_ct * rhs0_ct + + newb = J(1,col_ct,0) + newV = J(col_ct,col_ct,0) + +// Code needs to accommodate multi-equation case. Since all equations will have +// same reduced and full list of vars, in the same order, can do this with Kronecker +// products etc. Second term below is basically the offset for each equation. + cn = (J(1,eq_ct,1) # cn) + ((range(0,eq_ct-1,1)' # J(1,rhs1_ct,1) ) * rhs0_ct) + +// Insert the values from the reduced matrices into the right places in the big matrices. 
+ newb[1, cn] = b + newV[cn, cn] = V + + st_matrix("r(b)", newb) + st_matrix("r(V)", newV) + +} + + +// ************** Partial out ************************************* + +void s_partial( string scalar yname, + string scalar X1names, + string scalar X2names, + string scalar Z1names, + string scalar Pnames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + scalar cons) + +{ + +// All varnames should be basic form, no FV or TS operators etc. +// y = dep var +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 +// PZ = variables to partial out +// cons = 0 or 1 + + ytoken=tokens(yname) + X1tokens=tokens(X1names) + X2tokens=tokens(X2names) + Z1tokens=tokens(Z1names) + Ptokens=tokens(Pnames) + Ytokens = (ytoken, X1tokens, X2tokens, Z1tokens) + + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(Y, ., Ytokens, touse) + st_view(P, ., Ptokens, touse) + L = cols(P) + + if (cons & L>0) { // Vars to partial out including constant + Ymeans = mean(Y,wf*wvar) + Pmeans = mean(P,wf*wvar) + PY = quadcrossdev(P, Pmeans, wf*wvar, Y, Ymeans) + PP = quadcrossdev(P, Pmeans, wf*wvar, P, Pmeans) + } + else if (!cons & L>0) { // Vars to partial out NOT including constant + PY = quadcross(P, wf*wvar, Y) + PP = quadcross(P, wf*wvar, P) + } + else { // Only constant to partial out = demean + Ymeans = mean(Y,wf*wvar) + } + +// Partial-out coeffs. Default Cholesky; use QR if not full rank and collinearities present. +// Not necessary if no vars other than constant + if (L>0) { + b = cholqrsolve(PP, PY) + } +// Replace with residuals + if (cons & L>0) { // Vars to partial out including constant + Y[.,.] = (Y :- Ymeans) - (P :- Pmeans)*b + } + else if (!cons & L>0) { // Vars to partial out NOT including constant + Y[.,.] = Y - P*b + } + else { // Only constant to partial out = demean + Y[.,.] 
= (Y :- Ymeans) + } + +} // end program s_partial + + + +// ************** Common cross-products ************************************* + +void s_crossprods( string scalar yname, + string scalar X1names, + string scalar X2names, + string scalar Z1names, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N) + +{ + +// y = dep var +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 + + ytoken=tokens(yname) + X1tokens=tokens(X1names) + X2tokens=tokens(X2names) + Z1tokens=tokens(Z1names) + + Xtokens = (X1tokens, X2tokens) + Ztokens = (Z1tokens, X2tokens) + + K1=cols(X1tokens) + K2=cols(X2tokens) + K=K1+K2 + L1=cols(Z1tokens) + L2=cols(X2tokens) + L=L1+L2 + + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(A, ., st_tsrevar((ytoken, Xtokens, Z1tokens)), touse) + + AA = quadcross(A, wf*wvar, A) + + if (K>0) { + XX = AA[(2::K+1),(2..K+1)] + Xy = AA[(2::K+1),1] + } + if (K1>0) { + X1X1 = AA[(2::K1+1),(2..K1+1)] + } + + if (L1 > 0) { + Z1Z1 = AA[(K+2::rows(AA)),(K+2..rows(AA))] + } + + if (L2 > 0) { + Z2Z2 = AA[(K1+2::K+1), (K1+2::K+1)] + Z2y = AA[(K1+2::K+1), 1] + } + + if ((L1>0) & (L2>0)) { + Z2Z1 = AA[(K1+2::K+1), (K+2::rows(AA))] + ZZ2 = Z2Z1, Z2Z2 + ZZ1 = Z1Z1, Z2Z1' + ZZ = ZZ1 \ ZZ2 + } + else if (L1>0) { + ZZ = Z1Z1 + } + else { +// L1=0 + ZZ = Z2Z2 + ZZ2 = Z2Z2 + } + + if ((K1>0) & (L1>0)) { // K1>0, L1>0 + X1Z1 = AA[(2::K1+1), (K+2::rows(AA))] + } + + if ((K1>0) & (L2>0)) { + X1Z2 = AA[(2::K1+1), (K1+2::K+1)] + if (L1>0) { // K1>0, L1>0, L2>0 + X1Z = X1Z1, X1Z2 + XZ = X1Z \ ZZ2 + } + else { // K1>0, L1=0, L2>0 + XZ = X1Z2 \ ZZ2 + X1Z = X1Z2 + } + } + else if (K1>0) { // K1>0, L2=0 + XZ = X1Z1 + X1Z= X1Z1 + } + else if (L1>0) { // K1=0, L2>0 + XZ = AA[(2::K+1),(K+2..rows(AA))], AA[(2::K+1),(2..K+1)] + } + else { // K1=0, L2=0 + XZ = ZZ + } + + if ((L1>0) & (L2>0)) { + Zy = AA[(K+2::rows(AA)), 1] \ AA[(K1+2::K+1), 1] + ZY = AA[(K+2::rows(AA)), 
(1..K1+1)] \ AA[(K1+2::K+1), (1..K1+1)] + Z2Y = AA[(K1+2::K+1), (1..K1+1)] + } + else if (L1>0) { + Zy = AA[(K+2::rows(AA)), 1] + ZY = AA[(K+2::rows(AA)), (1..K1+1)] + } + else if (L2>0) { + Zy = AA[(K1+2::K+1), 1] + ZY = AA[(K1+2::K+1), (1..K1+1)] + Z2Y = ZY + } +// Zy, ZY, Z2Y not created if L1=L2=0 + + YY = AA[(1::K1+1), (1..K1+1)] + yy = AA[1,1] + st_subview(y, A, ., 1) + ym = sum(wf*wvar:*y)/N + yyc = quadcrossdev(y, ym, wf*wvar, y, ym) + + XXinv = invsym(XX) + if (Xtokens==Ztokens) { + ZZinv = XXinv + XPZXinv = XXinv + } + else { + ZZinv = invsym(ZZ) + XPZX = makesymmetric(XZ*ZZinv*XZ') + XPZXinv=invsym(XPZX) + } + +// condition numbers + condxx=cond(XX) + condzz=cond(ZZ) + + st_matrix("r(XX)", XX) + st_matrix("r(X1X1)", X1X1) + st_matrix("r(X1Z)", X1Z) + st_matrix("r(ZZ)", ZZ) + st_matrix("r(Z2Z2)", Z2Z2) + st_matrix("r(Z1Z2)", Z2Z1') + st_matrix("r(Z2y)",Z2y) + st_matrix("r(XZ)", XZ) + st_matrix("r(Xy)", Xy) + st_matrix("r(Zy)", Zy) + st_numscalar("r(yy)", yy) + st_numscalar("r(yyc)", yyc) + st_matrix("r(YY)", YY) + st_matrix("r(ZY)", ZY) + st_matrix("r(Z2Y)", Z2Y) + st_matrix("r(XXinv)", XXinv) + st_matrix("r(ZZinv)", ZZinv) + st_matrix("r(XPZXinv)", XPZXinv) + st_numscalar("r(condxx)",condxx) + st_numscalar("r(condzz)",condzz) + +} // end program s_crossprods + + +// *************** 1st step GMM ******************** // +// Can be either efficient or inefficient. +// Can be IV or other 1-step GMM estimator. + +void s_gmm1s( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Wmatrix, + string scalar Smatrix, + scalar dofminus, + scalar efficient, // flag to indicate that 1st-step GMM is efficient + scalar overid, // not guaranteed to be right if nocollin option used! 
+ scalar useqr) // flag to force use of QR instead of Cholesky solver +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. + + ZZ = st_matrix(ZZmatrix) + XX = st_matrix(XXmatrix) + XZ = st_matrix(XZmatrix) + Zy = st_matrix(Zymatrix) + ZZinv = st_matrix(ZZinvmatrix) + + QZZ = ZZ / N + QXX = XX / N + QXZ = XZ / N + QZy = Zy / N + QZZinv = ZZinv*N + + useqr = (diag0cnt(QZZinv)>0) | useqr + +// Weighting matrix supplied (and inefficient GMM) + if (Wmatrix~="") { + W = st_matrix(Wmatrix) + useqr = (diag0cnt(W)>0) | useqr + } +// Var-cov matrix of orthog conditions supplied + if (Smatrix~="") { + omega=st_matrix(Smatrix) + useqr = (diag0cnt(omega)>0) | useqr + } + + if (efficient) { // Efficient 1-step GMM block: OLS, IV or provided S + if ((Xtokens==Ztokens) & (Smatrix=="")) { // OLS + + beta = cholqrsolve(QZZ, QZy, useqr) + beta = beta' + e[.,.] = y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + omega = sigmasq * QZZ + W = 1/sigmasq * QZZinv + V = 1/N * sigmasq * QZZinv + rankS = rows(omega) - diag0cnt(QZZinv) // inv(omega) is proportional to inv(QZZ) + rankV = rows(V) - diag0cnt(V) // inv(V) is proportional to inv(QZZ) + } + else if (Smatrix=="") { // IV + aux1 = cholqrsolve(QZZ, QXZ', useqr) + aux2 = cholqrsolve(QZZ, QZy, useqr) + aux3 = makesymmetric(QXZ * aux1) + beta = cholqrsolve(aux3, QXZ * aux2, useqr) + beta = beta' + e[.,.] 
= y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq = ee/(N-dofminus) + omega = sigmasq * QZZ + W = 1/sigmasq * QZZinv + V = 1/N * sigmasq * invsym(aux3) + rankS = rows(omega) - diag0cnt(QZZinv) // inv(omega) is proportional to inv(QZZ) + rankV = rows(V) - diag0cnt(V) // V is proportional to inv(aux3) + } + else { // efficient GMM with provided S (=omega) + aux1 = cholqrsolve(omega, QXZ', useqr) + aux2 = cholqrsolve(omega, QZy, useqr) + aux3 = makesymmetric(QXZ * aux1) + beta = cholqrsolve(aux3, QXZ * aux2, useqr) + beta = beta' + e[.,.] = y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + W = invsym(omega) + V = 1/N * invsym(aux3) // Normalize by N + rankS = rows(omega) - diag0cnt(W) // since W=inv(omega) + rankV = rows(V) - diag0cnt(V) // since V is prop to inv(aux3) + } + if (overid) { // J if overidentified + Ze = quadcross(Z, wf*wvar, e) + gbar = Ze / N + aux4 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux4 + } + else { + j=0 + } + st_matrix("r(beta)", beta) + st_matrix("r(V)", V) + st_matrix("r(S)", omega) + st_matrix("r(W)", W) + st_numscalar("r(rss)", ee) + st_numscalar("r(j)", j) + st_numscalar("r(sigmasq)", sigmasq) + st_numscalar("r(rankS)", rankS) + st_numscalar("r(rankV)", rankV) + } + else { // inefficient 1st-step GMM; don't need V, S, j etc. + if ((Xtokens==Ztokens) & (Wmatrix=="")) { // OLS + beta = cholqrsolve(QZZ, QZy, useqr) + beta = beta' + e[.,.] = y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + W = 1/sigmasq * QZZinv + QXZ_W_QZX = 1/sigmasq * QZZ // b/c W incorporates sigma^2 + } + else if (Wmatrix=="") { // IV + aux1 = cholqrsolve(QZZ, QXZ', useqr) + aux2 = cholqrsolve(QZZ, QZy, useqr) + aux3 = makesymmetric(QXZ * aux1) + beta = cholqrsolve(aux3, QXZ * aux2, useqr) + beta = beta' + e[.,.] 
= y - X * beta' // update residuals + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + W = 1/sigmasq * QZZinv + QXZ_W_QZX = 1/sigmasq * aux3 // b/c IV weighting matrix incorporates sigma^2 + } + else { // some other 1st step inefficient GMM with provided W + QXZ_W_QZX = QXZ * W * QXZ' + _makesymmetric(QXZ_W_QZX) + beta = cholqrsolve(QXZ_W_QZX, QXZ * W * QZy, useqr) + beta = beta' + e[.,.] = y - X * beta' // update residuals + } + st_matrix("r(QXZ_W_QZX)", QXZ_W_QZX) + st_matrix("r(beta)", beta) + st_matrix("r(W)",W) // always return W + } + +} // end program s_gmm1s + + +// *************** efficient GMM ******************** // +// Uses inverse of provided S matrix as weighting matrix. +// IV won't be done here but code would work for it as a special case. + +void s_egmm( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Smatrix, // always provided + scalar dofminus, + scalar overid, // not guaranteed to be right if -nocollin- used! + scalar useqr) +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + ZZ = st_matrix(ZZmatrix) + XX = st_matrix(XXmatrix) + XZ = st_matrix(XZmatrix) + Zy = st_matrix(Zymatrix) + ZZinv = st_matrix(ZZinvmatrix) + + QZZ = ZZ / N + QXX = XX / N + QXZ = XZ / N + QZy = Zy / N + QZZinv = ZZinv*N + +// Var-cov matrix of orthog conditions supplied + omega=st_matrix(Smatrix) + W = invsym(omega) // Efficient GMM weighting matrix + rankS = rows(omega) - diag0cnt(W) // since W=inv(omega) + + if (rankS cols(X)) { + Ze = quadcross(Z, wf*wvar, e) + gbar = Ze / N + aux4 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux4 + } + else { + j=0 + } + + st_matrix("r(beta)", beta) + st_matrix("r(V)", V) + st_matrix("r(W)", W) + st_numscalar("r(rss)", ee) + st_numscalar("r(j)", j) + st_numscalar("r(sigmasq)", sigmasq) + st_numscalar("r(rankV)",rankV) + st_numscalar("r(rankS)",rankS) + +} // end program s_egmm + +// *************** inefficient GMM ******************** // + +void s_iegmm( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar QXZ_W_QZXmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar Wmatrix, + string scalar Smatrix, + string scalar bname, + scalar dofminus, + scalar overid, + scalar useqr) +{ + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QXZ = st_matrix(XZmatrix) / N + QZy = st_matrix(Zymatrix) / N + QXZ_W_QZX = st_matrix(QXZ_W_QZXmatrix) + + useqr = (diag0cnt(QXZ_W_QZX)>0) | useqr + +// beta is supplied + beta = st_matrix(bname) + +// Inefficient weighting matrix supplied + W = st_matrix(Wmatrix) + +// Var-cov matrix of orthog conditions supplied + omega=st_matrix(Smatrix) + +// Residuals are supplied + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + +// Calculate V and J. + +// V +// The GMM estimator is "root-N consistent", and technically we do +// inference on sqrt(N)*beta. By convention we work with beta, so we adjust +// the var-cov matrix instead: + aux5 = cholqrsolve(QXZ_W_QZX, QXZ * W, useqr) + V = 1/N * aux5 * omega * aux5' + _makesymmetric(V) + +// alternative +// QXZ_W_QZXinv=invsym(QXZ_W_QZX) +// V = 1/N * QXZ_W_QZXinv * QXZ * W * omega * W * QXZ' * QXZ_W_QZXinv + + rankV = rows(V) - diag0cnt(invsym(V)) // need explicitly to calc rank + rankS = rows(omega) - diag0cnt(invsym(omega)) // need explicitly to calc rank + +// J if overidentified + if (overid) { +// Note that J requires efficient GMM residuals, which means do 2-step GMM to get them. 
+// QXZ_W2s_QZX = QXZ * W2s * QXZ' +// _makesymmetric(QXZ_W2s_QZX) +// QXZ_W2s_QZXinv=invsym(QXZ_W2s_QZX) +// beta2s = (QXZ_W2s_QZXinv * QXZ * W2s * QZy) + aux1 = cholqrsolve(omega, QXZ', useqr) + aux2 = cholqrsolve(omega, QZy, useqr) + aux3s = makesymmetric(QXZ * aux1) + beta2s = cholqrsolve(aux3s, QXZ * aux2, useqr) + beta2s = beta2s' + e2s = y - X * beta2s' + Ze2s = quadcross(Z, wf*wvar, e2s) + gbar = Ze2s / N + aux4 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux4 + } + else { + j=0 + } + + st_matrix("r(V)", V) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", sigmasq) + st_numscalar("r(rankV)",rankV) + st_numscalar("r(rankS)",rankS) + +} // end program s_iegmm + +// *************** LIML ******************** // + +void s_liml( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar Zymatrix, + string scalar Z2Z2matrix, + string scalar YYmatrix, + string scalar ZYmatrix, + string scalar Z2Ymatrix, + string scalar Xymatrix, + string scalar ZZinvmatrix, + string scalar yname, + string scalar Ynames, + string scalar ename, + string scalar Xnames, + string scalar X1names, + string scalar Znames, + string scalar Z1names, + string scalar Z2names, + scalar fuller, + scalar kclass, + string scalar coviv, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar center, + scalar dofminus, + scalar useqr) + +{ + struct ms_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + 
vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.center = center + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = included IVs = X2 + + Ytokens=tokens(Ynames) + Ztokens=tokens(Znames) + Z1tokens=tokens(Z1names) + Z2tokens=tokens(Z2names) + Xtokens=tokens(Xnames) + X1tokens=tokens(X1names) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QXZ = st_matrix(XZmatrix) / N + QZy = st_matrix(Zymatrix) / N + QZ2Z2 = st_matrix(Z2Z2matrix) / N + QYY = st_matrix(YYmatrix) / N + QZY = st_matrix(ZYmatrix) / N + QZ2Y = st_matrix(Z2Ymatrix) / N + QXy = st_matrix(Xymatrix) / N + QZZinv = st_matrix(ZZinvmatrix)*N + + useqr = (diag0cnt(QZZ)>0) | useqr + +// kclass=0 => LIML or Fuller LIML so calculate lambda + if (kclass == 0) { + aux1 = cholqrsolve(QZZ, QZY, useqr) + QWW = QYY - QZY'*aux1 + _makesymmetric(QWW) + if (cols(Z2tokens) > 0) { + aux2 = cholqrsolve(QZ2Z2, QZ2Y, useqr) + QWW1 = QYY - QZ2Y'*aux2 + _makesymmetric(QWW1) + } + else { +// Special case of no exogenous regressors + QWW1 = QYY + } + M=matpowersym(QWW, -0.5) + Eval=symeigenvalues(M*QWW1*M) + lambda=rowmin(Eval) + } + +// Exactly identified but might not be exactly 1, so make it so + if (cols(Z)==cols(X)) { + lambda=1 + } + + if (fuller > (N-cols(Z))) { +printf("\n{error:Error: invalid choice of Fuller LIML parameter.}\n") + exit(error(3351)) + } + else if (fuller > 0) { + k = lambda - fuller/(N-cols(Z)) + } + else if (kclass > 0) { + k = kclass + } + else { + k = lambda + } + + aux3 = cholqrsolve(QZZ, QXZ', useqr) + QXhXh=(1-k)*QXX + k*QXZ*aux3 + _makesymmetric(QXhXh) + aux4 = cholqrsolve(QZZ, QZy, useqr) + aux5 = cholqrsolve(QXhXh, QXZ, useqr) + aux6 = cholqrsolve(QXhXh, QXy, useqr) + beta = aux6*(1-k) + k*aux5*aux4 + beta = beta' + + e[.,.] 
= y - X * beta' + ee = quadcross(e, wf*wvar, e) + sigmasq = ee /(N-dofminus) + + omega = m_omega(vcvo) + + QXhXhinv=invsym(QXhXh) + + if ((robust=="") & (clustvarname=="") & (kernel=="")) { +// Efficient LIML + if (coviv=="") { +// Note dof correction is already in sigmasq + V = 1/N * sigmasq * QXhXhinv + rankV = rows(V) - diag0cnt(V) // since V is proportional to inv(QXhXh) + } + else { + aux7 = makesymmetric(QXZ * aux3) + V = 1/N * sigmasq * invsym(aux7) + rankV = rows(V) - diag0cnt(V) // since V is proportional to inv(aux7) + } + rankS = rows(omega) - diag0cnt(invsym(omega)) + if (cols(Z)>cols(X)) { + Ze = quadcross(Z, wf*wvar, e) + gbar = Ze / N + aux8 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux8 + } + else { + j=0 + } + } + else { +// Inefficient LIML + if (coviv=="") { + aux9 = cholqrsolve(QZZ, aux5', useqr) + V = 1/N * aux9' * omega * aux9 + _makesymmetric(V) + rankV = rows(V) - diag0cnt(invsym(V)) // need explicitly to calc rank + rankS = rows(omega) - diag0cnt(invsym(omega)) // need explicitly to calc rank + } + else { + aux10 = QXZ * aux3 + _makesymmetric(aux10) + aux11 = cholqrsolve(aux10, aux3', useqr) + V = 1/N * aux11 * omega * aux11' + _makesymmetric(V) + rankV = rows(V) - diag0cnt(invsym(V)) // need explicitly to calc rank + rankS = rows(omega) - diag0cnt(invsym(omega)) // need explicitly to calc rank + } + if (cols(Z)>cols(X)) { + aux12 = cholqrsolve(omega, QXZ', useqr) + aux13 = cholqrsolve(omega, QZy, useqr) + aux14 = makesymmetric(QXZ * aux12) + beta2s = cholqrsolve(aux14, QXZ * aux13, useqr) + beta2s = beta2s' + e2s = y - X * beta2s' + Ze2s = quadcross(Z, wf*wvar, e2s) + gbar = Ze2s / N + aux15 = cholqrsolve(omega, gbar, useqr) + j = N * gbar' * aux15 + } + else { + j=0 + } + } + _makesymmetric(V) + + st_matrix("r(beta)", beta) + st_matrix("r(S)", omega) + st_matrix("r(V)", V) + st_numscalar("r(lambda)", lambda) + st_numscalar("r(kclass)", k) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", 
sigmasq) + st_numscalar("r(rankV)",rankV) + st_numscalar("r(rankS)",rankS) + +} // end program s_liml + + +// *************** CUE ******************** // + +void s_gmmcue( string scalar ZZmatrix, + string scalar XZmatrix, + string scalar yname, + string scalar ename, + string scalar Xnames, + string scalar Znames, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + string scalar bname, + string scalar b0name, + scalar center, + scalar dofminus, + scalar useqr) + +{ + + struct ms_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.center = center + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + Ztokens=tokens(Znames) + Xtokens=tokens(Xnames) + + st_view(Z, ., st_tsrevar(Ztokens), touse) + st_view(X, ., st_tsrevar(Xtokens), touse) + st_view(y, ., st_tsrevar(yname), touse) + st_view(e, ., ename, touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + +// Pointers to views + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + py = &y + pX = &X + + if (b0name=="") { + +// CUE beta not supplied, so calculate/optimize + +// Our convention is that regressors are [endog included exog] +// and instruments are [excluded exog included exog] +// If a constant is included, it is the last column. 
+ +// CUE is preceded by IV or 2-step GMM to get starting values. +// Stata convention is that parameter vectors are row vectors, and optimizers +// require this, so must conform to this in what follows. + + beta_init = st_matrix(bname) + +// What follows is how to set out an optimization in Stata. First, initialize +// the optimization structure in the variable S. Then tell Mata where the +// objective function is, that it's a minimization, that it's a "d0" type of +// objective function (no analytical derivatives or Hessians), and that the +// initial values for the parameter vector are in beta_init. Finally, optimize. + S = optimize_init() + + optimize_init_evaluator(S, &m_cuecrit()) + optimize_init_which(S, "min") + optimize_init_evaluatortype(S, "d0") + optimize_init_params(S, beta_init) +// CUE objective function takes 3 extra arguments: y, X and the structure with omega details + optimize_init_argument(S, 1, py) + optimize_init_argument(S, 2, pX) + optimize_init_argument(S, 3, vcvo) + optimize_init_argument(S, 4, useqr) + + beta = optimize(S) + +// The last evaluation of the GMM objective function is J. + j = optimize_result_value(S) + +// Call m_omega one last time to get CUE weighting matrix. + e[.,.] = y - X * beta' + omega = m_omega(vcvo) + } + else { +// CUE beta supplied, so obtain maximized GMM obj function at b0 + beta = st_matrix(b0name) + e[.,.] = y - X * beta' + omega = m_omega(vcvo) +// W = invsym(omega) + gbar = 1/N * quadcross(Z, wf*wvar, e) + j = N * gbar' * cholsolve(omega, gbar, useqr) +// j = N * gbar' * W * gbar + } + +// Bits and pieces + QXZ = st_matrix(XZmatrix)/N + + ee = quadcross(e, wf*wvar, e) + sigmasq=ee/(N-dofminus) + +// QXZ_W_QZX = QXZ * W * QXZ' +// _makesymmetric(QXZ_W_QZX) +// QXZ_W_QZXinv=invsym(QXZ_W_QZX) +// V = 1/N * QXZ_W_QZXinv + aux1 = cholsolve(omega, QXZ') + if (aux1[1,1]==.) 
{ // omega not full rank; W=inv(omega) dubious, exit with error +errprintf("\nError: estimated covariance matrix of moment conditions not of full rank,") +errprintf("\n and optimal GMM weighting matrix not unique.") +errprintf("\nPossible causes:") +errprintf("\n collinearities in instruments (if -nocollin- option was used)") +errprintf("\n singleton dummy variable (dummy with one 1 and N-1 0s or vice versa)") +errprintf("\n {help ivreg2##partial:partial} option may address problem.\n") + exit(506) + } + aux3 = makesymmetric(QXZ * aux1) + V = 1/N * invsym(aux3) + if (diag0cnt(V)) { // V not full rank, likely caused by collinearities; + // b dubious, exit with error +errprintf("\nError: estimated variance matrix of b not of full rank, and CUE estimates") +errprintf("\n unreliable; may be caused by collinearities\n") + exit(506) + } + W = invsym(omega) + + st_matrix("r(beta)", beta) + st_matrix("r(S)", omega) + st_matrix("r(W)", W) + st_matrix("r(V)", V) + st_numscalar("r(j)", j) + st_numscalar("r(rss)", ee) + st_numscalar("r(sigmasq)", sigmasq) + +} // end program s_gmmcue + +// CUE evaluator function. +// Handles only d0-type optimization; todo, g and H are just ignored. +// beta is the parameter set over which we optimize, and +// J is the objective function to minimize. + +void m_cuecrit(todo, beta, pointer py, pointer pX, struct ms_vcvorthog scalar vcvo, useqr, j, g, H) +{ + *vcvo.e[.,.] 
= *py - *pX * beta' + + omega = m_omega(vcvo) + +// Calculate gbar=Z'*e/N + gbar = 1/vcvo.N * quadcross(*vcvo.Z, vcvo.wf*(*vcvo.wvar), *vcvo.e) + aux1 = cholqrsolve(omega, gbar, useqr) + j = vcvo.N * gbar' * aux1 + +// old method +// W = invsym(omega) +// j = vcvo.N * gbar' * W * gbar + +} // end program CUE criterion function + + +// ************** ffirst-stage stats ************************************* + +void s_ffirst( string scalar ZZmatrix, + string scalar XXmatrix, + string scalar XZmatrix, + string scalar ZYmatrix, + string scalar ZZinvmatrix, + string scalar XXinvmatrix, + string scalar XPZXinvmatrix, + string scalar X2X2matrix, + string scalar Z1X2matrix, + string scalar X2ymatrix, + string scalar ename, // Nx1 + string scalar ematnames, // Nx(K1+1) + string scalar yname, + string scalar X1names, + string scalar X2names, + string scalar Z1names, + string scalar touse, + string scalar weight, + string scalar wvarname, + scalar wf, + scalar N, + scalar N_clust, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar center, + scalar dofminus, + scalar sdofminus) + +{ + + struct ms_vcvorthog scalar vcvo + + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.center = center + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + +// X1 = endog regressors +// X2 = exog regressors = included IVs +// Z1 = excluded instruments +// Z2 = 
included IVs = X2 + + Xnames = invtokens( (X1names, X2names), " ") + Znames = invtokens( (Z1names, X2names), " ") + + st_view(y, ., st_tsrevar(tokens(yname)), touse) + st_view(X1, ., st_tsrevar(tokens(X1names)), touse) + st_view(Z1, ., st_tsrevar(tokens(Z1names)), touse) + st_view(X, ., st_tsrevar(tokens(Xnames)), touse) + st_view(Z, ., st_tsrevar(tokens(Znames)), touse) + st_view(e, ., ename, touse) + st_view(emat, ., tokens(ematnames), touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + + vcvo.wvar = &wvar + vcvo.Z = &Z + vcvo.Znames = Znames + vcvo.ZZ = st_matrix(ZZmatrix) + + if ("X2names"~="") { + st_view(X2, ., st_tsrevar(tokens(X2names)), touse) + } + + K1=cols(X1) + K2=cols(X2) + K=K1+K2 + L1=cols(Z1) + L2=cols(X2) + L=L1+L2 + df = L1 + df_r = N-L + + ZZinv = st_matrix(ZZinvmatrix) + XXinv = st_matrix(XXinvmatrix) + XPZXinv = st_matrix(XPZXinvmatrix) + QZZ = st_matrix(ZZmatrix) / N + QXX = st_matrix(XXmatrix) / N + QZX = st_matrix(XZmatrix)' / N + QZY = st_matrix(ZYmatrix) / N + QZZinv = ZZinv*N + QXXinv = XXinv*N + QX2X2 = st_matrix(X2X2matrix) / N + QZ1X2 = st_matrix(Z1X2matrix) / N + QX2y = st_matrix(X2ymatrix) / N + + sheaall = (diagonal(XXinv) :/ diagonal(XPZXinv)) // (X1, X2) in column vector + sheaall = (sheaall[(1::K1), 1 ])' // Just X1 in row vector + +// Full system of reduced form (col 1) and first-stage regressions + bz = cholsolve(QZZ, QZY) + Yhat = Z*bz + Xhat = Yhat[.,(2..(K1+1))], X2 +// VCV for full system + eall = (y, X1) - Yhat + ee = quadcross(eall, wf*wvar, eall) +// sigmas have large-sample dofminus correction incorporated but no small dof corrections + sigmasqall = ee / (N-dofminus) +// rmses have small dof corrections + rmseall = sqrt( ee / (N-L-dofminus-sdofminus) ) +// V has all the classical VCVs in block diagonals + V = sigmasqall # ZZinv +// For Wald test of excluded instruments + R = I(L1) , J(L1, L2, 0) +// For AP and SW stats + QXhXh = quadcross(Xhat, wf*wvar, Xhat) / N + QXhX1 = quadcross(Xhat, wf*wvar, X1 ) / N + +// 
VCV for system of first-stage eqns +// Can be robust; even if not, has correct off-block-diagonal covariances + vcvo.ename = ematnames + vcvo.e = &emat + emat[.,.] = eall + omegar = m_omega(vcvo) + Vr = makesymmetric(I(K1+1)#QZZinv * omegar * I(K1+1)#QZZinv) / N + +// AR statistics from RF (first column) + Rb = bz[ (1::L1), 1 ] + RVR = Vr[| 1,1 \ L1, L1 |] + ARWald = Rb' * cholsolve(RVR, Rb) + ARF = ARWald + ARdf = L1 + if (clustvarname=="") { + ARdf2 = (N-dofminus-L-sdofminus) + ARF = ARWald / (N-dofminus) * ARdf2 / ARdf + } + else { + ARdf2 = N_clust - 1 + ARF = ARWald / (N-1) * (N-L-sdofminus) /(N_clust) * ARdf2 / ARdf + } + ARFp = Ftail(ARdf, ARdf2, ARF) + ARchi2 = ARWald + ARchi2p = chi2tail(ARdf, ARchi2) + +// Stock-Wright LM S statistic +// Equivalent to J stat for model with coeff on endog=0 and with inexog partialled out +// = LM version of AR stat (matches weakiv) + if (K2>0) { + by = cholsolve(QX2X2, QX2y) + e[.,.] = y-X2*by + } + else { + e[.,.] = y + } +// initialize residual for VCV calc to be single Nx1 vector + vcvo.e = &e + vcvo.ename = ename +// get VCV and sstat=J + omega = m_omega(vcvo) + gbar = 1/N * quadcross(Z, wf*wvar, e) + sstat = N * gbar' * cholsolve(omega, gbar) + sstatdf = L1 + sstatp = chi2tail(sstatdf, sstat) + +// Prepare to loop over X1s for F, SW and AP stats +// initialize matrix to save first-stage results + firstmat=J(21,0,0) +// initialize residual for VCV calc to be single Nx1 vector + vcvo.e = &e + vcvo.ename = ename + + for (i=1; i<=K1; i++) { + +// RMSEs for first stage start in SECOND row/column (first has RF) + rmse = rmseall[i+1,i+1] +// Shea partial R2 + shea = sheaall[1,i] +// first-stage coeffs for ith X1. +// (nb: first column is reduced form eqn for y) + b=bz[., (i+1)] +// Classical Wald stat (chi2 here); also yields partial R2 +// Since r is an L1 x 1 zero vector, can use Rb instead of (Rb-r) + Rb = b[ (1::L1), . 
] + RVR = V[| 1+i*L,1+i*L \ i*L+L1, i*L+L1 |] + Wald = Rb' * cholsolve(RVR, Rb) +// Wald stat has dofminus correction in it via sigmasq, +// so remove it to calculate partial R2 + pr2 = (Wald/(N-dofminus)) / (1 + (Wald/(N-dofminus))) + +// Robustify F stat if necessary. + if ((robust~="") | (clustvarname~="") | (kernel~="") | (sw~="")) { + RVR = Vr[| 1+i*L,1+i*L \ i*L+L1, i*L+L1 |] + Wald = Rb' * cholsolve(RVR, Rb) + } +// small dof adjustment is effectively additional L2, e.g., partialled-out regressors + df = L1 + if (clustvarname=="") { + df_r = (N-dofminus-L-sdofminus) + F = Wald / (N-dofminus) * df_r / df + } + else { + df_r = N_clust - 1 + F = Wald / (N-1) * (N-L-sdofminus) * (N_clust - 1) / N_clust / df + } + pvalue = Ftail(df, df_r, F) + +// If #endog=1, AP=SW=standard F stat + if (K1==1) { + Fdf1 = df + Fdf2 = df_r + SWF = F + SWFp = pvalue + SWchi2 = Wald + SWchi2p = chi2tail(Fdf1, SWchi2) + SWr2 = pr2 + APF = SWF + APFp = SWFp + APchi2 = SWchi2 + APchi2p = SWchi2p + APr2 = SWr2 + } + else { + +// Angrist-Pischke and Sanderson-Windmeijer stats etc. 
+// select matrix needed for both; will select all but the endog regressor of interest + selmat=J(1,K,1) + selmat[1,i]=0 // don't select endog regressor of interest + +// AP +// QXhXh is crossproduct of X1hats (fitted Xs) plus Z2s +// QXhX1 is crossproduct with X1s +// QXhXhi and QXhX1i remove the row/col for the endog regressor of interest + QXhXhi = select(select(QXhXh,selmat)', selmat) + QXhX1i = select(QXhX1[.,i], selmat') +// 1st step - in effect, 2nd stage of 2SLS using FITTED X1hats, and then get residuals e1 + b1=cholsolve(QXhXhi, QXhX1i) + QXhXhinv = invsym(QXhXhi) // Need this for V + b1=QXhXhinv*QXhX1i + e1 = X1[.,i] - select(Xhat,selmat)*b1 +// 2nd step - regress e1 on all Zs and test excluded ones + QZe1 = quadcross(Z, wf*wvar, e1 ) / N + b2=cholsolve(QZZ, QZe1) + APe2 = e1 - Z*b2 + ee = quadcross(APe2, wf*wvar, APe2) + sigmasq2 = ee / (N-dofminus) +// Classical V + Vi = sigmasq2 * QZZinv / N + APRb=b2[ (1::L1), .] + APRVR = Vi[ (1::L1), (1..L1) ] + APWald = APRb' * cholsolve(APRVR, APRb) +// Wald stat has dofminus correction in it via sigmasq, +// so remove it to calculate partial R2 + APr2 = (APWald/(N-dofminus)) / (1 + (APWald/(N-dofminus))) + +// Now SW stat +// Uses same 2SLS coeffs as AP but resids use ACTUAL X1 (not fitted X1) + e1 = X1[.,i] - select(X,selmat)*b1 +// next step - regress e on all Zs and test excluded ones + QZe1 = quadcross(Z, wf*wvar, e1 ) / N + b2=cholsolve(QZZ, QZe1) + SWe2 = e1 - Z*b2 + ee = quadcross(SWe2, wf*wvar, SWe2) + sigmasq2 = ee / (N-dofminus) + Vi = sigmasq2 * QZZinv / N + SWRb=b2[ (1::L1), .] + SWRVR = Vi[ (1::L1), (1..L1) ] + SWWald = SWRb' * cholsolve(SWRVR, SWRb) +// Wald stat has dofminus correction in it via sigmasq, +// so remove it to calculate partial R2 + SWr2 = (SWWald/(N-dofminus)) / (1 + (SWWald/(N-dofminus))) + +// Having calculated AP and SW R-sq based on non-robust Wald, now get robust Wald if needed. 
+ if ((robust~="") | (clustvarname~="") | (kernel~="") | (sw~="")) { + e[.,1]=APe2 + omega=m_omega(vcvo) + Vi = makesymmetric(QZZinv * omega * QZZinv) / N + APRVR = Vi[ (1::L1), (1..L1) ] + APWald = APRb' * cholsolve(APRVR, APRb) // re-use APRb + e[.,1]=SWe2 + omega=m_omega(vcvo) + Vi = makesymmetric(QZZinv * omega * QZZinv) / N + SWRVR = Vi[ (1::L1), (1..L1) ] + SWWald = SWRb' * cholsolve(SWRVR, SWRb) // re-use SWRb + } + +// small dof adjustment is effectively additional L2, e.g., partialled-out regressors + Fdf1 = (L1-K1+1) + if (clustvarname=="") { + Fdf2 = (N-dofminus-L-sdofminus) + APF = APWald / (N-dofminus) * Fdf2 / Fdf1 + SWF = SWWald / (N-dofminus) * Fdf2 / Fdf1 + } + else { + Fdf2 = N_clust - 1 + APF = APWald / (N-1) * (N-L-sdofminus) * (N_clust - 1) / N_clust / Fdf1 + SWF = SWWald / (N-1) * (N-L-sdofminus) * (N_clust - 1) / N_clust / Fdf1 + } + APFp = Ftail(Fdf1, Fdf2, APF) + APchi2 = APWald + APchi2p = chi2tail(Fdf1, APchi2) + SWFp = Ftail(Fdf1, Fdf2, SWF) + SWchi2 = SWWald + SWchi2p = chi2tail(Fdf1, SWchi2) + } + +// Assemble results + firstmat = firstmat , /// + (rmse \ shea \ pr2 \ F \ df \ df_r \ pvalue /// + \ SWF \ Fdf1 \ Fdf2 \ SWFp \ SWchi2 \ SWchi2p \ SWr2 /// + \ APF \ Fdf1 \ Fdf2 \ APFp \ APchi2 \ APchi2p \ APr2) + } // end of loop for an X1 variable + + st_numscalar("r(rmse_rf)", rmseall[1,1]) + st_matrix("r(firstmat)", firstmat) + st_matrix("r(b)", bz) + st_matrix("r(V)", Vr) + st_matrix("r(S)", omegar) + st_numscalar("r(archi2)", ARchi2) + st_numscalar("r(archi2p)", ARchi2p) + st_numscalar("r(arf)", ARF) + st_numscalar("r(arfp)", ARFp) + st_numscalar("r(ardf)", ARdf) + st_numscalar("r(ardf_r)", ARdf2) + st_numscalar("r(sstat)",sstat) + st_numscalar("r(sstatp)",sstatp) + st_numscalar("r(sstatdf)",sstatdf) + +} // end program s_ffirst + +// ********************************************************************** + +void s_omega( + string scalar ZZmatrix, + string scalar ename, + string scalar Znames, + string scalar touse, + string scalar 
weight, + string scalar wvarname, + scalar wf, + scalar N, + string scalar robust, + string scalar clustvarname, + string scalar clustvarname2, + string scalar clustvarname3, + scalar bw, + string scalar kernel, + string scalar sw, + string scalar psd, + string scalar ivarname, + string scalar tvarname, + string scalar tindexname, + scalar tdelta, + scalar center, + scalar dofminus) +{ + + struct ms_vcvorthog scalar vcvo + + vcvo.ename = ename + vcvo.Znames = Znames + vcvo.touse = touse + vcvo.weight = weight + vcvo.wvarname = wvarname + vcvo.robust = robust + vcvo.clustvarname = clustvarname + vcvo.clustvarname2 = clustvarname2 + vcvo.clustvarname3 = clustvarname3 + vcvo.kernel = kernel + vcvo.sw = sw + vcvo.psd = psd + vcvo.ivarname = ivarname + vcvo.tvarname = tvarname + vcvo.tindexname = tindexname + vcvo.wf = wf + vcvo.N = N + vcvo.bw = bw + vcvo.tdelta = tdelta + vcvo.center = center + vcvo.dofminus = dofminus + vcvo.ZZ = st_matrix(ZZmatrix) + + st_view(Z, ., st_tsrevar(tokens(Znames)), touse) + st_view(wvar, ., st_tsrevar(wvarname), touse) + st_view(e, ., vcvo.ename, touse) + + vcvo.e = &e + vcvo.Z = &Z + vcvo.wvar = &wvar + + ZZ = st_matrix(ZZmatrix) + + S=m_omega(vcvo) + + st_matrix("r(S)", S) +} // end of s_omega program + + +// Mata utility for sequential use of solvers +// Default is cholesky; +// if that fails, use QR; +// if overridden, use QR. + +function cholqrsolve ( numeric matrix A, + numeric matrix B, + | real scalar useqr) +{ + if (args()==2) useqr = 0 + + real matrix C + + if (!useqr) { + C = cholsolve(A, B) + if (C[1,1]==.) 
{ + C = qrsolve(A, B) + } + } + else { + C = qrsolve(A, B) + } + + return(C) + +} + +end // end Mata section + + +// Include ftools ----------------------------------------------------------- + cap findfile "ftools.mata" + if (_rc) { + di as error "ivreghdfe requires the {bf:ftools} package, which is not installed" + di as error `" - install from {stata ssc install ftools:SSC}"' + di as error `" - install from {stata `"net install ftools, from("https://github.com/sergiocorreia/ftools/raw/master/src/")"':Github}"' + exit 9 + } + +// Include reghdfe -------------------------------------------------- + cap findfile "reghdfe.mata" + if (_rc) { + di as error "ivreghdfe requires the {bf:reghdfe} package, which is not installed" + di as error `" - install from {stata ssc install reghdfe:SSC}"' + di as error `" - install from {stata `"net install reghdfe, from("https://github.com/sergiocorreia/reghdfe/raw/master/src/")"':Github}"' + exit 9 + } + include "reghdfe.mata", adopath + + +exit // exit before loading comments + +********************************** VERSION COMMENTS ********************************** +* Initial version cloned from official ivreg version 5.0.9 19Dec2001 +* 1.0.2: add logic for reg3. 
Sargan test +* 1.0.3: add prunelist to ensure that count of excluded exogenous is correct +* 1.0.4: revise option to exog(), allow included exog to be specified as well +* 1.0.5: switch from reg3 to regress, many options and output changes +* 1.0.6: fixed treatment of nocons in Sargan and C-stat, and corrected problems +* relating to use of nocons combined with a constant as an IV +* 1.0.7: first option reports F-test of excluded exogenous; prunelist bug fix +* 1.0.8: dropped prunelist and switched to housekeeping of variable lists +* 1.0.9: added collinearity checks; C-stat calculated with recursive call; +* added ffirst option to report only F-test of excluded exogenous +* from 1st stage regressions +* 1.0.10: 1st stage regressions also report partial R2 of excluded exogenous +* 1.0.11: complete rewrite of collinearity approach - no longer uses calls to +* _rmcoll, does not track specific variables dropped; prunelist removed +* 1.0.12: reorganised display code and saved results to enable -replay()- +* 1.0.13: -robust- and -cluster- now imply -small- +* 1.0.14: fixed hascons bug; removed ivreg predict fn (it didn't work); allowed +* robust and cluster with z stats and correct dofs +* 1.0.15: implemented robust Sargan stat; changed to only F-stat, removed chi-sq; +* removed exog option (only orthog works) +* 1.0.16: added clusterised Sargan stat; robust Sargan handles collinearities; +* predict now works with standard SE options plus resids; fixed orthog() +* so it accepts time series operators etc. +* 1.0.17: fixed handling of weights. fw, aw, pw & iw all accepted. +* 1.0.18: fixed bug in robust Sargan code relating to time series variables.
+* 1.0.19: fixed bugs in reporting ranks of X'X and Z'Z +* fixed bug in reporting presence of constant +* 1.0.20: added GMM option and replaced robust Sargan with (equivalent) J; +* added saved statistics of 1st stage regressions +* 1.0.21: added Cragg HOLS estimator, including allowing empty endog list; +* -regress- syntax now not allowed; revised code searching for "_cons" +* 1.0.22: modified cluster output message; fixed bug in replay for Sargan/Hansen stat; +* exactly identified Sargan/Hansen now exactly zero and p-value not saved as e(); +* cluster multiplier changed to 1 (from buggy multiplier), in keeping with +* eg Wooldridge 2002 p. 193. +* 1.0.23: fixed orthog option to prevent abort when restricted equation is underid. +* 1.0.24: fixed bug if 1st stage regressions yielded missing values for saving in e(). +* 1.0.25: Added Shea version of partial R2 +* 1.0.26: Replaced Shea algorithm with Godfrey algorithm +* 1.0.27: Main call to regress is OLS form if OLS or HOLS is specified; error variance +* in Sargan and C statistics use small-sample adjustment if -small- option is +* specified; dfn of S matrix now correctly divided by sample size +* 1.0.28: HAC covariance estimation implemented +* Symmetrize all matrices before calling syminv +* Added hack to catch F stats that ought to be missing but actually have a +* huge-but-not-missing value +* Fixed dof of F-stat - was using rank of ZZ, should have used rank of XX (couldn't use df_r +* because it isn't always saved. This is because saving df_r triggers small stats +* (t and F) even when -post- is called without dof() option, hence df_r saved only +* with -small- option and hence a separate saved macro Fdf2 is needed. 
+* Added rankS to saved macros +* Fixed trap for "no regressors specified" +* Added trap to catch gmm option with no excluded instruments +* Allow OLS syntax (no endog or excluded IVs specified) +* Fixed error messages and traps for rank-deficient robust cov matrix; includes +* singleton dummy possibility +* Capture error if posting estimated VCV that isn't pos def and report slightly +* more informative error message +* Checks 3 variable lists (endo, inexog, exexog) separately for collinearities +* Added AC (autocorrelation-consistent but conditionally-homoskedastic) option +* Sargan no longer has small-sample correction if -small- option +* robust, cluster, AC, HAC all passed on to first-stage F-stat +* bw must be < T +* 1.0.29 -orthog- also displays Hansen-Sargan of unrestricted equation +* Fixed collinearity check to include nocons as well as hascons +* Fixed small bug in Godfrey-Shea code - macros were global rather than local +* Fixed larger bug in Godfrey-Shea code - was using mixture of sigma-squares from IV and OLS +* with and without small-sample corrections +* Added liml and kclass +* 1.0.30 Changed order of insts macro to match saved matrices S and W +* 2.0.00 Collinearities no longer -qui- +* List of instruments tested in -orthog- option prettified +* 2.0.01 Fixed handling of nocons with no included exogenous, including LIML code +* 2.0.02 Allow C-test if unrestricted equation is just-identified. Implemented by +* saving Hansen-Sargan dof as = 0 in e() if just-identified. 
+* 2.0.03 Added score() option per latest revision to official ivreg +* 2.0.04 Changed score() option to pscore() per new official ivreg +* 2.0.05 Fixed est hold bug in first-stage regressions +* Fixed F-stat finite sample adjustment with cluster option to match official Stata +* Fixed F-stat so that it works with hascons (collinearity with constant is removed) +* Fixed bug in F-stat code - wasn't handling failed posting of vcv +* No longer allows/ignores nonsense options +* 2.0.06 Modified lsStop to sync with official ivreg 5.1.3 +* 2.0.07a Working version of CUE option +* Added sortpreserve, ivar and tvar options +* Fixed smalls bug in calculation of T for AC/HAC - wasn't using the last ob +* in QS kernel, and didn't take account of possible dropped observations +* 2.0.07b Fixed macro bug that truncated long varlists +* 2.0.07c Added dof option. +* Changed display of RMSE so that more digits are displayed (was %8.1g) +* Fixed small bug where cstat was local macro and should have been scalar +* Fixed bug where C stat failed with cluster. NB: wmatrix option and cluster are not compatible! +* 2.0.7d Fixed bug in dof option +* 2.1.0 Added first-stage identification, weak instruments, and redundancy stats +* 2.1.01 Tidying up cue option checks, reporting of cue in output header, etc. +* 2.1.02 Used Poskitt-Skeels (2002) result that C-D eval = cceval / (1-cceval) +* 2.1.03 Added saved lists of separate included and excluded exogenous IVs +* 2.1.04 Added Anderson-Rubin test of signif of endog regressors +* 2.1.05 Fix minor bugs relating to cluster and new first-stage stats +* 2.1.06 Fix bug in cue: capture estimates hold without corresponding capture on estimates unhold +* 2.1.07 Minor fix to ereturn local wexp, promote to version 8.2 +* 2.1.08 Added dofminus option, removed dof option. Added A-R test p-values to e(). +* Minor bug fix to A-R chi2 test - was N chi2, should have been N-L chi2. +* Changed output to remove potentially misleading refs to N-L etc. 
+* Bug fix to rhs count - sometimes regressors could have exact zero coeffs +* Bug fix related to cluster - if user omitted -robust-, orthog would use Sargan and not J +* Changed output of Shea R2 to make clearer that F and p-values do not refer to it +* Improved handling of collinearites to check across inexog, exexog and endo lists +* Total weight statement moved to follow summ command +* Added traps to catch errors if no room to save temporary estimations with _est hold +* Added -savefirst- option. Removed -hascons-, now synonymous with -nocons-. +* 2.1.09 Fixes to dof option with cluster so it no longer mimics incorrect areg behavior +* Local ivreg2cmd to allow testing under name ivreg2 +* If wmatrix supplied, used (previously not used if non-robust sargan stat generated) +* Allowed OLS using (=) syntax (empty endo and exexog lists) +* Clarified error message when S matrix is not of full rank +* cdchi2p, ardf, ardf_r added to saved macros +* first and ffirst replay() options; DispFirst and DispFFirst separately codes 1st stage output +* Added savefprefix, macro with saved first-stage equation names. +* Added version option. 
+* Added check for duplicate variables to collinearity checks +* Rewrote/simplified Godfrey-Shea partial r2 code +* 2.1.10 Added NOOUTput option +* Fixed rf bug so that first does not trigger unnecessary saved rf +* Fixed cue bug - was not starting with robust 2-step gmm if robust/cluster +* 2.1.11 Dropped incorrect/misleading dofminus adjustments in first-stage output summary +* 2.1.12 Collinearity check now checks across inexog/exexog/endog simultaneously +* 2.1.13 Added check to catch failed first-stage regressions +* Fixed misleading failed C-stat message +* 2.1.14 Fixed mishandling of missing values in AC (non-robust) block +* 2.1.15 Fixed bug in RF - was ignoring weights +* Added -endog- option +* Save W matrix for all cases; ensured copy is posted with wmatrix option so original isn't zapped +* Fixed cue bug - with robust, was entering IV block and overwriting correct VCV +* 2.1.16 Added -fwl- option +* Saved S is now robust cov matrix of orthog conditions if robust, whereas W is possibly non-robust +* weighting matrix used by estmator. inv(S)=W if estimator is efficient GMM. +* Removed pscore option (dropped by official ivreg). +* Fixed bug where -post- would fail because of missing values in vcv +* Remove hascons as synonym for nocons +* OLS now outputs 2nd footer with variable lists +* 2.1.17 Reorganization of code +* Added ll() macro +* Fixed N bug where weights meant a non-integer ob count that was rounded down +* Fixed -fwl- option so it correctly handles weights (must include when partialling-out) +* smatrix option takes over from wmatrix option. Consistent treatment of both. +* Saved smatrix and wmatrix now differ in case of inefficient GMM. +* Added title() and subtitle() options. +* b0 option returns a value for the Sargan/J stat even if exactly id'd. +* (Useful for S-stat = value of GMM objective function.) +* HAC and AC now allowed with LIML and k-class. 
+* Collinearity improvements: bug fixed because collinearity was mistakenly checked across +* inexog/exexog/endog simultaneously; endog predicted exactly by IVs => reclassified as inexog; +* _rmcollright enforces inexog>endo>exexog priority for collinearities, if Stata 9.2 or later. +* K-class, LIML now report Sargan and J. C-stat based on Sargan/J. LIML reports AR if homosked. +* nb: can always easily get a C-stat for LIML based on diff of two AR stats. +* Always save Sargan-Hansen as e(j); also save as e(sargan) if homoskedastic. +* Added Stock-Watson robust SEs options sw() +* 2.1.18 Added Cragg-Donald-Stock-Yogo weak ID statistic critical values to main output +* Save exexog_ct, inexog_ct and endog_ct as macros +* Stock-Watson robust SEs now assume ivar is group variable +* Option -sw- is standard SW. Option -swpsd- is PSD version a la page 6 point 10. +* Added -noid- option. Suppresses all first-stage and identification statistics. +* Internal calls to ivreg2 use noid option. +* Added hyperlinks to ivreg2.hlp and helpfile argument to display routines to enable this. 
+* 2.1.19 Added matrix rearrangement and checks for smatrix and wmatrix options +* Recursive calls to cstat simplified - no matrix rearrangement or separate robust/nonrobust needed +* Reintroduced weak ID stats to ffirst output +* Added robust ID stats to ffirst output for case of single endogenous regressor +* Fixed obscure bug in reporting 1st stage partial r2 - would report zero if no included exogenous vars +* Removed "HOLS" in main output (misleading if, e.g., estimation is AC but not HAC) +* Removed "ML" in main output if no endogenous regressors - now all ML is labelled LIML +* model=gmm is now model=gmm2s; wmatrix estimation is model=gmm +* wmatrix relates to gmm estimator; smatrix relates to gmm var-cov matrix; b0 behavior equiv to wmatrix +* b0 option implies nooutput and noid options +* Added nocollin option to skip collinearity checks +* Fixed minor display bug in ffirst output for endog vars with varnames > 12 characters +* Fixed bug in saved rf and first-stage results for vars with long varnames; uses permname +* Fixed bug in model df - had counted RHS, now calculates rank(V) since latter may be rank-deficient +* Rank of V now saved as macro rankV +* fwl() now allows partialling-out of just constant with _cons +* Added Stock-Wright S statistic (but adds overhead - calls preserve) +* Properties now include svyj. +* Noted only: fwl bug doesn't allow time-series operators. +* 2.1.20 Fixed Stock-Wright S stat bug - didn't allow time-series operators +* 2.1.21 Fixed Stock-Wright S stat to allow for no exog regressors cases +* 2.2.00 CUE partials out exog regressors, estimates endog coeffs, then exog regressors separately - faster +* gmm2s becomes standard option, gmm supported as legacy option +* 2.2.01 Added explanatory messages if gmm2s used. +* States if estimates efficient for/stats consistent for het, AC, etc. +* Fixed small bug that prevented "{help `helpfile'##fwl:fwl}" from displaying when -capture-d. 
+* Error message in footer about insuff rank of S changed to warning message with more informative message. +* Fixed bug in CUE with weights. +* 2.2.02 Removed CUE partialling-out; still available with fwl +* smatrix and wmatrix become documented options. e(model)="gmmw" means GMM with arbitrary W +* 2.2.03 Fixed bug in AC with aweights; was weighting zi'zi but not ei'ei. +* 2.2.04 Added abw code for bw(), removed properties(svyj) +* 2.2.05 Fixed bug in AC; need to clear variable vt1 at start of loop +* If iweights, N (#obs with precision) rounded to nearest integer to mimic official Stata treatment +* and therefore don't need N scalar at all - will be same as N +* Saves fwl_ct as macro. +* -ffirst- output, weak id stat, etc. now adjust for number of partialled-out variables. +* Related changes: df_m, df_r include adjustments for partialled-out variables. +* Option nofwlsmall introduced - suppresses above adjustments. Undocumented in ivreg2.hlp. +* Replaced ID tests based on canon corr with Kleibergen-Paap rk-based stats if not homoskedastic +* Replaced LR ID test stats with LM test stats. +* Checks that -ranktest- is installed. +* 2.2.06 Fixed bug with missing F df when cue called; updated required version of ranktest +* 2.2.07 Modified redundancy test statistic to match standard regression-based LM tests +* Change name of -fwl- option to -partial-. +* Use of b0 means e(model)=CUE. Added informative b0 option titles. b0 generates output but noid. +* Removed check for integer bandwidth if auto option used. +* 2.2.08 Add -nocollin- to internal calls and to -ivreg2_cue- to speed performance. +* 2.2.09 Per msg from Brian Poi, Alastair Hall verifies that Newey-West cited constant of 1.1447 +* is correct. Corrected mata abw() function. Require -ranktest- 1.1.03. +* 2.2.10 Added Angrist-Pischke multivariate f stats. Rewrite of first and ffirst output. +* Added Cragg-Donald to weak ID output even when non-iid. 
+* Fixed small bug in non-robust HAC code whereby extra obs could be used even if dep var missing. +* (required addition of L`tau'.(`s1resid') in creation of second touse variable) +* Fixed bugs that zapped varnames with "_cons" in them +* Changed tvar and ivar setup so that data must be tsset or xtset. +* Fixed bug in redundancy test stat when called by xtivreg2+cluster - no dofminus adj needed in this case +* Changed reporting so that gaps between panels are not reported as such. +* Added check that weight variable is not transformed by partialling out. +* Changed Stock-Wright S statistic so that it uses straight partialling-out of exog regressors +* (had been, in effect, doing 2SGMM partialling-out) +* Fixed bug where dropped collinear endogenous didn't get a warning or listing +* Removed N*CDEV Wald chi-sq statistic from ffirst output (LM stat enough) +* 3.0.00 Fully rewritten and Mata-ized code. Require min Stata 10.1 and ranktest 1.2.00. +* Mata support for Stock-Watson SEs for fixed effects estimator; doesn't support fweights. +* Changed handling of iweights yielding non-integer N so that (unlike official -regress-) all calcs +* for RMSE etc. use non-integer N and N is rounded down only at the end. +* Added support for Thompson/Cameron-Gelbach-Miller 2-level cluster-robust vcvs. +* 3.0.01 Now exits more gracefully if no regressors survive after collinearity checks +* 3.0.02 -capture- instead of -qui- before reduced form to suppress not-full-rank error warning +* Modified Stock-Wright code to partial out all incl Xs first, to reduce possibility of not-full-rank +* omega and missing sstat. Added check within Stock-Wright code to catch not-full-rank omega. +* Fixed bug where detailed first-stage stats with cluster were disrupted if data had been tsset +* using a different variables. +* Fixed bug that didn't allow regression on just a constant. +* Added trap for no observations. +* Added trap for auto bw with panel data - not allowed. 
+* 3.0.03 Fixed bug in m_omega that always used Stock-Watson spectral decomp to create invertible shat +* instead of only when (undocumented) spsd option is called. +* Fixed bug where, if matsize too small, exited with wrong error (mistakenly detected as collinearities) +* Removed inefficient call to -ranktest- that unnecessarily requested stats for all ranks, not just full. +* 3.0.04 Fixed coding error in m_omega for cluster+kernel. Was *vcvo.e[tmatrix[.,1]], should have been (*vcvo.e)[tmatrix[.,1]]. +* Fixed bug whereby clusters defined by strings were not handled correctly. +* Updated ranktest version check +* 3.0.05 Added check to catch unwanted transformations of time or panel variables by partial option. +* 3.0.06 Fixed partial bug - partialcons macro saved =0 unless _cons explicitly in partial() varlist +* 3.0.07 kclass was defaulting to LIML - fixed. +* Renamed spsd option to psda (a=abs) following Stock-Watson 2008. Added psd0 option following Politis 2007. +* Fixed bug that would prevent RF and first-stage with cluster and TS operators if cluster code changed sort order. +* Modified action if S matrix is not full rank and 2-step GMM chosen. Now continue but report problem in footer +* and do not report J stat etc. +* 3.0.08 Fixed cluster+bw; was not using all observations of all panel units if panel was unbalanced. +* Fixed inconsequential bug in m_omega that caused kernel loop to be entered (with no impact) even if kernel=="" +* Fixed small bug that compared bw to T instead of (correctly) to T/delta when checking that bw can't be too long. +* Added dkraay option = cluster on t var + kernel-robust +* Added kiefer option = truncated kernel, bw=T (max), and no robust +* Fixed minor reporting bug that reported time-series gaps in entire panel dataset rather than just portion touse-d. +* Recoded bw and kernel checks into subroutine vkernel. Allow non-integer bandwidth within check as in ranktest. 
+* 3.1.01 First ivreg2 version with accompanying Mata library (shared with -ranktest-). Mata library includes +* struct ms_vcvorthog, m_omega, m_calckw, s_vkernel. +* Fixed bug in 2-way cluster code (now in m_omega in Mata library) - would crash if K>1 (relevant for -ranktest- only). +* 3.1.02 Converted cdsy to Mata code and moved to Mata library. Standardized spelling/caps/etc. of QS as "Quadratic Spectral". +* 3.1.03 Improved partialling out in s_sstat and s_ffirst: replaced qrsolve with invsym. +* 3.1.04 Fixed minor bug in s_crossprod - would crash with L1=0 K1>0, and also with K=0 +* 3.1.05 Fixed minor bug in orthog - wasn't saving est results if eqn w/o suspect instruments did not execute properly +* Fixed minor bug in s_cccollin() - didn't catch perverse case of K1>0 (endog regressors) and L1=0 (no excl IVs) +* 3.1.06 Spelling fix for Danielle kernel, correct error check for bw vs T-1 +* 3.1.07 Fixed bug that would prevent save of e(sample) when partialling out just a constant +* 3.1.08 01Jan14. Fixed reporting bug with 2-way clustering and kernel-robust that would give wrong count for 2nd cluster variable. +* 3.1.09 13July14. _rmcollright under version control has serious bug for v10 and earlier. Replaced with canon corr approach. +* Fixed obscure bug in estimation sample - was not using obs when tsset tvar is missing, even if TS operators not used. +* Fixed bug in auto bw code so now ivreg2 and ivregress agree. Also, ivreg2 auto bw code handles gaps in TS correctly. +* 4.0.00 25Jan15. Promote to require Stata version 11.2 +* Rewrite of s_gmm1s, s_iegmm, s_egmm etc. to use matrix solvers rather than inversion. +* rankS and rankV now calculated along with estimators; rankS now always saved. +* Returned to use of _rmcollright to detect collinearities since bug was in Stata 10's _rmcollright and now not relevant. +* Added reporting of collinearities and duplicates in replay mode. +* Rewrite of legacy support for previous ivreg2x version. 
Main program calls ivreg2x depending on _caller(). +* Estimation and replay moved to ivreg211 subroutine above. +* 4.0.01 8Feb15. Fixed bug in default name and command used used for saved first and RF equations +* Fixed bug in saved command line (was ivreg211, should be ivreg2). +* 4.0.02 9Feb15. Changed forced exit at Stata <11 before continuing loading to forced exit pre-Mata code at Stata <9. +* 4.1.00 Substantial rewrite to allow factor variables. Now also accepts TS ops as well as FV ops in partial varlist. +* Rewrite included code for dropped/collinear/reclassified. +* Saved RF and 1st-stage estimations have "if e(sample)" instead of "if `touse'" in e(cmdline). +* Rewrite of s_gmm1s etc. to use qrsolve if weighting matrix not full rank or cholsolve fails +* Fixed bug in display subroutines that would display hyperlink to wrong (nonexistent) help file. +* 4.1.01 15Jun15. Fixed bug that did not allow dropped variables to be in partial(.) varlist. +* Major rewrite of parsing code and collinearity/dropped/reclassified code. +* Added support for display options noomitted, vsquish, noemptycells, baselevels, allbaselevels. +* Changed from _rmcoll/_rmcollright/_rmcoll2list to internal ivreg2_rmcollright2 +* Changed failure of ranktest to obtain id stats to non-fatal so that estimation proceeds. +* Removed recount via _rmcoll if noid option specified +* Added partial(_all) option. +* Improved checks of smatrix, wmatrix, b0 options +* Rewrite of first-stage and reduced form code; rewrite of replay(.) functionality +* Added option for displaying system of first-stage/reduced form eqns. +* Replaced AP first-stage test stats with SW (Sanderson-Windmeijer) first-stage stats +* Corrected S LM stat option; now calcuated in effect as J stat for case of no endog (i.e. b=0) +* with inexog partialled out i.e. LM version of AR stat; now matches weakiv +* Undocumented FV-related options: fvsep (expand endo, inexog, exexog separately) fvall (expand together) +* 4.1.02 17Jun15. 
Fixed bug in collinearity check - was ignoring weights. +* More informative error message if invalid matrix provided to smatrix(.) or wmatrix(.) options. +* Caught error if depvar was FV or TS var that expanded to >1 variable. +* 4.1.03 18Jun15. Fixed bug with robust + rf option. +* 4.1.04 18Jun15. Fixed bug in AR stat with dofminus option + cluster (was subtracting dof, shouldn't). +* 4.1.05 18Jun15. Added rmse, df_m, df_r to saved RF and first-stage equation results. +* 4.1.06 4July15. Replaced mvreg with Mata code for partialling out (big speed gains with many vars). +* Rewrote AddOmitted to avoid inefficient loop; replaced with Mata subscripting. +* Failure of id stats because of collinearities triggers error message only; estimation continues. +* Calculation of dofs etc. uses rankS and rankV instead of iv1_ct and rhs1_ct; +* counts are therefore correct even in presence of collinearities and use of nocollin option. +* nocollin options triggers use of QR instead of default Cholesky. +* rankxx and rankzz now based on diag0cnt of (XX)^-1 and (ZZ)^-1. +* CUE fails if either S or V not full rank; can happen if nocollin option used. +* Added undocumented useqr option to force use of QR instead of Cholesky. +* Misc other code tweaks to make results more robust to nocollin option. +* 4.1.07 12July15. Fixed bugs in calculation of rank(V) (had miscounted in some cases if omega not full rank) +* Changed calc of dofs etc. from rankS and rankV to rankzz and rankxx (had miscounted in some cases etc.). +* Restored warning message for all exog regressors case if S not full rank. +* 4.1.08 27July15. Replaced wordcount(.) function with word count macro in AddOmitted; +* AddOmitted called only if any omitted regressors to add. +* Added center option for centering moments. +* 4.1.09 20Aug15. Expanded error message for failure to save first-stage estimations (var name too long). +* Fixed bug when weighting used with new partial-out code (see 4.1.06 4July15). 
+* Tweaked code so that if called under Stata version < 11, main ivreg2.ado is exited immediately after +* loading parent ivreg2 program. Removed automatic use of QR solver when nocollin option used. +* Added saved condition numbers for XX and ZZ. +* e(cmdline) now saves original string including any "s (i.e., saves `0' instead of `*'). +* 4.1.10 Fixed bug with posting first-stage results if sort had been disrupted by Mata code. +* Fixed bug which mean endog(.) and orthog(.) varlists weren't saved or displayed. +* 4.1.11 22Nov19. Added caller(.) option to ivreg211 subroutine to pass version of parent Stata _caller(.). +* Local macro with this parent Stata version is `caller'. +* Changed calls to ranktest so that if parent Stata is less than version 16, +* ranktest is called under version control as version 11.2: ranktest ..., +* otherwise it is called as version `caller': ranktest ... . +* Added macro e(ranktestcmd); will be ranktest, or ranktest11, or .... diff --git a/110/replication_package/replication/ado/plus/i/ivreghdfe.sthlp b/110/replication_package/replication/ado/plus/i/ivreghdfe.sthlp new file mode 100644 index 0000000000000000000000000000000000000000..fe06efbf5eb09ba1db93390d2a30bca49bda1bfb --- /dev/null +++ b/110/replication_package/replication/ado/plus/i/ivreghdfe.sthlp @@ -0,0 +1,72 @@ +{smcl} +{* *! version 1.1.1 14Dec2021}{...} +{vieweralsosee "ivreg" "help ivreg2"}{...} +{vieweralsosee "reghdfe" "help reghdfe"}{...} +{vieweralsosee "ftools" "help ftools"}{...} +{vieweralsosee "[R] ivregress" "help ivregress"}{...} +{vieweralsosee "" "--"}{...} +{title:Title} + +{p2colset 5 18 20 2}{...} +{p2col :{cmd:ivreghdfe} {hline 2}}Extended instrumental variable regressions with multiple levels of fixed effects{p_end} +{p2colreset}{...} + +{marker syntax}{...} +{title:Syntax} + +{pstd} +{cmd:ivreghdfe} is essentially {help ivreg2} with an additional {help reghdfe##options:absorb()} option from {cmd:reghdfe}. 
See the links above for the detailed help files of each program. + +{pstd} +To use {cmd:ivreghdfe}, you must have installed three packages: {cmd: ftools}, {cmd: reghdfe}, and {cmd: ivreg2} +(see the {browse "https://github.com/sergiocorreia/ivreghdfe#installation":online guide}). + +{pstd} +You can also pass additional reghdfe optimization options directly: + +{phang2}{stata sysuse auto}{p_end} +{phang2}{stata ivreghdfe price weight (length=gear), absorb(rep78) tol(1e-6)}{p_end} +{phang2}{stata ivreghdfe price weight (length=gear), absorb(rep78) accel(none)}{p_end} + + +{title:Citation} + +Please cite the ivreg2 and/or reghdfe commands directly: + +{phang}Baum, C.F., Schaffer, M.E., Stillman, S. 2010. +ivreg2: Stata module for extended instrumental variables/2SLS, GMM and AC/HAC, LIML and k-class regression. +{browse "http://ideas.repec.org/c/boc/bocode/s425401.html":http://ideas.repec.org/c/boc/bocode/s425401.html}{p_end} + +{phang}Correia, Sergio. 2017. +Linear Models with High-Dimensional Fixed Effects: An Efficient and Feasible Estimator (Working Paper) +{browse "https://github.com/sergiocorreia/reghdfe/#citation":https://github.com/sergiocorreia/reghdfe/#citation}{p_end} + + +{title:Support for margins} + +Note that there is experimental support for the {cmd:margins} postestimation command, but it hasn't been tested with advanced options such as nonlinear expressions. + + +{title:Feedback} + +For any issues or suggestions, please see the {browse "https://github.com/sergiocorreia/ivreghdfe":Github} website, including the {browse "https://github.com/sergiocorreia/ivreghdfe/issues":issue tracker}. 
+ + +{title:ivreg2 Authors} + + Christopher F Baum, Boston College, USA + baum@bc.edu + + Mark E Schaffer, Heriot-Watt University, UK + m.e.schaffer@hw.ac.uk + + Steven Stillman, Motu Economic and Public Policy Research + stillman@motu.org.nz + + +{title:reghdfe Author} + +{pstd}Sergio Correia{break} +Board of Governors of the Federal Reserve{break} +Email: {browse "mailto:sergio.correia@gmail.com":sergio.correia@gmail.com} +{p_end} diff --git a/110/replication_package/replication/ado/plus/j/join.ado b/110/replication_package/replication/ado/plus/j/join.ado new file mode 100644 index 0000000000000000000000000000000000000000..e586d96e6f01f35a2091d3d484d40e8b5610b679 --- /dev/null +++ b/110/replication_package/replication/ado/plus/j/join.ado @@ -0,0 +1,669 @@ +*! version 2.48.0 29mar2021 +program define join + +// Parse -------------------------------------------------------------------- + + syntax /// + [anything] /// Variables that will be added (default is _all unless keepnone is used) + , /// + [from(string asis) into(string asis)] /// -using- dataset + [by(string)] /// Primary and foreign keys + [KEEP(string)] /// 1 master 2 using 3 match + [ASSERT(string)] /// 1 master 2 using 3 match + [GENerate(name) NOGENerate] /// _merge variable + [UNIQuemaster] /// Assert that -by- is an id in the master dataset + [noLabel] /// + [KEEPNone] /// + [noNOTEs] /// + [noREPort] /// + [Verbose] /// + [METHOD(string)] // empty, or hash0, hash1, etc. 
+ + * Parse details of using dataset + _assert (`"`from'"' != "") + (`"`into'"' != "") == 1, /// + msg("specify either from() or into()") + ParseUsing `from'`into' // Return -filename- and -if- + + * Parse _merge indicator + _assert ("`generate'" != "") + ("`nogenerate'" != "") < 2, /// + msg("generate() and nogenerate are mutually exclusive") + if ("`nogenerate'" == "") { + if ("`generate'" == "") loc generate _merge + confirm new variable `generate' + } + else { + tempvar generate + } + + * Parse booleans + loc is_from = (`"`from'"' != "") + loc uniquemaster = ("`uniquemaster'" != "") + loc label = ("`label'" == "") + loc notes = ("`notes'" == "") + loc report = ("`report'" == "") + loc verbose = ("`verbose'" != "") + + * Parse keep() and assert() requirements + ParseMerge, keep(`keep') assert(`assert') + /* Return locals + keep_using: 1 if we will keep using-only obs + assert_not_using: 1 to check that there are no using-only obs. + keep_nums: {1, 3, 1 3} depending on whether we keep master/match + assert_nums: as above but to assert only these exist (besides using) + keep_words assert_words: as above but with words instead of nums + */ + + * Parse -key- variables + ParseBy `is_from' `by' /// Return -master_keys- and -using_keys- + + +// Load using dataset ------------------------------------------------------- + + * Load -using- dataset + if (`is_from') { + preserve + if (substr("`filename'", -8, 8) == ".parquet") { + if ("`anything'" != "" | "`keepnone'" != "") { + loc vars "`using_keys' `anything' using" + } + cap noi parquet use `vars' "`filename'", clear + if (_rc != 0) { + di as err "Parquet reading failed" + di as err "Try reading parquet file directly to see full error:" + di as err " parquet use `vars' `filename'" + exit _rc + } + } + else { + use "`filename'", clear + unab using_keys : `using_keys' // continuation of ParseBy + } + if (`"`if'"' != "") qui keep `if' + + loc cmd restore + } + else { + loc cmd `"qui use `if' using "`filename'", clear"' + } 
+ + if ("`anything'" != "" | "`keepnone'" != "") { + keep `using_keys' `anything' + } + else { + qui ds `using_keys', not + loc anything `r(varlist)' + } + unab anything : `anything', min(0) + + +// Join --------------------------------------------------------------------- + + mata: join("`using_keys'", "`master_keys'", "`anything'", /// + `"`cmd'"', "`generate'", `uniquemaster', /// + `keep_using', `assert_not_using', /// + `label', `notes', /// + `verbose', "`method'") + + +// Apply requirements on _merge variable ------------------------------------ + + cap la def _merge /// + 1 "master only (1)" 2 "using only (2)" 3 "matched (3)" /// Used + 4 "missing updated (4)" 5 "nonmissing conflict (5)" // Unused + la val `generate' _merge + + loc msg "merge: after merge, not all observations from <`assert_words'>" + if ("`assert_nums'" == "") _assert !inlist(`generate', 1, 3), msg("`msg'") + if ("`assert_nums'" == "1") _assert !inlist(`generate', 3), msg("`msg'") + if ("`assert_nums'" == "3") _assert !inlist(`generate', 1), msg("`msg'") + + if ("`keep_nums'" == "") qui drop if inlist(`generate', 1, 3) + if ("`keep_nums'" == "1") qui drop if inlist(`generate', 3) + if ("`keep_nums'" == "3") qui drop if inlist(`generate', 1) + + * Adding data should clear the sort order of the master dataset + if (`keep_using') { + ClearSortOrder + } + + if (`report') { + Table `generate' + } + + if ("`nogenerate'" != "") { + label drop _merge + } +end + + +program define ParseUsing + * SAMPLE INPUT: somefile.dta if foreign==true + gettoken filename if : 0, + c_local filename `"`filename'"' + loc if `if' // remove leading/trailing spaces + c_local if `"`if'"' +end + + +program define ParseMerge + syntax, [keep(string) assert(string)] + if ("`keep'" == "") loc keep "master match using" + if ("`assert'" == "") loc assert "master match using" + loc keep_using 0 + loc assert_not_using 1 + + loc match_valid `""3", "match", "mat", "matc", "matches", "matched""' + + foreach cat in keep assert { + 
loc nums + loc words + foreach word of local `cat' { + if ("`word'"=="1" | substr("`word'", 1, 3) == "mas") { + loc nums `nums' 1 + loc words `words' master + } + else if ("`word'"=="2" | substr("`word'", 1, 2) == "us") { + if ("`cat'" == "keep") loc keep_using 1 + if ("`cat'" == "assert") loc assert_not_using 0 + } + else if (inlist("`word'", `match_valid')) { + loc nums `nums' 3 + loc words `words' match + } + else { + di as error "invalid category: <`word'>" + error 117 + } + } + loc words : list sort words + loc nums : list sort nums + + if ("`cat'"=="assert" & !`assert_not_using') loc words `words' using + + c_local `cat'_words `words' + c_local `cat'_nums `nums' + } + c_local keep_using `keep_using' + c_local assert_not_using `assert_not_using' +end + + +program define ParseBy + * SAMPLE INPUT: 1 turn trunk + * SAMPLE INPUT: 0 year=time country=cou + gettoken is_from 0 : 0 // 1 if used from() , 0 if used into() + assert inlist(`is_from', 0, 1) + while ("`0'" != "") { + gettoken right 0 : 0 + gettoken left right : right, parse("=") + if ("`right'" != "") { + gettoken eqsign right : right, parse("=") + assert "`eqsign'" == "=" + } + else { + loc right `left' + } + loc master_keys `master_keys' `left' + loc using_keys `using_keys' `right' + } + * Mata functions such as st_vartype() don't play well with abbreviations + if (`is_from') unab master_keys : `master_keys' + if (!`is_from') unab using_keys : `using_keys' + c_local master_keys `master_keys' + c_local using_keys `using_keys' +end + + +program define ClearSortOrder + * Andrew Maurer's trick to clear `: sortedby' + * copied from fsort.ado + * see https://github.com/sergiocorreia/ftools/issues/32 + + loc sortvar : sortedby + if ("`sortvar'" != "") { + loc sortvar : word 1 of `sortvar' + loc sortvar_type : type `sortvar' + loc sortvar_is_str = strpos("`sortvar_type'", "str") == 1 + loc val = `sortvar'[1] + + if (`sortvar_is_str') { + qui replace `sortvar' = cond(mi(`"`val'"'), ".", "") in 1 + qui replace 
`sortvar' = `"`val'"' in 1 + } + else { + qui replace `sortvar' = cond(mi(`val'), 0, .) in 1 + qui replace `sortvar' = `val' in 1 + } + assert "`: sortedby'" == "" + } +end + + +program define Table + syntax varname + + * Initialize defaults + loc N 0 + forval i = 1/3 { + loc m`i' 0 + } + + if (c(N)) { + tempname freqs values + tab `varlist', nolabel nofreq matcell(`freqs') matrow(`values') + loc N = rowsof(`freqs') + loc is_temp = substr("`varlist'", 1, 2) == "__" + } + + * Fill actual values + forval i = 1/`N' { + loc j = `values'[`i', 1] + loc m`j' = `freqs'[`i', 1] + if (!`is_temp') loc v`j' "(`varlist'==`j')" + } + + * This chunk is based on merge.ado + di + di as smcl as txt _col(5) "Result" _col(38) "# of obs." + di as smcl as txt _col(5) "{hline 41}" + di as smcl as txt _col(5) "not matched" /// + _col(30) as res %16.0fc (`m1'+`m2') + if (`m1'|`m2') { + di as smcl as txt _col(9) "from master" /// + _col(30) as res %16.0fc `m1' as txt " `v1'" + di as smcl as txt _col(9) "from using" /// + _col(30) as res %16.0fc `m2' as txt " `v2'" + di + } + di as smcl as txt _col(5) "matched" /// + _col(30) as res %16.0fc `m3' as txt " `v3'" + di as smcl as txt _col(5) "{hline 41}" +end + + +findfile "ftools.mata" +include "`r(fn)'" + + +mata: +mata set matastrict on +//mata set matalnum on + +void join(`String' using_keys, + `String' master_keys, + `String' varlist, + `String' cmd, + `Varname' generate, + `Boolean' uniquemaster, + `Boolean' keep_using, + `Boolean' assert_not_using, + `Boolean' join_labels, + `Boolean' join_chars, + `Boolean' verbose, + `String' method) +{ + `Varlist' pk_names, fk_names, varformats + `Varlist' varnames_num, varnames_str, deck + `Varlist' vartypes_num, vartypes_str + `Variables' pk, fk + `Integer' N, i, val, j, k + `Factor' F + `DataFrame' data_num, reshaped_num, data_str, reshaped_str + `Vector' index, range, mask + + `Boolean' integers_only + `Boolean' has_using + `Varname' var + `String' msg + + `StringVector' varlabels, varvaluelabels, 
pk_varvaluelabels + `Dict' label_values, label_text + `Vector' values + `StringVector' text + `String' label + + `Integer' old_width, new_width + + `Integer' num_chars + `StringMatrix' chars + `StringVector' charnames + `String' char_name, char_val + + // Note: + // - On the -using- dataset the keys will be unique, hence why they are the PKs (primary keys) + // - On the -master- dataset we allow duplicates (unless -uniquemaster- is set), hence whey they are FKs (foreign keys) + + // Using + pk_names = tokens(using_keys) + pk = __fload_data(pk_names) + N = rows(pk) + + // Assert keys are unique IDs in using + integers_only = is_integers_only(pk_names, pk) + F = _factor(pk, integers_only, verbose, method, 0) + assert_is_id(F, using_keys, "using") + + varnames_num = varnames_str = deck = tokens(varlist) + vartypes_num = vartypes_str = J(1, cols(deck), "") + + varformats = J(1, cols(deck), "") + varlabels = J(1, cols(deck), "") + varvaluelabels = J(1, cols(deck), "") + pk_varvaluelabels = J(1, cols(pk_names), "") + label_values = asarray_create("string", 1) + label_text = asarray_create("string", 1) + text = "" + values = . + + if (join_chars) { + num_chars = rows(st_dir("char", "_dta", "*")) + } + + + for (i=1; i<=cols(deck); i++) { + var = deck[i] + + // Assert vars are not strings (could allow for it, but not useful) + if (st_isstrvar(var)) { + varnames_num[i] = "" + vartypes_str[i] = st_vartype(var) + } + else { + varnames_str[i] = "" + vartypes_num[i] = st_vartype(var) + } + + + // Add variable labels, value labels, and assignments + varformats[i] = st_varformat(var) + varlabels[i] = st_varlabel(var) + varvaluelabels[i] = label = st_varvaluelabel(var) + if (join_labels) { + if (label != "" ? 
st_vlexists(label) : 0) {
				st_vlload(label, values, text)
				asarray(label_values, label, values)
				asarray(label_text, label, text)
			}
		}

		if (join_chars) {
			num_chars = num_chars + rows(st_dir("char", var, "*"))
		}
	}

	// Save value labels from the by() variables
	if (join_labels) {
		for (i=1; i<=cols(pk_names); i++) {
			var = pk_names[i]
			label = st_varvaluelabel(var)
			if (label != "" ? st_vlexists(label) : 0) {
				pk_varvaluelabels[i] = label
				st_vlload(label, values, text)
				asarray(label_values, label, values)
				asarray(label_text, label, text)
			}
		}
	}

	// Save chars
	// Note: we are NOT saving chars from the by() variables
	if (join_chars) {
		// Each row of -chars- is (varname, charname, char contents)
		chars = J(num_chars, 3, "")
		j = 0
		// k==0 stands for the dataset-level chars stored under _dta
		for (k=0; k<=cols(deck); k++) {
			var = k ? deck[k] : "_dta"
			charnames = st_dir("char", var, "*")
			for (i=1 ; i<=rows(charnames); i++) {
				++j
				chars[j, 1] = var
				chars[j, 2] = charnames[i]
				chars[j, 3] = st_global(sprintf("%s[%s]", var, charnames[i]))
			}
		}
	}

	// Compact the lists: drop the slots blanked out in the loop above
	varnames_num = tokens(invtokens(varnames_num))
	varnames_str = tokens(invtokens(varnames_str))
	vartypes_num = tokens(invtokens(vartypes_num))
	vartypes_str = tokens(invtokens(vartypes_str))

	// Cache the -using- data, appending a constant column of 3s
	// (observations present in -using- get _merge==3 if also in master)
	if (cols(varnames_num) > 0) {
		data_num = st_data(., varnames_num) , J(st_nobs(), 1, 3) // _merge==3
	}
	else {
		data_num = J(st_nobs(), 1, 3) // _merge==3
	}

	if (cols(varnames_str) > 0) {
		data_str = st_sdata(., varnames_str)
	}

	// Master
	stata(cmd) // load (either -restore- or -use-)
	if (cmd != "restore") {
		stata("unab master_keys : " + master_keys) // continuation of ParseBy
		master_keys = st_local("master_keys")
	}

	// Check that variables don't exist yet
	msg = "{err}merge: variable %s already exists in master dataset\n"
	for (i=1; i<=cols(deck); i++) {
		var = deck[i]
		if (_st_varindex(var) != .) {
			printf(msg, var)
			exit(108)
		}
	}
	if (verbose) printf("{txt}variables added: {res}%s{txt}\n", invtokens(deck))

	fk_names = tokens(master_keys)
	fk = __fload_data(fk_names)
	// Keys count as integer-only just when they are integers in BOTH datasets
	if (integers_only) {
		integers_only = is_integers_only(fk_names, fk)
	}

	if (verbose) {
		printf("{txt}(integers only? {res}%s{txt})\n", integers_only ? "true" : "false")
	}
	// Factor over the stacked keys: rows 1..N are -using-, the rest are -master-
	F = _factor(pk \ fk, integers_only, verbose, method, 0)

	// Fill -reshaped_num- matrix with data from -using-
	// 1. Start with the matrix full of MVs, for levels that appear only in -master- (_merge==1)
	reshaped_num = J(F.num_levels, cols(data_num)-1, .) , J(F.num_levels, 1, 1) // _merge==1
	// 2. Get the levels that also appear in -using-
	index = F.levels[| 1 \ N |] // Note that F.levels is unique in 1..N only because the keys are unique in -using-
	// 3. Populate the rows that are also in -using- with the data from using
	reshaped_num[index, .] = data_num
	if (cols(varnames_str) > 0) {
		reshaped_str = J(F.num_levels, cols(data_str), "")
		reshaped_str[index, .] = data_str
	}
	// 4. Rearrange and optionally expand the matrix to conform to the -master- dataset
	index = F.levels[| N+1 \ . |]
	reshaped_num = reshaped_num[index , .]
	if (cols(varnames_str) > 0) {
		reshaped_str = reshaped_str[index , .]
	}

	index = . // conserve memory
	assert(st_nobs() == rows(reshaped_num))
	// Append the merge indicator: a byte variable named by -generate-
	vartypes_num = vartypes_num, "byte"
	varnames_num = varnames_num, generate
	// Disable the break key while the dataset is modified, so the
	// add-variables + store steps are not interrupted halfway
	val = setbreakintr(0)

	st_store(., st_addvar(vartypes_num, varnames_num, 1), reshaped_num)
	if (cols(varnames_str) > 0) {
		st_sstore(., st_addvar(vartypes_str, varnames_str, 1), reshaped_str)
	}

	reshaped_num = reshaped_str = .
// conserve memory
	// restore the previous break-key setting saved above
	(void) setbreakintr(val)

	// Add labels of new variables
	// (re-attach the formats, variable labels, and value labels cached
	// from the -using- dataset onto the newly created master variables)
	msg = "{err}(warning: value label %s already exists; values overwritten)"
	for (i=1; i<=cols(deck); i++) {
		var = deck[i]

		// label variable
		if (varlabels[i] != "") {
			st_varlabel(var, varlabels[i])
		}

		st_varformat(var, varformats[i])

		label = varvaluelabels[i]

		if (label != "") {
			// label values