From d3fab6584cb8cd52924759fe727684512317b44d Mon Sep 17 00:00:00 2001 From: ayushpatnaikgit Date: Wed, 27 Apr 2022 19:39:58 +0530 Subject: [PATCH 1/9] Adding doctests after using seed. Work in progress. --- docs/src/examples.md | 1180 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 968 insertions(+), 212 deletions(-) diff --git a/docs/src/examples.md b/docs/src/examples.md index 0b8a617..a8fe829 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -1,334 +1,1090 @@ ## Example 1: Linear Regression -```@repl examples -using RDatasets, NLSolversBase, CRRao, Logging -Logging.disable_logging(Logging.Warn); CRRao.setprogress!(false) -``` - -```@repl examples -df = dataset("datasets", "mtcars") -m1_1 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression()) - -m1_1.fit -m1_1.sigma -m1_1.LogLike -m1_1.AIC -m1_1.BIC -m1_1.R_sqr -m1_1.Adjusted_R_sqr - -m1_1.fittedResponse -m1_1.residuals -m1_1.Cooks_distance +```jldoctest examples +julia> using RDatasets, NLSolversBase, CRRao, Logging; + +julia> Logging.disable_logging(Logging.Warn); CRRao.setprogress!(false); + +julia> CRRao_seed(123); + +julia> df = dataset("datasets", "mtcars") +32×12 DataFrame + Row │ Model MPG Cyl Disp HP DRat WT QS ⋯ + │ String31 Float64 Int64 Float64 Int64 Float64 Float64 Fl ⋯ +─────┼────────────────────────────────────────────────────────────────────────── + 1 │ Mazda RX4 21.0 6 160.0 110 3.9 2.62 ⋯ + 2 │ Mazda RX4 Wag 21.0 6 160.0 110 3.9 2.875 + 3 │ Datsun 710 22.8 4 108.0 93 3.85 2.32 + 4 │ Hornet 4 Drive 21.4 6 258.0 110 3.08 3.215 + 5 │ Hornet Sportabout 18.7 8 360.0 175 3.15 3.44 ⋯ + 6 │ Valiant 18.1 6 225.0 105 2.76 3.46 + 7 │ Duster 360 14.3 8 360.0 245 3.21 3.57 + 8 │ Merc 240D 24.4 4 146.7 62 3.69 3.19 + ⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋱ + 26 │ Fiat X1-9 27.3 4 79.0 66 4.08 1.935 ⋯ + 27 │ Porsche 914-2 26.0 4 120.3 91 4.43 2.14 + 28 │ Lotus Europa 30.4 4 95.1 113 3.77 1.513 + 29 │ Ford Pantera L 15.8 8 351.0 264 4.22 3.17 + 30 │ Ferrari Dino 19.7 6 145.0 175 3.62 2.77 ⋯ + 31 │ Maserati Bora 15.0 8 301.0 335 3.54 3.57 + 32 │ Volvo 142E 21.4 4 121.0 109 4.11 2.78 + 5 columns and 17 rows omitted + +julia> m1_1 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression()); + +julia> m1_1.fit +──────────────────────────────────────────────────────────────────────────── + Coef. Std. 
Error t Pr(>|t|) Lower 95% Upper 95% +──────────────────────────────────────────────────────────────────────────── +(Intercept) 32.0137 4.63226 6.91 <1e-06 22.5249 41.5024 +HP -0.0367861 0.00989146 -3.72 0.0009 -0.0570478 -0.0165243 +WT -3.19781 0.846546 -3.78 0.0008 -4.93188 -1.46374 +Gear 1.01998 0.851408 1.20 0.2410 -0.72405 2.76401 +──────────────────────────────────────────────────────────────────────────── + +julia> m1_1.sigma +2.5741691724978977 + +julia> m1_1.LogLike +-73.52638935960971 + +julia> m1_1.AIC +157.05277871921942 + +julia> m1_1.BIC +164.38145823321804 + +julia> m1_1.R_sqr +0.8352309600685555 + +julia> m1_1.Adjusted_R_sqr +0.8175771343616149 + +julia> m1_1.residuals +32-element Vector{Float64}: + -2.6688499523387037 + -1.8534082432063457 + -2.453556140740865 + 0.6538282376886713 + 1.06442945616989 + -2.046638453886395 + -0.3448310401665502 + 0.7881712764862279 + 0.27419879500614286 + -1.3684264750049024 + ⋮ + -0.481422171673465 + 2.8595427589095195 + -0.17793682112103681 + -0.9227150395749106 + 2.281550994801261 + -1.4649819082486246 + -2.1180653993796668 + 1.6259525228014091 + -1.7939863113843444 + +julia> m1_1.Cooks_distance +32-element Vector{Float64}: + 0.013342034282302684 + 0.00688728266731234 + 0.015495847517058797 + 0.0014309089637600369 + 0.004471979213924145 + 0.014588985833724696 + 0.0015401004198812064 + 0.005826402580870707 + 0.0003074315682457445 + 0.007011803724485943 + ⋮ + 0.0020768256096929424 + 0.02203970419212919 + 0.0001378106083284689 + 0.006862929526075293 + 0.047038899451778936 + 0.0381204513180911 + 0.03540469459036285 + 0.13715341355042346 + 0.006145660329519638 ``` **Linear Regression - Ridge Prior** -```@repl examples -m1_2 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Ridge()) -m1_2.summaries -m1_2.quantiles +```jldoctest examples +julia> m1_2 = @fitmodel((MPG ~ HP + WT+Gear), df, LinearRegression(), Prior_Ridge()); + +julia> m1_2.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + v 6.7959 3.8426 0.0384 0.0656 3504.6565 1.0000 ⋯ + σ 2.6845 0.3933 0.0039 0.0066 3519.8321 0.9999 ⋯ + α 28.3443 5.3753 0.0538 0.0993 2412.5004 0.9999 ⋯ + β[1] -0.0400 0.0105 0.0001 0.0002 3626.7198 0.9999 ⋯ + β[2] -2.6492 0.9481 0.0095 0.0176 2590.8328 0.9999 ⋯ + β[3] 1.6538 0.9761 0.0098 0.0175 2629.2168 0.9999 ⋯ + 1 column omitted + +julia> m1_2.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + v 2.4776 4.3942 5.9531 8.0658 16.4623 + σ 2.0531 2.4095 2.6403 2.9047 3.5856 + α 16.9646 25.0338 28.5895 31.8601 38.3976 + β[1] -0.0608 -0.0470 -0.0399 -0.0330 -0.0195 + β[2] -4.4404 -3.2875 -2.6778 -2.0376 -0.6931 + β[3] -0.1610 1.0006 1.6097 2.2526 3.7245 ``` **Linear Regression - Laplace Prior** -```@repl examples -m1_3 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Laplace()) -m1_3.summaries -m1_3.quantiles +```jldoctest examples +julia> m1_3 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Laplace()); + +julia> m1_3.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + v 4.3681 3.5844 0.0358 0.0509 3935.6187 0.9999 ⋯ + σ 2.6666 0.3809 0.0038 0.0059 4034.9336 1.0000 ⋯ + α 29.1630 5.1674 0.0517 0.0805 3118.7281 1.0000 ⋯ + β[1] -0.0398 0.0105 0.0001 0.0002 4170.8923 0.9999 ⋯ + β[2] -2.7284 0.9316 0.0093 0.0151 3161.8637 1.0000 ⋯ + β[3] 1.4945 0.9379 0.0094 0.0145 3299.8195 0.9999 ⋯ + 1 column 
omitted + +julia> m1_3.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + v 1.2592 2.4031 3.4896 5.2107 12.5745 + σ 2.0453 2.3982 2.6273 2.8867 3.5336 + α 18.7995 25.8276 29.3558 32.6159 38.8971 + β[1] -0.0605 -0.0468 -0.0398 -0.0328 -0.0190 + β[2] -4.5024 -3.3491 -2.7417 -2.1278 -0.8679 + β[3] -0.2803 0.8657 1.4662 2.1077 3.3809 + ``` **Linear Regression - Cauchy Prior** -```@repl examples -m1_4 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Cauchy(),20000) -m1_4.summaries -m1_4.quantiles +```jldoctest examples +julia> m1_4 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Cauchy(),20000); + +julia> m1_4.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + σ 2.5858 0.3486 0.0025 0.0039 9681.3301 1.0001 ⋯ + α 30.3606 4.6081 0.0326 0.0601 5086.7259 1.0000 ⋯ + β[1] -0.0395 0.0099 0.0001 0.0001 6944.3903 1.0000 ⋯ + β[2] -2.8396 0.8538 0.0060 0.0109 5127.7097 1.0000 ⋯ + β[3] 1.2596 0.8380 0.0059 0.0107 5509.6327 1.0000 ⋯ + 1 column omitted + +julia> m1_4.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + σ 2.0142 2.3370 2.5468 2.7920 3.3841 + α 21.0188 27.3578 30.4396 33.4542 39.2363 + β[1] -0.0589 -0.0460 -0.0394 -0.0329 -0.0200 + β[2] -4.5079 -3.4098 -2.8391 -2.2857 -1.1349 + β[3] -0.3206 0.7001 1.2473 1.7952 2.9440 + ``` **Linear Regression - T-Distributed Prior** -```@repl examples -m1_5 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()) -m1_5.summaries -m1_5.quantiles -``` +```jldoctest examples +julia> m1_5 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); + +julia> m1_5.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + ν 1.0435 0.5361 0.0054 0.0067 6299.0784 0.9999 ⋯ + σ 2.6215 0.3595 0.0036 0.0056 4014.6383 0.9999 ⋯ + α 30.2491 4.6744 0.0467 0.0714 3829.7770 0.9999 ⋯ + β[1] -0.0395 0.0101 0.0001 0.0001 4682.2786 1.0000 ⋯ + β[2] -2.8195 0.8609 0.0086 0.0138 3756.4596 0.9999 ⋯ + β[3] 1.2744 0.8481 0.0085 0.0126 4089.8676 0.9999 ⋯ + 1 column omitted + +julia> m1_5.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + ν 0.3843 0.6722 0.9193 1.2795 2.4194 + σ 2.0277 2.3677 2.5766 2.8377 3.4500 + α 20.8821 27.1538 30.2773 33.4444 39.4333 + β[1] -0.0595 -0.0460 -0.0395 -0.0328 -0.0198 + β[2] -4.4847 -3.3954 -2.8202 -2.2444 -1.0906 + β[3] -0.3638 0.7009 1.2560 1.8346 2.9985 - **Linear Regression - Uniform Prior** -```@repl examples -m1_6 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()) -m1_6.summaries -m1_6.quantiles ``` -## Example 2: Logistic Regression -```@repl examples -turnout = dataset("Zelig", "turnout") - -m2_1 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Logit()) -m2_1.fit -m2_1.modelClass -m2_1.LogLike -m2_1.AIC -m2_1.BIC - - -m2_2 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Probit()) -m2_2.fit -m2_2.BIC - + **Linear Regression - Uniform Prior** +```jldoctest examples +julia> m1_6 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); + +julia> m1_6.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + ν 1.0435 0.5361 0.0054 0.0067 6299.0784 0.9999 ⋯ + σ 2.6215 
0.3595 0.0036 0.0056 4014.6383 0.9999 ⋯ + α 30.2491 4.6744 0.0467 0.0714 3829.7770 0.9999 ⋯ + β[1] -0.0395 0.0101 0.0001 0.0001 4682.2786 1.0000 ⋯ + β[2] -2.8195 0.8609 0.0086 0.0138 3756.4596 0.9999 ⋯ + β[3] 1.2744 0.8481 0.0085 0.0126 4089.8676 0.9999 ⋯ + 1 column omitted + +julia> m1_6.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + ν 0.3843 0.6722 0.9193 1.2795 2.4194 + σ 2.0277 2.3677 2.5766 2.8377 3.4500 + α 20.8821 27.1538 30.2773 33.4444 39.4333 + β[1] -0.0595 -0.0460 -0.0395 -0.0328 -0.0198 + β[2] -4.4847 -3.3954 -2.8202 -2.2444 -1.0906 + β[3] -0.3638 0.7009 1.2560 1.8346 2.9985 -m2_3 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Cloglog()) -m2_3.fit -m2_3.BIC +``` -m2_4 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Cauchit()) -m2_4.fit -m2_4.BIC +## Example 2: Logistic Regression +```jldoctest examples +julia> turnout = dataset("Zelig", "turnout") +2000×5 DataFrame + Row │ Race Age Educate Income Vote + │ Cat… Int32 Float64 Float64 Int32 +──────┼─────────────────────────────────────── + 1 │ white 60 14.0 3.3458 1 + 2 │ white 51 10.0 1.8561 0 + 3 │ white 24 12.0 0.6304 0 + 4 │ white 38 8.0 3.4183 1 + 5 │ white 25 12.0 2.7852 1 + 6 │ white 67 12.0 2.3866 1 + 7 │ white 40 12.0 4.2857 0 + 8 │ white 56 10.0 9.3205 1 + ⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮ + 1994 │ white 58 12.0 0.1936 0 + 1995 │ white 22 7.0 0.2364 0 + 1996 │ white 26 16.0 3.3834 0 + 1997 │ white 34 12.0 2.917 1 + 1998 │ white 51 16.0 7.8949 1 + 1999 │ white 22 10.0 2.4811 0 + 2000 │ white 59 10.0 0.5523 0 + 1985 rows omitted + +julia> m2_1 = @fitmodel((Vote ~ Age + Race +Income + Educate) + ,turnout,LogisticRegression(),Logit()); + +julia> m2_1.fit +──────────────────────────────────────────────────────────────────────────── + Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95% +──────────────────────────────────────────────────────────────────────────── +(Intercept) -3.03426 0.325927 -9.31 <1e-19 -3.67307 -2.39546 +Age 0.0283543 0.00346034 8.19 <1e-15 0.0215722 0.0351365 +Race: white 0.250798 0.146457 1.71 0.0868 -0.0362521 0.537847 +Income 0.177112 0.0271516 6.52 <1e-10 0.123896 0.230328 +Educate 0.175634 0.0203308 8.64 <1e-17 0.135786 0.215481 +──────────────────────────────────────────────────────────────────────────── + +julia> m2_1.modelClass +"LogisticReg" + +julia> m2_1.LogLike +-1011.9906318515575 + +julia> m2_1.AIC +2033.981263703115 + +julia> m2_1.BIC +2061.9857760008254 + +julia> m2_2 = @fitmodel((Vote ~ Age + Race +Income + Educate) + ,turnout,LogisticRegression(),Probit()); + +julia> m2_2.fit +──────────────────────────────────────────────────────────────────────────── + Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95% +──────────────────────────────────────────────────────────────────────────── +(Intercept) -1.76141 0.188556 -9.34 <1e-20 -2.13097 -1.39185 +Age 0.0164973 0.00199897 8.25 <1e-15 0.0125794 0.0204152 +Race: white 0.162856 0.0876885 1.86 0.0633 -0.0090108 0.334722 +Income 0.0963117 0.0149675 6.43 <1e-09 0.066976 0.125647 +Educate 0.10417 0.0116713 8.93 <1e-18 0.0812949 0.127046 +──────────────────────────────────────────────────────────────────────────── + +julia> m2_2.BIC +2062.201026236795 + + +julia> m2_3 = @fitmodel((Vote ~ Age + Race +Income + Educate) + ,turnout,LogisticRegression(),Cloglog()); + +julia> m2_3.fit +───────────────────────────────────────────────────────────────────────────── + Coef. Std. 
Error z Pr(>|z|) Lower 95% Upper 95% +───────────────────────────────────────────────────────────────────────────── +(Intercept) -1.94617 0.184123 -10.57 <1e-25 -2.30704 -1.58529 +Age 0.0147857 0.00184088 8.03 <1e-15 0.0111776 0.0183937 +Race: white 0.185139 0.087101 2.13 0.0335 0.014424 0.355854 +Income 0.0768268 0.0126411 6.08 <1e-08 0.0520506 0.101603 +Educate 0.0983976 0.0108857 9.04 <1e-18 0.077062 0.119733 +───────────────────────────────────────────────────────────────────────────── + +julia> m2_3.BIC +2064.694633749211 + + +julia> m2_4 = @fitmodel((Vote ~ Age + Race +Income + Educate) + ,turnout,LogisticRegression(),Cauchit()); + +julia> m2_4.fit +──────────────────────────────────────────────────────────────────────────── + Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95% +──────────────────────────────────────────────────────────────────────────── +(Intercept) -3.16889 0.384429 -8.24 <1e-15 -3.92235 -2.41542 +Age 0.0304105 0.00413473 7.35 <1e-12 0.0223066 0.0385144 +Race: white 0.181839 0.144766 1.26 0.2091 -0.101898 0.465576 +Income 0.235267 0.038152 6.17 <1e-09 0.16049 0.310043 +Educate 0.169276 0.0240098 7.05 <1e-11 0.122217 0.216334 +──────────────────────────────────────────────────────────────────────────── + +julia> m2_4.BIC +2078.9464617505087 ``` **Logistic Regression - with Ridge Prior** -```@repl examples -m2_5 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Ridge()) -m2_5.summaries -m2_5.quantiles - - -m2_6 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Ridge(),1.0) - -m2_6.summaries -m2_6.quantiles - - -m2_7 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Ridge(),1.0) - -m2_7.summaries -m2_7.quantiles - - -m2_8 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Ridge(),1.0) - -m2_8.summaries -m2_8.quantiles +```jldoctest examples +julia> m2_5 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Logit(),Prior_Ridge()); + +julia> m2_5.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 1.5372 0.6825 0.0068 0.0090 4699.5179 0.9999 ⋯ + β[1] -2.8659 0.3285 0.0033 0.0044 4940.2410 1.0002 ⋯ + β[2] 0.0271 0.0035 0.0000 0.0000 6757.3608 0.9999 ⋯ + β[3] 0.2293 0.1449 0.0014 0.0018 6894.9720 1.0001 ⋯ + β[4] 0.1774 0.0272 0.0003 0.0004 7279.5100 1.0002 ⋯ + β[5] 0.1678 0.0205 0.0002 0.0003 5484.0619 1.0002 ⋯ + 1 column omitted + +julia> m2_5.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.7565 1.0976 1.3837 1.7861 3.2992 + β[1] -3.5009 -3.0890 -2.8642 -2.6411 -2.2094 + β[2] 0.0203 0.0247 0.0271 0.0295 0.0340 + β[3] -0.0578 0.1331 0.2302 0.3272 0.5135 + β[4] 0.1253 0.1587 0.1772 0.1957 0.2314 + β[5] 0.1276 0.1541 0.1679 0.1820 0.2077 + + +julia> m2_6 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Probit(),Prior_Ridge(),1.0); + +julia> m2_6.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.8974 0.3516 0.0035 0.0055 4766.0718 1.0004 ⋯ + β[1] -1.6640 0.1915 0.0019 0.0030 4483.8131 1.0003 ⋯ + β[2] 0.0158 0.0020 0.0000 0.0000 7100.6926 1.0002 ⋯ + β[3] 0.1501 0.0870 0.0009 0.0011 6954.9861 0.9999 ⋯ + β[4] 0.0964 0.0143 0.0001 0.0002 8092.4537 0.9999 ⋯ + β[5] 0.0997 0.0117 0.0001 
0.0002 5151.0238 1.0002 ⋯ + 1 column omitted + +julia> m2_6.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.4628 0.6641 0.8191 1.0405 1.7562 + β[1] -2.0392 -1.7949 -1.6625 -1.5332 -1.2890 + β[2] 0.0118 0.0144 0.0158 0.0171 0.0197 + β[3] -0.0224 0.0905 0.1507 0.2100 0.3175 + β[4] 0.0680 0.0868 0.0964 0.1059 0.1244 + β[5] 0.0764 0.0918 0.0996 0.1075 0.1228 + + +julia> m2_7 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cloglog(),Prior_Ridge(),1.0); + + +julia> m2_7.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.9863 0.3754 0.0038 0.0047 5487.6972 0.9999 ⋯ + β[1] -1.8649 0.1860 0.0019 0.0025 5954.0953 1.0001 ⋯ + β[2] 0.0142 0.0019 0.0000 0.0000 9071.5557 0.9999 ⋯ + β[3] 0.1730 0.0851 0.0009 0.0010 8023.9162 1.0002 ⋯ + β[4] 0.0769 0.0123 0.0001 0.0001 7536.1264 0.9999 ⋯ + β[5] 0.0947 0.0112 0.0001 0.0001 6251.6050 0.9999 ⋯ + 1 column omitted + +julia> m2_7.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.5239 0.7336 0.9009 1.1478 1.9601 + β[1] -2.2303 -1.9903 -1.8638 -1.7379 -1.5141 + β[2] 0.0107 0.0129 0.0142 0.0155 0.0179 + β[3] 0.0074 0.1144 0.1727 0.2305 0.3396 + β[4] 0.0526 0.0687 0.0768 0.0854 0.1007 + β[5] 0.0733 0.0871 0.0946 0.1024 0.1170 + +julia> m2_8 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cauchit(),Prior_Ridge(),1.0); + + +julia> m2_8.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 1.5233 0.6058 0.0061 0.0090 5251.4092 1.0000 ⋯ + β[1] -2.9883 0.3942 0.0039 0.0060 4750.7406 1.0001 ⋯ + β[2] 0.0289 0.0044 0.0000 0.0001 5952.1391 1.0002 ⋯ + β[3] 0.1589 0.1528 0.0015 0.0018 5618.7281 0.9999 ⋯ + β[4] 0.2395 0.0399 0.0004 0.0005 6082.9861 1.0005 ⋯ + β[5] 0.1604 0.0241 0.0002 0.0004 5048.3073 1.0003 ⋯ + 1 column omitted + +julia> m2_8.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.7718 1.1194 1.3885 1.7735 3.1323 + β[1] -3.8019 -3.2411 -2.9768 -2.7227 -2.2400 + β[2] 0.0208 0.0259 0.0287 0.0317 0.0379 + β[3] -0.1462 0.0572 0.1599 0.2635 0.4511 + β[4] 0.1639 0.2123 0.2385 0.2653 0.3206 + β[5] 0.1152 0.1440 0.1596 0.1762 0.2097 ``` **Logistic Regression - with Laplace Prior** -```@repl examples -m2_9 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Laplace()) - -m2_9.summaries -m2_9.quantiles - -m2_10 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Laplace()) - -m2_10.summaries -m2_10.quantiles - - -m2_11 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Laplace(),1.0) - -m2_11.summaries -m2_11.quantiles - - -m2_12 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Laplace(),1.0) - -m2_12.summaries -m2_12.quantiles +```jldoctest examples +julia> m2_9 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Logit(),Prior_Laplace()); + + +julia> m2_9.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.8658 0.4932 0.0049 0.0069 4709.3634 1.0001 ⋯ + β[1] -2.8704 0.3336 0.0033 0.0048 5134.2137 1.0000 ⋯ + β[2] 0.0273 
0.0035 0.0000 0.0000 6552.4899 0.9999 ⋯ + β[3] 0.2106 0.1463 0.0015 0.0018 6941.0532 0.9999 ⋯ + β[4] 0.1776 0.0271 0.0003 0.0003 6854.0396 0.9999 ⋯ + β[5] 0.1687 0.0207 0.0002 0.0003 5680.7532 1.0000 ⋯ + 1 column omitted + +julia> m2_9.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.3334 0.5500 0.7447 1.0356 2.1764 + β[1] -3.5208 -3.0940 -2.8733 -2.6430 -2.2124 + β[2] 0.0205 0.0249 0.0273 0.0296 0.0341 + β[3] -0.0654 0.1072 0.2088 0.3097 0.5061 + β[4] 0.1263 0.1590 0.1770 0.1952 0.2320 + β[5] 0.1275 0.1549 0.1686 0.1828 0.2097 + +julia> m2_10 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Probit(),Prior_Laplace()); + + +julia> m2_10.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.8658 0.4932 0.0049 0.0069 4709.3634 1.0001 ⋯ + β[1] -2.8704 0.3336 0.0033 0.0048 5134.2137 1.0000 ⋯ + β[2] 0.0273 0.0035 0.0000 0.0000 6552.4899 0.9999 ⋯ + β[3] 0.2106 0.1463 0.0015 0.0018 6941.0532 0.9999 ⋯ + β[4] 0.1776 0.0271 0.0003 0.0003 6854.0396 0.9999 ⋯ + β[5] 0.1687 0.0207 0.0002 0.0003 5680.7532 1.0000 ⋯ + 1 column omitted + +julia> m2_10.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.3334 0.5500 0.7447 1.0356 2.1764 + β[1] -3.5208 -3.0940 -2.8733 -2.6430 -2.2124 + β[2] 0.0205 0.0249 0.0273 0.0296 0.0341 + β[3] -0.0654 0.1072 0.2088 0.3097 0.5061 + β[4] 0.1263 0.1590 0.1770 0.1952 0.2320 + β[5] 0.1275 0.1549 0.1686 0.1828 0.2097 + + +julia> m2_11 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cloglog(),Prior_Laplace(),1.0); + +julia> m2_11.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.9026 0.4653 0.0047 0.0064 4863.7878 1.0000 ⋯ + β[1] -2.8813 0.3311 0.0033 0.0045 5456.6584 1.0005 ⋯ + β[2] 0.0273 0.0035 0.0000 0.0000 7030.4545 1.0000 ⋯ + β[3] 0.2133 0.1413 0.0014 0.0016 8023.5642 1.0002 ⋯ + β[4] 0.1771 0.0267 0.0003 0.0003 8059.5351 1.0000 ⋯ + β[5] 0.1695 0.0203 0.0002 0.0003 6003.4945 1.0003 ⋯ + 1 column omitted + +julia> m2_11.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.3833 0.6002 0.7948 1.0682 2.1101 + β[1] -3.5374 -3.1016 -2.8841 -2.6589 -2.2367 + β[2] 0.0207 0.0249 0.0273 0.0297 0.0342 + β[3] -0.0606 0.1179 0.2129 0.3079 0.4917 + β[4] 0.1256 0.1591 0.1771 0.1948 0.2294 + β[5] 0.1296 0.1558 0.1695 0.1833 0.2092 + + +julia> m2_12 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cauchit(),Prior_Laplace(),1.0); + + +julia> m2_12.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.9026 0.4653 0.0047 0.0064 4863.7878 1.0000 ⋯ + β[1] -2.8813 0.3311 0.0033 0.0045 5456.6584 1.0005 ⋯ + β[2] 0.0273 0.0035 0.0000 0.0000 7030.4545 1.0000 ⋯ + β[3] 0.2133 0.1413 0.0014 0.0016 8023.5642 1.0002 ⋯ + β[4] 0.1771 0.0267 0.0003 0.0003 8059.5351 1.0000 ⋯ + β[5] 0.1695 0.0203 0.0002 0.0003 6003.4945 1.0003 ⋯ + 1 column omitted + +julia> m2_12.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.3833 0.6002 0.7948 1.0682 2.1101 + β[1] -3.5374 -3.1016 -2.8841 -2.6589 -2.2367 + β[2] 0.0207 0.0249 0.0273 0.0297 0.0342 + β[3] -0.0606 0.1179 0.2129 0.3079 0.4917 
+ β[4] 0.1256 0.1591 0.1771 0.1948 0.2294 + β[5] 0.1296 0.1558 0.1695 0.1833 0.2092 ``` **Logistic Regression - with Cauchy Prior** -```@repl examples -m2_13 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Cauchy(),1.0) - -m2_13.summaries -m2_13.quantiles - - -m2_14 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Cauchy(),2.0,30000) -m2_14.summaries -m2_14.quantiles +```jldoctest examples +julia> m2_13 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Logit(),Prior_Cauchy(),1.0); + + +julia> m2_13.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.3100 0.2478 0.0025 0.0034 5605.2824 1.0013 ⋯ + β[1] -2.9090 0.3257 0.0033 0.0049 5376.8054 1.0008 ⋯ + β[2] 0.0279 0.0035 0.0000 0.0000 7004.5181 1.0006 ⋯ + β[3] 0.1768 0.1384 0.0014 0.0018 5821.5948 1.0000 ⋯ + β[4] 0.1770 0.0273 0.0003 0.0004 7017.1793 1.0000 ⋯ + β[5] 0.1723 0.0204 0.0002 0.0003 5482.9126 1.0003 ⋯ + 1 column omitted + +julia> m2_13.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.0650 0.1570 0.2446 0.3821 0.9595 + β[1] -3.5631 -3.1231 -2.9068 -2.6885 -2.2846 + β[2] 0.0212 0.0255 0.0278 0.0302 0.0347 + β[3] -0.0695 0.0765 0.1684 0.2695 0.4671 + β[4] 0.1240 0.1585 0.1770 0.1952 0.2313 + β[5] 0.1328 0.1585 0.1720 0.1861 0.2131 + + +julia> m2_14 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Probit(),Prior_Cauchy(),2.0,30000); + + +julia> m2_14.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.3100 0.2478 0.0025 0.0034 5605.2824 1.0013 ⋯ + β[1] -2.9090 0.3257 0.0033 0.0049 5376.8054 1.0008 ⋯ + β[2] 0.0279 0.0035 0.0000 0.0000 7004.5181 1.0006 ⋯ + β[3] 0.1768 0.1384 0.0014 0.0018 5821.5948 1.0000 ⋯ + β[4] 0.1770 0.0273 0.0003 0.0004 7017.1793 1.0000 ⋯ + β[5] 0.1723 0.0204 0.0002 0.0003 5482.9126 1.0003 ⋯ + 1 column omitted + +julia> m2_14.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.0650 0.1570 0.2446 0.3821 0.9595 + β[1] -3.5631 -3.1231 -2.9068 -2.6885 -2.2846 + β[2] 0.0212 0.0255 0.0278 0.0302 0.0347 + β[3] -0.0695 0.0765 0.1684 0.2695 0.4671 + β[4] 0.1240 0.1585 0.1770 0.1952 0.2313 + β[5] 0.1328 0.1585 0.1720 0.1861 0.2131 -m2_15 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Cauchy(),1.0) -m2_15.summaries -m2_15.quantiles - - -m2_16 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Cauchy(),1.0) -m2_16.summaries -m2_16.quantiles +julia> m2_15 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cloglog(),Prior_Cauchy(),1.0); + +julia> m2_15.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.3100 0.2478 0.0025 0.0034 5605.2824 1.0013 ⋯ + β[1] -2.9090 0.3257 0.0033 0.0049 5376.8054 1.0008 ⋯ + β[2] 0.0279 0.0035 0.0000 0.0000 7004.5181 1.0006 ⋯ + β[3] 0.1768 0.1384 0.0014 0.0018 5821.5948 1.0000 ⋯ + β[4] 0.1770 0.0273 0.0003 0.0004 7017.1793 1.0000 ⋯ + β[5] 0.1723 0.0204 0.0002 0.0003 5482.9126 1.0003 ⋯ + 1 column omitted + +julia> m2_15.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 
Float64 Float64 Float64 + + λ 0.0650 0.1570 0.2446 0.3821 0.9595 + β[1] -3.5631 -3.1231 -2.9068 -2.6885 -2.2846 + β[2] 0.0212 0.0255 0.0278 0.0302 0.0347 + β[3] -0.0695 0.0765 0.1684 0.2695 0.4671 + β[4] 0.1240 0.1585 0.1770 0.1952 0.2313 + β[5] 0.1328 0.1585 0.1720 0.1861 0.2131 ``` **Logistic Regression - with T-Dist Prior** -```@repl examples -m2_17 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_TDist(),1.0) -m2_17.summaries -m2_17.quantiles - -m2_18 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_TDist(),1.0) -m2_18.summaries -m2_18.quantiles - - -m2_19 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_TDist(),1.0) -m2_19.summaries -m2_19.quantiles - -m2_20 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_TDist(),1.0) -m2_20.summaries -m2_20.quantiles +```jldoctest examples +julia> m2_17 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Logit(),Prior_TDist(),1.0); + + +julia> m2_17.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.5359 0.3600 0.0036 0.0052 4597.0424 0.9999 ⋯ + ν 2.3246 15.5231 0.1552 0.2815 3060.4891 1.0002 ⋯ + β[1] -2.9360 0.3329 0.0033 0.0051 5185.3889 0.9999 ⋯ + β[2] 0.0279 0.0035 0.0000 0.0000 7333.0907 1.0000 ⋯ + β[3] 0.2057 0.1420 0.0014 0.0015 8778.7328 0.9999 ⋯ + β[4] 0.1770 0.0274 0.0003 0.0003 6953.1952 0.9999 ⋯ + β[5] 0.1726 0.0205 0.0002 0.0003 5407.3816 0.9999 ⋯ + 1 column omitted + +julia> m2_17.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.1714 0.3041 0.4354 0.6468 1.5183 + ν 0.3738 0.7694 1.1794 1.8559 7.8037 + β[1] -3.5985 -3.1605 -2.9304 -2.7117 -2.2988 + β[2] 0.0210 0.0254 0.0278 0.0303 0.0348 + β[3] -0.0641 0.1088 0.2014 0.3020 0.4906 + β[4] 0.1240 0.1583 0.1769 0.1955 0.2313 + β[5] 0.1331 0.1587 0.1725 0.1865 0.2134 + +julia> m2_18 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Probit(),Prior_TDist(),1.0); + + +julia> m2_18.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.5359 0.3600 0.0036 0.0052 4597.0424 0.9999 ⋯ + ν 2.3246 15.5231 0.1552 0.2815 3060.4891 1.0002 ⋯ + β[1] -2.9360 0.3329 0.0033 0.0051 5185.3889 0.9999 ⋯ + β[2] 0.0279 0.0035 0.0000 0.0000 7333.0907 1.0000 ⋯ + β[3] 0.2057 0.1420 0.0014 0.0015 8778.7328 0.9999 ⋯ + β[4] 0.1770 0.0274 0.0003 0.0003 6953.1952 0.9999 ⋯ + β[5] 0.1726 0.0205 0.0002 0.0003 5407.3816 0.9999 ⋯ + 1 column omitted + +julia> m2_18.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.1714 0.3041 0.4354 0.6468 1.5183 + ν 0.3738 0.7694 1.1794 1.8559 7.8037 + β[1] -3.5985 -3.1605 -2.9304 -2.7117 -2.2988 + β[2] 0.0210 0.0254 0.0278 0.0303 0.0348 + β[3] -0.0641 0.1088 0.2014 0.3020 0.4906 + β[4] 0.1240 0.1583 0.1769 0.1955 0.2313 + β[5] 0.1331 0.1587 0.1725 0.1865 0.2134 + +julia> m2_19 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cloglog(),Prior_TDist(),1.0); + + +julia> m2_19.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.5359 0.3600 0.0036 0.0052 4597.0424 0.9999 ⋯ + ν 2.3246 15.5231 0.1552 0.2815 
3060.4891 1.0002 ⋯ + β[1] -2.9360 0.3329 0.0033 0.0051 5185.3889 0.9999 ⋯ + β[2] 0.0279 0.0035 0.0000 0.0000 7333.0907 1.0000 ⋯ + β[3] 0.2057 0.1420 0.0014 0.0015 8778.7328 0.9999 ⋯ + β[4] 0.1770 0.0274 0.0003 0.0003 6953.1952 0.9999 ⋯ + β[5] 0.1726 0.0205 0.0002 0.0003 5407.3816 0.9999 ⋯ + 1 column omitted + +julia> m2_19.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.1714 0.3041 0.4354 0.6468 1.5183 + ν 0.3738 0.7694 1.1794 1.8559 7.8037 + β[1] -3.5985 -3.1605 -2.9304 -2.7117 -2.2988 + β[2] 0.0210 0.0254 0.0278 0.0303 0.0348 + β[3] -0.0641 0.1088 0.2014 0.3020 0.4906 + β[4] 0.1240 0.1583 0.1769 0.1955 0.2313 + β[5] 0.1331 0.1587 0.1725 0.1865 0.2134 + +julia> m2_20 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cauchit(),Prior_TDist(),1.0); + + +julia> m2_20.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.5359 0.3600 0.0036 0.0052 4597.0424 0.9999 ⋯ + ν 2.3246 15.5231 0.1552 0.2815 3060.4891 1.0002 ⋯ + β[1] -2.9360 0.3329 0.0033 0.0051 5185.3889 0.9999 ⋯ + β[2] 0.0279 0.0035 0.0000 0.0000 7333.0907 1.0000 ⋯ + β[3] 0.2057 0.1420 0.0014 0.0015 8778.7328 0.9999 ⋯ + β[4] 0.1770 0.0274 0.0003 0.0003 6953.1952 0.9999 ⋯ + β[5] 0.1726 0.0205 0.0002 0.0003 5407.3816 0.9999 ⋯ + 1 column omitted + +julia> m2_20.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.1714 0.3041 0.4354 0.6468 1.5183 + ν 0.3738 0.7694 1.1794 1.8559 7.8037 + β[1] -3.5985 -3.1605 -2.9304 -2.7117 -2.2988 + β[2] 0.0210 0.0254 0.0278 0.0303 0.0348 + β[3] -0.0641 0.1088 0.2014 0.3020 0.4906 + β[4] 0.1240 0.1583 0.1769 0.1955 0.2313 + β[5] 0.1331 0.1587 0.1725 0.1865 0.2134 + ``` **Logistic Regression - with Uniform Prior** -```@repl examples -m2_21 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Uniform(),1.0) -m2_21.summaries -m2_21.quantiles - - -m2_22 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Uniform(),1.0) -m2_22.summaries -m2_22.quantiles - -m2_23 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Uniform(),1.0) -m2_23.summaries -m2_23.quantiles +```jldoctest examples +julia> m2_21 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Logit(),Prior_Uniform(),1.0); + + +julia> m2_21.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + v 14.9507 144.9283 1.4493 2.1998 4323.3028 1.0006 ⋯ + β[1] -1.0886 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[2] 0.0135 0.0023 0.0000 0.0000 8486.4851 1.0001 ⋯ + β[3] 0.0494 0.1478 0.0015 0.0017 6222.2810 0.9999 ⋯ + β[4] 0.1719 0.0256 0.0003 0.0003 7382.3021 1.0000 ⋯ + β[5] 0.0832 0.0124 0.0001 0.0002 6599.2571 0.9999 ⋯ + 1 column omitted + +julia> m2_21.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + v 1.3158 2.2473 3.8181 8.1392 74.7266 + β[1] -1.0886 -1.0886 -1.0886 -1.0886 -1.0886 + β[2] 0.0090 0.0119 0.0134 0.0150 0.0181 + β[3] -0.2424 -0.0513 0.0513 0.1512 0.3315 + β[4] 0.1221 0.1546 0.1715 0.1890 0.2228 + β[5] 0.0591 0.0750 0.0833 0.0914 0.1076 + +julia> m2_22 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Probit(),Prior_Uniform(),1.0); + + + +julia> 
m2_22.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + v 14.9507 144.9283 1.4493 2.1998 4323.3028 1.0006 ⋯ + β[1] -1.0886 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[2] 0.0135 0.0023 0.0000 0.0000 8486.4851 1.0001 ⋯ + β[3] 0.0494 0.1478 0.0015 0.0017 6222.2810 0.9999 ⋯ + β[4] 0.1719 0.0256 0.0003 0.0003 7382.3021 1.0000 ⋯ + β[5] 0.0832 0.0124 0.0001 0.0002 6599.2571 0.9999 ⋯ + 1 column omitted + +julia> m2_22.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + v 1.3158 2.2473 3.8181 8.1392 74.7266 + β[1] -1.0886 -1.0886 -1.0886 -1.0886 -1.0886 + β[2] 0.0090 0.0119 0.0134 0.0150 0.0181 + β[3] -0.2424 -0.0513 0.0513 0.1512 0.3315 + β[4] 0.1221 0.1546 0.1715 0.1890 0.2228 + β[5] 0.0591 0.0750 0.0833 0.0914 0.1076 + +julia> m2_23 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cloglog(),Prior_Uniform(),1.0); + + +julia> m2_23.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + v 14.9507 144.9283 1.4493 2.1998 4323.3028 1.0006 ⋯ + β[1] -1.0886 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[2] 0.0135 0.0023 0.0000 0.0000 8486.4851 1.0001 ⋯ + β[3] 0.0494 0.1478 0.0015 0.0017 6222.2810 0.9999 ⋯ + β[4] 0.1719 0.0256 0.0003 0.0003 7382.3021 1.0000 ⋯ + β[5] 0.0832 0.0124 0.0001 0.0002 6599.2571 0.9999 ⋯ + 1 column omitted + +julia> m2_23.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + v 1.3158 2.2473 3.8181 8.1392 74.7266 + β[1] -1.0886 -1.0886 -1.0886 -1.0886 -1.0886 + β[2] 0.0090 0.0119 0.0134 0.0150 0.0181 + β[3] -0.2424 -0.0513 0.0513 0.1512 0.3315 + β[4] 0.1221 0.1546 0.1715 0.1890 0.2228 + β[5] 0.0591 0.0750 0.0833 0.0914 0.1076 + -m2_24 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Uniform(),1.0) -m2_24.summaries -m2_24.quantiles +julia> m2_24 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cauchit(),Prior_Uniform(),1.0); + + +julia> m2_24.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + v 14.9507 144.9283 1.4493 2.1998 4323.3028 1.0006 ⋯ + β[1] -1.0886 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[2] 0.0135 0.0023 0.0000 0.0000 8486.4851 1.0001 ⋯ + β[3] 0.0494 0.1478 0.0015 0.0017 6222.2810 0.9999 ⋯ + β[4] 0.1719 0.0256 0.0003 0.0003 7382.3021 1.0000 ⋯ + β[5] 0.0832 0.0124 0.0001 0.0002 6599.2571 0.9999 ⋯ + 1 column omitted + +julia> m2_24.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + v 1.3158 2.2473 3.8181 8.1392 74.7266 + β[1] -1.0886 -1.0886 -1.0886 -1.0886 -1.0886 + β[2] 0.0090 0.0119 0.0134 0.0150 0.0181 + β[3] -0.2424 -0.0513 0.0513 0.1512 0.3315 + β[4] 0.1221 0.1546 0.1715 0.1890 0.2228 + β[5] 0.0591 0.0750 0.0833 0.0914 0.1076 ``` ## Example 3: Poisson Regression **Poisson Regression - Likelihood analysis** -```@repl examples +```jldoctest examples sanction = dataset("Zelig", "sanction") -sanction -m3_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression()) +m3_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression()); + m3_1.fit + m3_1.LogLike m3_1.AIC + m3_1.BIC + ``` **Poisson Regression with Ridge Prior** -```@repl examples +```jldoctest examples m3_2 = @fitmodel((Num ~ Target 
+ Coop + NCost), sanction,PoissonRegression(),Prior_Ridge()) + m3_2.summaries + m3_2.quantiles + ``` **Poisson Regression with Laplace Prior** -```@repl examples +```jldoctest examples + +m3_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Laplace()); -m3_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Laplace()) m3_3.summaries + m3_3.quantiles + ``` **Poisson Regression with Cauchy Prior** -```@repl examples -m3_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Cauchy()) +```jldoctest examples +m3_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Cauchy()); + m3_4.summaries + m3_4.quantiles + ``` **Poisson Regression with TDist Prior** -```@repl examples -m3_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_TDist()) +```jldoctest examples +m3_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_TDist()); + m3_5.summaries + m3_5.quantiles + ``` **Poisson Regression with Uniform Prior** -```@repl examples -m3_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Uniform()) +```jldoctest examples +m3_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Uniform()); + m3_6.summaries + m3_6.quantiles -``` -## Example 4: Negative Binomial Regression -```@repl examples -sanction = dataset("Zelig", "sanction") -sanction ``` +## Example 4: Negative Binomial Regression **Negative Binomial Regression - Likelihood method** -```@repl examples -m4_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression()) +```jldoctest examples +m4_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction, NegBinomRegression()); + m4_1.fit + m4_1.AIC + m4_1.BIC + m4_1.lambda_hat ``` **NegativeBinomial Regression with Ridge Prior** -```@repl examples -m4_2 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Ridge()) +```jldoctest examples + +m4_2 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Ridge()); + m4_2.summaries + m4_2.quantiles + ``` **NegativeBinomial Regression with Laplace Prior** -```@repl examples -m4_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Laplace()) +```jldoctest examples +m4_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Laplace()); + m4_3.summaries + m4_3.quantiles + ``` **Negative Binomial Regression with Cauchy Prior** -```@repl examples -m4_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Cauchy()) +```jldoctest examples +m4_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Cauchy()); + m4_4.summaries + m4_4.quantiles + ``` **Negative Binomial Regression with TDist Prior** -```@repl examples -m4_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_TDist()) +```jldoctest examples +m4_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_TDist()); + m4_5.summaries + m4_5.quantiles + ``` **Negative Binomial Regression with Uniform Prior** -```@repl examples -m4_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Uniform(),1.0) +```jldoctest examples +m4_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Uniform(),1.0); + m4_6.summaries + m4_6.quantiles + ``` From 9842d7828b361335abdef29cdeecb9513a907abe Mon Sep 17 00:00:00 2001 From: ayushpatnaikgit Date: 
Fri, 29 Apr 2022 18:45:21 +0530 Subject: [PATCH 2/9] We gratefully acknowledge the JuliaLab at MIT for financial support for this project. --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index a16c37f..e3c0657 100644 --- a/README.md +++ b/README.md @@ -109,3 +109,7 @@ Language | Package/Function | Mean time taken -----------|--------------------|------------------ where we emphasise that the performance of fitmodel() here is a tiny overhead on top of the implementation of the linear regression in GLM.jl. + +# Support + +We gratefully acknowledge the JuliaLab at MIT for financial support for this project. \ No newline at end of file From 84533668f90a3b9afb19d1f7cbdbb242ea8dd17c Mon Sep 17 00:00:00 2001 From: ayushpatnaikgit Date: Mon, 16 May 2022 15:35:32 +0530 Subject: [PATCH 3/9] Adding a function to set random number generator --- src/CRRao.jl | 4 ++-- src/LinearRegression.jl | 24 ++++++++++++------------ src/LogisticRegression.jl | 24 ++++++++++++------------ src/NegBinomialRegression.jl | 24 ++++++++++++------------ src/PoissonRegression.jl | 24 ++++++++++++------------ src/random_number_generator.jl | 11 +++++++++++ src/set_seed.jl | 5 ----- 7 files changed, 61 insertions(+), 55 deletions(-) create mode 100644 src/random_number_generator.jl delete mode 100644 src/set_seed.jl diff --git a/src/CRRao.jl b/src/CRRao.jl index fcee2bf..543298b 100644 --- a/src/CRRao.jl +++ b/src/CRRao.jl @@ -41,9 +41,9 @@ struct Cauchit end export LinearRegression, LogisticRegression, PoissonRegression, NegBinomRegression export Prior_Ridge, Prior_Laplace, Prior_Cauchy, Prior_TDist, Prior_Uniform -export Logit, Probit, Cloglog, Cauchit, fitmodel, @fitmodel, CRRao_seed +export Logit, Probit, Cloglog, Cauchit, fitmodel, @fitmodel -include("set_seed.jl") +include("random_number_generator.jl") include("general_stats.jl") include("LinearRegression.jl") include("LogisticRegression.jl") diff --git a/src/LinearRegression.jl b/src/LinearRegression.jl index be2e128..79dd823 100644 --- a/src/LinearRegression.jl +++ b/src/LinearRegression.jl @@ -19,7 +19,7 @@ struct analysis_lm_Gauss_NIP_Optim end function linear_reg(formula::FormulaTerm,data::DataFrame) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + modelClass = "LinearReg"; LikelihoodMod="Gauss"; PriorMod="NIP"; @@ -61,7 +61,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame) end function linear_reg_predicts(obj,newdata::DataFrame) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = obj.formula; fm_frame=ModelFrame(formula,newdata); X=modelmatrix(fm_frame); @@ -71,7 +71,7 @@ function linear_reg_predicts(obj,newdata::DataFrame) end function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Ridge,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @@ -91,7 +91,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Ridge,h y ~ MvNormal(α .+ X * β, σ); end; LinReg_model=LinReg(X,y); - chain = sample(LinReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LinReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) @@ -99,7 +99,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Ridge,h end function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Laplace,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = 
apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @@ -118,7 +118,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Laplace y ~ MvNormal(α .+ X * β, σ); end; LinReg_model=LinReg(X,y); - chain = sample(LinReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LinReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) @@ -127,7 +127,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Laplace end function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Cauchy,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @@ -143,7 +143,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Cauchy, y ~ MvNormal(α .+ X * β, σ); end; LinReg_model=LinReg(X,y); - chain = sample(LinReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LinReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) @@ -152,7 +152,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Cauchy, end function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_TDist,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @@ -171,7 +171,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_TDist,h y ~ MvNormal(α .+ X * β, σ); end; LinReg_model=LinReg(X,y); - chain = sample(LinReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LinReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) @@ -180,7 +180,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_TDist,h end function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Uniform,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @@ -197,7 +197,7 @@ function linear_reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Uniform y ~ MvNormal(α .+ X * β, σ); end; LinReg_model=LinReg(X,y); - chain = sample(LinReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LinReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) diff --git a/src/LogisticRegression.jl b/src/LogisticRegression.jl index 99e854c..64af9b8 100644 --- a/src/LogisticRegression.jl +++ b/src/LogisticRegression.jl @@ -20,7 +20,7 @@ struct analysis_logistic_Binom_NIP_Optim end function logistic_reg(formula::FormulaTerm,data,Link::String="LogitLink") - CRRao.seed != nothing && Random.seed!(CRRao.seed) + modelClass = "LogisticReg"; LikelihoodMod="Binomial"; PriorMod="NIP"; @@ -65,7 +65,7 @@ function logistic_reg(formula::FormulaTerm,data,Link::String="LogitLink") end function logistic_reg_predicts(obj,newdata::DataFrame) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = obj.formula; fm_frame=ModelFrame(formula,newdata); X=modelmatrix(fm_frame); @@ -94,7 +94,7 @@ end ## logistic regression with Ridge Prior function logistic_reg_internal(formula::FormulaTerm, data::DataFrame, link_function, PriorMod::Prior_Ridge, h::Float64=0.5,sim_size::Int64=10000) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = 
apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); fm_frame=ModelFrame(formula,data); @@ -123,7 +123,7 @@ function logistic_reg_internal(formula::FormulaTerm, data::DataFrame, link_funct LogisticReg_model=LogisticReg(X,y); - chain = sample(LogisticReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LogisticReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); @@ -152,7 +152,7 @@ end ## logistic regression with Laplace Prior function logistic_reg_internal(formula::FormulaTerm,data::DataFrame, link_function,PriorMod::Prior_Laplace,h::Float64=0.5,sim_size::Int64=10000) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); fm_frame=ModelFrame(formula,data); @@ -181,7 +181,7 @@ function logistic_reg_internal(formula::FormulaTerm,data::DataFrame, link_functi LogisticReg_model=LogisticReg(X,y); - chain = sample(LogisticReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LogisticReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); @@ -210,7 +210,7 @@ end ## logistic regression with Cauchy Prior function logistic_reg_internal(formula::FormulaTerm,data,link_function,PriorMod::Prior_Cauchy,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); fm_frame=ModelFrame(formula,data); @@ -239,7 +239,7 @@ function logistic_reg_internal(formula::FormulaTerm,data,link_function,PriorMod: LogisticReg_model=LogisticReg(X,y); - chain = sample(LogisticReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LogisticReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); @@ -268,7 +268,7 @@ end ## logistic regression with TDist Prior function logistic_reg_internal(formula::FormulaTerm,data,link_function,PriorMod::Prior_TDist,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); fm_frame=ModelFrame(formula,data); @@ -298,7 +298,7 @@ function logistic_reg_internal(formula::FormulaTerm,data,link_function,PriorMod: LogisticReg_model=LogisticReg(X,y); - chain = sample(LogisticReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LogisticReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); @@ -328,7 +328,7 @@ end ## logistic regression with Uniform Prior function logistic_reg_internal(formula::FormulaTerm,data,link_function,PriorMod::Prior_Uniform,h::Float64=0.5,sim_size::Int64=10000) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); fm_frame=ModelFrame(formula,data); @@ -357,7 +357,7 @@ function logistic_reg_internal(formula::FormulaTerm,data,link_function,PriorMod: LogisticReg_model=LogisticReg(X,y); - chain = sample(LogisticReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, LogisticReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); diff --git a/src/NegBinomialRegression.jl b/src/NegBinomialRegression.jl index dd9112b..c68f3bd 100644 --- a/src/NegBinomialRegression.jl +++ b/src/NegBinomialRegression.jl @@ -20,7 +20,7 @@ end function NegBinom_Reg(formula::FormulaTerm,data,Link::String="LogLink") - CRRao.seed != nothing && Random.seed!(CRRao.seed) + modelClass = "CountReg"; LikelihoodMod="NegativeBinomial"; PriorMod="NIP"; @@ -53,7 +53,7 @@ end function 
NegBinom_Reg_predicts(obj,newdata::DataFrame) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = obj.formula; fm_frame=ModelFrame(formula,newdata); X=modelmatrix(fm_frame); @@ -73,7 +73,7 @@ end ## Negative Binomial Regression with Ridge Prior function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Ridge,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model NegBinomReg(X, y) = begin @@ -94,7 +94,7 @@ function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Ridge end end NegBinomReg_model=NegBinomReg(X,y); - chain = sample(NegBinomReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, NegBinomReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans @@ -104,7 +104,7 @@ end ## Negative Binomial Regression with Laplace Prior function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Laplace,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model NegBinomReg(X, y) = begin @@ -125,7 +125,7 @@ function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Lapla end end NegBinomReg_model=NegBinomReg(X,y); - chain = sample(NegBinomReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, NegBinomReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans @@ -133,7 +133,7 @@ end ## Negative Binomial Regression with Cauchy Prior function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Cauchy,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model NegBinomReg(X, y) = begin @@ -154,7 +154,7 @@ function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Cauch end end NegBinomReg_model=NegBinomReg(X,y); - chain = sample(NegBinomReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, NegBinomReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans @@ -163,7 +163,7 @@ end ## Negative Binomial Regression with TDist Prior function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_TDist,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model NegBinomReg(X, y) = begin @@ -185,7 +185,7 @@ function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_TDist end end NegBinomReg_model=NegBinomReg(X,y); - chain = sample(NegBinomReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, NegBinomReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans @@ -195,7 +195,7 @@ end ## Negative Binomial Regression with Uniform Prior function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Uniform,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model NegBinomReg(X, y) = begin @@ -216,7 +216,7 @@ function NegBinom_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Unifo end end NegBinomReg_model=NegBinomReg(X,y); - chain = 
sample(NegBinomReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, NegBinomReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans diff --git a/src/PoissonRegression.jl b/src/PoissonRegression.jl index fc0822c..dcf8122 100644 --- a/src/PoissonRegression.jl +++ b/src/PoissonRegression.jl @@ -20,7 +20,7 @@ struct analysis_Poisson_Reg end function Poisson_Reg(formula::FormulaTerm,data::DataFrame) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); fm_frame=ModelFrame(formula,data); @@ -44,7 +44,7 @@ function Poisson_Reg(formula::FormulaTerm,data::DataFrame) end function Poisson_Reg_predicts(obj,newdata::DataFrame) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = obj.formula; fm_frame=ModelFrame(formula,newdata); X=modelmatrix(fm_frame); @@ -57,7 +57,7 @@ end ## Poisson Regression with Ridge Prior function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Ridge,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model PoissonReg(X, y) = begin @@ -78,7 +78,7 @@ function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Ridge, end end PoissonReg_model=PoissonReg(X,y); - chain = sample(PoissonReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, PoissonReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans @@ -86,7 +86,7 @@ end ## Poisson Regression with Laplace Prior function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Laplace,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model PoissonReg(X, y) = begin @@ -107,7 +107,7 @@ function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Laplac end end PoissonReg_model=PoissonReg(X,y); - chain = sample(PoissonReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, PoissonReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans @@ -115,7 +115,7 @@ end ## Poisson Regression with Cauchy Prior function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Cauchy,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model PoissonReg(X, y) = begin @@ -136,7 +136,7 @@ function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Cauchy end end PoissonReg_model=PoissonReg(X,y); - chain = sample(PoissonReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, PoissonReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans @@ -144,7 +144,7 @@ end ## Poisson Regression with T-Distributed Prior function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_TDist,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model PoissonReg(X, y) = begin @@ -166,7 +166,7 @@ function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_TDist, end end PoissonReg_model=PoissonReg(X,y); - chain = sample(PoissonReg_model, NUTS(), 
sim_size); + chain = sample(CRRao_rng, PoissonReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans @@ -174,7 +174,7 @@ end ## Poisson Regression with Uniform Prior function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Uniform,h::Float64,sim_size::Int64) - CRRao.seed != nothing && Random.seed!(CRRao.seed) + formula = apply_schema(formula, schema(formula, data)); y, X = modelcols(formula, data); @model PoissonReg(X, y) = begin @@ -194,7 +194,7 @@ function Poisson_Reg(formula::FormulaTerm,data::DataFrame,PriorMod::Prior_Unifor end end PoissonReg_model=PoissonReg(X,y); - chain = sample(PoissonReg_model, NUTS(), sim_size); + chain = sample(CRRao_rng, PoissonReg_model, NUTS(), sim_size); summaries, quantiles = describe(chain); ans = MCMC_chain(chain,summaries,quantiles) ans diff --git a/src/random_number_generator.jl b/src/random_number_generator.jl new file mode 100644 index 0000000..ccf4491 --- /dev/null +++ b/src/random_number_generator.jl @@ -0,0 +1,11 @@ +global CRRao_rng = MersenneTwister() + +""" +```julia +using StableRNGs +CRRao.set_rng(StableRNG(1234)) +``` +""" +function set_rng(x) + global CRRao_rng = x +end diff --git a/src/set_seed.jl b/src/set_seed.jl deleted file mode 100644 index c64a6f0..0000000 --- a/src/set_seed.jl +++ /dev/null @@ -1,5 +0,0 @@ -global seed = nothing - -function CRRao_seed(x) - global seed = x -end From 7eb242cae3e50ce8847d403665221e36a3b50794 Mon Sep 17 00:00:00 2001 From: Siddhant Chaudhary Date: Thu, 19 May 2022 18:34:17 +0200 Subject: [PATCH 4/9] Added doctests in examples.md. --- .github/workflows/documentation.yml | 3 +- docs/Project.toml | 3 +- docs/make.jl | 1 + docs/src/examples.md | 1231 +++++++++++++++++---------- 4 files changed, 778 insertions(+), 460 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index f4f238a..2d7148c 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -15,7 +15,8 @@ jobs: - uses: actions/checkout@v2 - uses: julia-actions/setup-julia@latest with: - version: '1.7' + version: '1.7.2' + arch: x86_64 - name: Install documentation dependencies run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Build and deploy diff --git a/docs/Project.toml b/docs/Project.toml index fc54e9b..e7b3c1a 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -4,5 +4,6 @@ Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" NLSolversBase = "d41bc354-129a-5804-8e4c-c37616107c6c" RDatasets = "ce6b1742-4840-55fa-b093-852dadbb1d8b" +StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" StatsModels = "3eaba693-59b7-5ba5-a881-562e759f1c8d" -StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" \ No newline at end of file +StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" diff --git a/docs/make.jl b/docs/make.jl index c76ac71..b028143 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -17,6 +17,7 @@ makedocs(; "Home" => "index.md", "Examples" => "examples.md" ], + strict = :doctest ) deploydocs(; diff --git a/docs/src/examples.md b/docs/src/examples.md index a8fe829..9dab6a2 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -1,12 +1,17 @@ -## Example 1: Linear Regression +## Examples: Setting up the code ```jldoctest examples -julia> using RDatasets, NLSolversBase, CRRao, Logging; +julia> using RDatasets, NLSolversBase, CRRao, Logging, StableRNGs; -julia> 
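The seeding change in this patch series replaces a global seed consumed by `Random.seed!` with a module-level RNG (`CRRao_rng`) that is handed to every `sample` call, and `CRRao.set_rng` swaps that RNG. The sketch below illustrates, in plain Turing code, why passing one explicitly seeded RNG makes chains reproducible; the `demo` model and its data are illustrative stand-ins, not part of the patch:

```julia
using Turing, StableRNGs

# A toy model, standing in for the regression models in this patch.
@model function demo(x)
    μ ~ Normal(0, 1)      # prior on the mean
    x .~ Normal(μ, 1)     # likelihood
end

data = randn(StableRNG(1), 10)

# Passing the same freshly seeded RNG as the first argument to `sample`
# reproduces the chain draw for draw, which keeps doctest output stable.
chain1 = sample(StableRNG(1234), demo(data), NUTS(), 500)
chain2 = sample(StableRNG(1234), demo(data), NUTS(), 500)
```

A global `Random.seed!` gives the same guarantee only if nothing else consumes the default RNG between seeding and sampling, which is the motivation for threading an RNG object through `sample` instead.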
Logging.disable_logging(Logging.Warn); CRRao.setprogress!(false); +julia> Logging.disable_logging(Logging.Warn); CRRao.setprogress!(false); -julia> CRRao_seed(123); +julia> CRRao.set_rng(StableRNG(1234)) +StableRNGs.LehmerRNG(state=0x000000000000000000000000000009a5) +``` +## Example 1: Linear Regression + +```jldoctest examples julia> df = dataset("datasets", "mtcars") 32×12 DataFrame Row │ Model MPG Cyl Disp HP DRat WT QS ⋯ @@ -43,7 +48,7 @@ Gear 1.01998 0.851408 1.20 0.2410 -0.72405 2.76401 ──────────────────────────────────────────────────────────────────────────── julia> m1_1.sigma -2.5741691724978977 +2.5741691724978972 julia> m1_1.LogLike -73.52638935960971 @@ -60,69 +65,93 @@ julia> m1_1.R_sqr julia> m1_1.Adjusted_R_sqr 0.8175771343616149 +julia> m1_1.fittedResponse +32-element Vector{Float64}: + 23.668849952338718 + 22.85340824320634 + 25.253556140740894 + 20.746171762311384 + 17.635570543830177 + 20.14663845388644 + 14.644831040166633 + 23.61182872351372 + 22.525801204993822 + 20.568426475004856 + ⋮ + 13.781422171673526 + 16.340457241090512 + 27.47793682112109 + 26.922715039574857 + 28.11844900519874 + 17.264981908248554 + 21.818065399379595 + 13.374047477198516 + 23.193986311384343 + julia> m1_1.residuals 32-element Vector{Float64}: - -2.6688499523387037 - -1.8534082432063457 - -2.453556140740865 - 0.6538282376886713 - 1.06442945616989 - -2.046638453886395 - -0.3448310401665502 - 0.7881712764862279 - 0.27419879500614286 - -1.3684264750049024 + -2.668849952338718 + -1.8534082432063386 + -2.4535561407408935 + 0.6538282376886144 + 1.0644294561698224 + -2.0466384538864375 + -0.3448310401666319 + 0.7881712764862776 + 0.2741987950061784 + -1.3684264750048563 ⋮ - -0.481422171673465 - 2.8595427589095195 - -0.17793682112103681 - -0.9227150395749106 - 2.281550994801261 - -1.4649819082486246 - -2.1180653993796668 - 1.6259525228014091 + -0.4814221716735254 + 2.8595427589094875 + -0.1779368211210901 + -0.9227150395748573 + 2.2815509948012576 + -1.4649819082485536 + -2.1180653993795957 + 1.6259525228014837 -1.7939863113843444 julia> m1_1.Cooks_distance 32-element Vector{Float64}: - 0.013342034282302684 - 0.00688728266731234 - 0.015495847517058797 - 0.0014309089637600369 - 0.004471979213924145 - 0.014588985833724696 - 0.0015401004198812064 - 0.005826402580870707 - 0.0003074315682457445 - 0.007011803724485943 + 0.013342034282302798 + 0.006887282667312197 + 0.015495847517059161 + 0.0014309089637597765 + 0.004471979213923591 + 0.014588985833725164 + 0.001540100419881934 + 0.005826402580871439 + 0.00030743156824582164 + 0.00701180372448546 ⋮ - 0.0020768256096929424 - 0.02203970419212919 - 0.0001378106083284689 - 0.006862929526075293 - 0.047038899451778936 - 0.0381204513180911 - 0.03540469459036285 - 0.13715341355042346 - 0.006145660329519638 + 0.002076825609693457 + 0.022039704192128577 + 0.0001378106083285506 + 0.006862929526074502 + 0.04703889945177857 + 0.038120451318087265 + 0.035404694590360615 + 0.1371534135504359 + 0.006145660329519691 + ``` **Linear Regression - Ridge Prior** ```jldoctest examples -julia> m1_2 = @fitmodel((MPG ~ HP + WT+Gear), df, LinearRegression(), Prior_Ridge()); +julia> m1_2 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Ridge()); julia> m1_2.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - v 6.7959 3.8426 0.0384 0.0656 3504.6565 1.0000 ⋯ - σ 2.6845 0.3933 0.0039 0.0066 3519.8321 0.9999 ⋯ - α 28.3443 5.3753 0.0538 0.0993 2412.5004 0.9999 ⋯ - β[1] -0.0400 0.0105 0.0001 
0.0002 3626.7198 0.9999 ⋯ - β[2] -2.6492 0.9481 0.0095 0.0176 2590.8328 0.9999 ⋯ - β[3] 1.6538 0.9761 0.0098 0.0175 2629.2168 0.9999 ⋯ + v 6.7693 3.9279 0.0393 0.0610 3574.4104 1.0000 ⋯ + σ 2.6884 0.3984 0.0040 0.0062 3466.2021 1.0002 ⋯ + α 28.4603 5.4967 0.0550 0.1052 2208.7354 1.0008 ⋯ + β[1] -0.0399 0.0108 0.0001 0.0002 3733.4487 1.0005 ⋯ + β[2] -2.6629 0.9680 0.0097 0.0180 2467.8793 1.0011 ⋯ + β[3] 1.6338 0.9939 0.0099 0.0183 2342.6812 1.0006 ⋯ 1 column omitted julia> m1_2.quantiles @@ -130,31 +159,32 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - v 2.4776 4.3942 5.9531 8.0658 16.4623 - σ 2.0531 2.4095 2.6403 2.9047 3.5856 - α 16.9646 25.0338 28.5895 31.8601 38.3976 - β[1] -0.0608 -0.0470 -0.0399 -0.0330 -0.0195 - β[2] -4.4404 -3.2875 -2.6778 -2.0376 -0.6931 - β[3] -0.1610 1.0006 1.6097 2.2526 3.7245 + v 2.3216 4.4133 5.9258 8.0968 16.1426 + σ 2.0474 2.4028 2.6364 2.9225 3.5785 + α 16.7786 25.1108 28.6936 32.1160 38.5610 + β[1] -0.0616 -0.0470 -0.0398 -0.0328 -0.0183 + β[2] -4.5039 -3.3021 -2.6899 -2.0502 -0.6440 + β[3] -0.2071 0.9672 1.5988 2.2439 3.7647 + ``` **Linear Regression - Laplace Prior** ```jldoctest examples -julia> m1_3 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Laplace()); +julia> m1_3 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Laplace()); julia> m1_3.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - v 4.3681 3.5844 0.0358 0.0509 3935.6187 0.9999 ⋯ - σ 2.6666 0.3809 0.0038 0.0059 4034.9336 1.0000 ⋯ - α 29.1630 5.1674 0.0517 0.0805 3118.7281 1.0000 ⋯ - β[1] -0.0398 0.0105 0.0001 0.0002 4170.8923 0.9999 ⋯ - β[2] -2.7284 0.9316 0.0093 0.0151 3161.8637 1.0000 ⋯ - β[3] 1.4945 0.9379 0.0094 0.0145 3299.8195 0.9999 ⋯ + v 4.3182 3.3442 0.0334 0.0490 3968.7204 0.9999 ⋯ + σ 2.6657 0.3792 0.0038 0.0056 4690.6502 1.0000 ⋯ + α 29.0672 5.1669 0.0517 0.0918 3367.7350 1.0001 ⋯ + β[1] -0.0399 0.0105 0.0001 0.0002 4116.7824 1.0005 ⋯ + β[2] -2.7069 0.9341 0.0093 0.0170 3286.4012 1.0002 ⋯ + β[3] 1.5082 0.9373 0.0094 0.0163 3601.6346 1.0002 ⋯ 1 column omitted julia> m1_3.quantiles @@ -162,29 +192,29 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - v 1.2592 2.4031 3.4896 5.2107 12.5745 - σ 2.0453 2.3982 2.6273 2.8867 3.5336 - α 18.7995 25.8276 29.3558 32.6159 38.8971 - β[1] -0.0605 -0.0468 -0.0398 -0.0328 -0.0190 - β[2] -4.5024 -3.3491 -2.7417 -2.1278 -0.8679 - β[3] -0.2803 0.8657 1.4662 2.1077 3.3809 + v 1.2224 2.3903 3.4576 5.1836 12.4068 + σ 2.0359 2.3975 2.6246 2.8949 3.5178 + α 18.0169 25.8440 29.3179 32.5968 38.4400 + β[1] -0.0609 -0.0466 -0.0396 -0.0330 -0.0199 + β[2] -4.4455 -3.3403 -2.7409 -2.1152 -0.7174 + β[3] -0.2418 0.8759 1.4648 2.1019 3.4821 ``` **Linear Regression - Cauchy Prior** ```jldoctest examples -julia> m1_4 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Cauchy(),20000); +julia> m1_4 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Cauchy(),20000); julia> m1_4.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - σ 2.5858 0.3486 0.0025 0.0039 9681.3301 1.0001 ⋯ - α 30.3606 4.6081 0.0326 0.0601 5086.7259 1.0000 ⋯ - β[1] -0.0395 0.0099 0.0001 0.0001 6944.3903 1.0000 ⋯ - β[2] -2.8396 0.8538 0.0060 0.0109 5127.7097 1.0000 ⋯ - β[3] 1.2596 0.8380 0.0059 0.0107 5509.6327 1.0000 ⋯ + σ 2.5855 0.3416 0.0024 0.0036 9218.6691 1.0001 ⋯ + α 30.3875 4.6394 
0.0328 0.0678 4559.8857 1.0001 ⋯ + β[1] -0.0394 0.0099 0.0001 0.0001 7652.1219 1.0000 ⋯ + β[2] -2.8435 0.8542 0.0060 0.0116 4998.6993 1.0001 ⋯ + β[3] 1.2513 0.8428 0.0060 0.0120 5011.2306 1.0000 ⋯ 1 column omitted julia> m1_4.quantiles @@ -192,30 +222,30 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - σ 2.0142 2.3370 2.5468 2.7920 3.3841 - α 21.0188 27.3578 30.4396 33.4542 39.2363 - β[1] -0.0589 -0.0460 -0.0394 -0.0329 -0.0200 - β[2] -4.5079 -3.4098 -2.8391 -2.2857 -1.1349 - β[3] -0.3206 0.7001 1.2473 1.7952 2.9440 + σ 2.0120 2.3452 2.5484 2.7877 3.3675 + α 20.9628 27.4262 30.4704 33.5255 39.1492 + β[1] -0.0591 -0.0459 -0.0393 -0.0328 -0.0201 + β[2] -4.4925 -3.4133 -2.8494 -2.2897 -1.1335 + β[3] -0.3345 0.6759 1.2385 1.7936 2.9585 ``` **Linear Regression - T-Distributed Prior** ```jldoctest examples -julia> m1_5 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); +julia> m1_5 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); julia> m1_5.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - ν 1.0435 0.5361 0.0054 0.0067 6299.0784 0.9999 ⋯ - σ 2.6215 0.3595 0.0036 0.0056 4014.6383 0.9999 ⋯ - α 30.2491 4.6744 0.0467 0.0714 3829.7770 0.9999 ⋯ - β[1] -0.0395 0.0101 0.0001 0.0001 4682.2786 1.0000 ⋯ - β[2] -2.8195 0.8609 0.0086 0.0138 3756.4596 0.9999 ⋯ - β[3] 1.2744 0.8481 0.0085 0.0126 4089.8676 0.9999 ⋯ + ν 1.0539 0.5597 0.0056 0.0070 5800.1893 1.0003 ⋯ + σ 2.6265 0.3686 0.0037 0.0047 6165.5244 0.9999 ⋯ + α 30.2167 4.8679 0.0487 0.1012 2225.6405 0.9999 ⋯ + β[1] -0.0393 0.0103 0.0001 0.0002 3319.3510 1.0005 ⋯ + β[2] -2.8300 0.8976 0.0090 0.0187 2396.9552 1.0001 ⋯ + β[3] 1.2837 0.8841 0.0088 0.0179 2334.0136 0.9999 ⋯ 1 column omitted julia> m1_5.quantiles @@ -223,30 +253,30 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - ν 0.3843 0.6722 0.9193 1.2795 2.4194 - σ 2.0277 2.3677 2.5766 2.8377 3.4500 - α 20.8821 27.1538 30.2773 33.4444 39.4333 - β[1] -0.0595 -0.0460 -0.0395 -0.0328 -0.0198 - β[2] -4.4847 -3.3954 -2.8202 -2.2444 -1.0906 - β[3] -0.3638 0.7009 1.2560 1.8346 2.9985 + ν 0.3731 0.6686 0.9233 1.2896 2.4911 + σ 2.0385 2.3621 2.5841 2.8463 3.4736 + α 20.4434 27.0806 30.3379 33.4157 39.6471 + β[1] -0.0597 -0.0461 -0.0393 -0.0324 -0.0192 + β[2] -4.5979 -3.4317 -2.8360 -2.2500 -1.0505 + β[3] -0.4012 0.6970 1.2552 1.8472 3.0717 ``` **Linear Regression - Uniform Prior** ```jldoctest examples -julia> m1_6 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); +julia> m1_6 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); julia> m1_6.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - ν 1.0435 0.5361 0.0054 0.0067 6299.0784 0.9999 ⋯ - σ 2.6215 0.3595 0.0036 0.0056 4014.6383 0.9999 ⋯ - α 30.2491 4.6744 0.0467 0.0714 3829.7770 0.9999 ⋯ - β[1] -0.0395 0.0101 0.0001 0.0001 4682.2786 1.0000 ⋯ - β[2] -2.8195 0.8609 0.0086 0.0138 3756.4596 0.9999 ⋯ - β[3] 1.2744 0.8481 0.0085 0.0126 4089.8676 0.9999 ⋯ + ν 1.0665 0.5900 0.0059 0.0081 5791.3987 1.0007 ⋯ + σ 2.6276 0.3678 0.0037 0.0056 3788.8270 0.9999 ⋯ + α 30.3304 4.7387 0.0474 0.0881 2697.4202 0.9999 ⋯ + β[1] -0.0394 0.0102 0.0001 0.0002 3969.3250 0.9999 ⋯ + β[2] -2.8421 0.8679 0.0087 0.0159 2813.1886 0.9999 ⋯ + β[3] 1.2646 0.8642 0.0086 0.0157 2858.6901 0.9999 ⋯ 1 column omitted julia> m1_6.quantiles @@ -254,16 
+284,15 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - ν 0.3843 0.6722 0.9193 1.2795 2.4194 - σ 2.0277 2.3677 2.5766 2.8377 3.4500 - α 20.8821 27.1538 30.2773 33.4444 39.4333 - β[1] -0.0595 -0.0460 -0.0395 -0.0328 -0.0198 - β[2] -4.4847 -3.3954 -2.8202 -2.2444 -1.0906 - β[3] -0.3638 0.7009 1.2560 1.8346 2.9985 + ν 0.3749 0.6698 0.9298 1.3095 2.5508 + σ 2.0306 2.3712 2.5893 2.8418 3.4723 + α 20.4677 27.3179 30.5001 33.5657 39.1292 + β[1] -0.0596 -0.0460 -0.0392 -0.0324 -0.0198 + β[2] -4.5194 -3.4288 -2.8534 -2.2704 -1.1239 + β[3] -0.3362 0.6886 1.2226 1.8201 3.0601 ``` - ## Example 2: Logistic Regression ```jldoctest examples julia> turnout = dataset("Zelig", "turnout") @@ -290,7 +319,7 @@ julia> turnout = dataset("Zelig", "turnout") 1985 rows omitted julia> m2_1 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Logit()); + ,turnout,LogisticRegression(),Logit()); julia> m2_1.fit ──────────────────────────────────────────────────────────────────────────── @@ -316,7 +345,7 @@ julia> m2_1.BIC 2061.9857760008254 julia> m2_2 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Probit()); + ,turnout,LogisticRegression(),Probit()); julia> m2_2.fit ──────────────────────────────────────────────────────────────────────────── @@ -332,9 +361,8 @@ Educate 0.10417 0.0116713 8.93 <1e-18 0.0812949 0.127046 julia> m2_2.BIC 2062.201026236795 - julia> m2_3 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Cloglog()); + ,turnout,LogisticRegression(),Cloglog()); julia> m2_3.fit ───────────────────────────────────────────────────────────────────────────── @@ -348,11 +376,10 @@ Educate 0.0983976 0.0108857 9.04 <1e-18 0.077062 0.119733 ───────────────────────────────────────────────────────────────────────────── julia> m2_3.BIC -2064.694633749211 - +2064.69463374921 julia> m2_4 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Cauchit()); + ,turnout,LogisticRegression(),Cauchit()); julia> m2_4.fit ──────────────────────────────────────────────────────────────────────────── @@ -366,26 +393,27 @@ Educate 0.169276 0.0240098 7.05 <1e-11 0.122217 0.216334 ──────────────────────────────────────────────────────────────────────────── julia> m2_4.BIC -2078.9464617505087 +2078.946461750509 + ``` **Logistic Regression - with Ridge Prior** ```jldoctest examples julia> m2_5 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Ridge()); + ,LogisticRegression(),Logit(),Prior_Ridge()); julia> m2_5.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 1.5372 0.6825 0.0068 0.0090 4699.5179 0.9999 ⋯ - β[1] -2.8659 0.3285 0.0033 0.0044 4940.2410 1.0002 ⋯ - β[2] 0.0271 0.0035 0.0000 0.0000 6757.3608 0.9999 ⋯ - β[3] 0.2293 0.1449 0.0014 0.0018 6894.9720 1.0001 ⋯ - β[4] 0.1774 0.0272 0.0003 0.0004 7279.5100 1.0002 ⋯ - β[5] 0.1678 0.0205 0.0002 0.0003 5484.0619 1.0002 ⋯ + λ 1.5314 0.6655 0.0067 0.0088 4736.5514 1.0003 ⋯ + β[1] -2.8619 0.3365 0.0034 0.0047 4505.8853 1.0004 ⋯ + β[2] 0.0270 0.0035 0.0000 0.0000 6544.5015 1.0002 ⋯ + β[3] 0.2279 0.1446 0.0014 0.0015 6968.9017 1.0000 ⋯ + β[4] 0.1773 0.0274 0.0003 0.0004 6716.7864 0.9999 ⋯ + β[5] 0.1676 0.0208 0.0002 0.0003 5279.6598 1.0001 ⋯ 1 column omitted julia> m2_5.quantiles @@ -393,28 +421,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 
Float64 - λ 0.7565 1.0976 1.3837 1.7861 3.2992 - β[1] -3.5009 -3.0890 -2.8642 -2.6411 -2.2094 - β[2] 0.0203 0.0247 0.0271 0.0295 0.0340 - β[3] -0.0578 0.1331 0.2302 0.3272 0.5135 - β[4] 0.1253 0.1587 0.1772 0.1957 0.2314 - β[5] 0.1276 0.1541 0.1679 0.1820 0.2077 - + λ 0.7570 1.1039 1.3921 1.7719 3.2092 + β[1] -3.5306 -3.0918 -2.8651 -2.6338 -2.2062 + β[2] 0.0203 0.0247 0.0270 0.0295 0.0338 + β[3] -0.0540 0.1311 0.2280 0.3264 0.5073 + β[4] 0.1243 0.1587 0.1767 0.1956 0.2327 + β[5] 0.1273 0.1535 0.1675 0.1815 0.2085 julia> m2_6 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Ridge(),1.0); + ,LogisticRegression(),Probit(),Prior_Ridge(),1.0); julia> m2_6.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.8974 0.3516 0.0035 0.0055 4766.0718 1.0004 ⋯ - β[1] -1.6640 0.1915 0.0019 0.0030 4483.8131 1.0003 ⋯ - β[2] 0.0158 0.0020 0.0000 0.0000 7100.6926 1.0002 ⋯ - β[3] 0.1501 0.0870 0.0009 0.0011 6954.9861 0.9999 ⋯ - β[4] 0.0964 0.0143 0.0001 0.0002 8092.4537 0.9999 ⋯ - β[5] 0.0997 0.0117 0.0001 0.0002 5151.0238 1.0002 ⋯ + λ 0.9025 0.3651 0.0037 0.0054 4480.4540 1.0000 ⋯ + β[1] -1.6606 0.1915 0.0019 0.0030 4852.0370 0.9999 ⋯ + β[2] 0.0158 0.0020 0.0000 0.0000 7009.3493 1.0001 ⋯ + β[3] 0.1496 0.0856 0.0009 0.0009 7761.4054 0.9999 ⋯ + β[4] 0.0967 0.0148 0.0001 0.0001 7444.0661 1.0000 ⋯ + β[5] 0.0994 0.0118 0.0001 0.0002 5268.9316 0.9999 ⋯ 1 column omitted julia> m2_6.quantiles @@ -422,29 +449,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.4628 0.6641 0.8191 1.0405 1.7562 - β[1] -2.0392 -1.7949 -1.6625 -1.5332 -1.2890 + λ 0.4607 0.6606 0.8182 1.0396 1.8407 + β[1] -2.0383 -1.7904 -1.6583 -1.5311 -1.2863 β[2] 0.0118 0.0144 0.0158 0.0171 0.0197 - β[3] -0.0224 0.0905 0.1507 0.2100 0.3175 - β[4] 0.0680 0.0868 0.0964 0.1059 0.1244 - β[5] 0.0764 0.0918 0.0996 0.1075 0.1228 - + β[3] -0.0173 0.0925 0.1488 0.2073 0.3180 + β[4] 0.0679 0.0865 0.0967 0.1066 0.1257 + β[5] 0.0768 0.0914 0.0993 0.1072 0.1231 julia> m2_7 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Ridge(),1.0); - + ,LogisticRegression(),Cloglog(),Prior_Ridge(),1.0); julia> m2_7.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.9863 0.3754 0.0038 0.0047 5487.6972 0.9999 ⋯ - β[1] -1.8649 0.1860 0.0019 0.0025 5954.0953 1.0001 ⋯ - β[2] 0.0142 0.0019 0.0000 0.0000 9071.5557 0.9999 ⋯ - β[3] 0.1730 0.0851 0.0009 0.0010 8023.9162 1.0002 ⋯ - β[4] 0.0769 0.0123 0.0001 0.0001 7536.1264 0.9999 ⋯ - β[5] 0.0947 0.0112 0.0001 0.0001 6251.6050 0.9999 ⋯ + λ 0.9928 0.3745 0.0037 0.0054 5503.9612 0.9999 ⋯ + β[1] -1.8666 0.1845 0.0018 0.0026 5165.8451 1.0001 ⋯ + β[2] 0.0142 0.0018 0.0000 0.0000 7814.5837 1.0002 ⋯ + β[3] 0.1730 0.0860 0.0009 0.0009 6881.1222 0.9999 ⋯ + β[4] 0.0771 0.0120 0.0001 0.0001 7628.3991 1.0000 ⋯ + β[5] 0.0947 0.0109 0.0001 0.0002 5465.7776 1.0000 ⋯ 1 column omitted julia> m2_7.quantiles @@ -452,28 +477,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.5239 0.7336 0.9009 1.1478 1.9601 - β[1] -2.2303 -1.9903 -1.8638 -1.7379 -1.5141 - β[2] 0.0107 0.0129 0.0142 0.0155 0.0179 - β[3] 0.0074 0.1144 0.1727 0.2305 0.3396 - β[4] 0.0526 0.0687 0.0768 0.0854 0.1007 - β[5] 0.0733 0.0871 0.0946 0.1024 0.1170 + λ 0.5248 0.7395 0.9092 1.1465 1.9612 + β[1] -2.2325 -1.9919 
-1.8664 -1.7424 -1.5047 + β[2] 0.0107 0.0130 0.0142 0.0154 0.0178 + β[3] 0.0038 0.1149 0.1732 0.2303 0.3426 + β[4] 0.0542 0.0687 0.0770 0.0851 0.1006 + β[5] 0.0728 0.0875 0.0947 0.1021 0.1162 julia> m2_8 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Ridge(),1.0); - + ,LogisticRegression(),Cauchit(),Prior_Ridge(),1.0); julia> m2_8.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 1.5233 0.6058 0.0061 0.0090 5251.4092 1.0000 ⋯ - β[1] -2.9883 0.3942 0.0039 0.0060 4750.7406 1.0001 ⋯ - β[2] 0.0289 0.0044 0.0000 0.0001 5952.1391 1.0002 ⋯ - β[3] 0.1589 0.1528 0.0015 0.0018 5618.7281 0.9999 ⋯ - β[4] 0.2395 0.0399 0.0004 0.0005 6082.9861 1.0005 ⋯ - β[5] 0.1604 0.0241 0.0002 0.0004 5048.3073 1.0003 ⋯ + λ 1.5164 0.6110 0.0061 0.0088 5073.8844 0.9999 ⋯ + β[1] -2.9665 0.3808 0.0038 0.0055 3900.2655 1.0001 ⋯ + β[2] 0.0286 0.0043 0.0000 0.0001 4943.5487 1.0002 ⋯ + β[3] 0.1604 0.1508 0.0015 0.0018 6891.3045 0.9999 ⋯ + β[4] 0.2395 0.0397 0.0004 0.0005 6038.7928 0.9999 ⋯ + β[5] 0.1592 0.0237 0.0002 0.0003 4328.8957 1.0000 ⋯ 1 column omitted julia> m2_8.quantiles @@ -481,31 +505,31 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.7718 1.1194 1.3885 1.7735 3.1323 - β[1] -3.8019 -3.2411 -2.9768 -2.7227 -2.2400 - β[2] 0.0208 0.0259 0.0287 0.0317 0.0379 - β[3] -0.1462 0.0572 0.1599 0.2635 0.4511 - β[4] 0.1639 0.2123 0.2385 0.2653 0.3206 - β[5] 0.1152 0.1440 0.1596 0.1762 0.2097 + λ 0.7799 1.1154 1.3821 1.7577 3.0617 + β[1] -3.7283 -3.2133 -2.9612 -2.7075 -2.2404 + β[2] 0.0205 0.0257 0.0286 0.0315 0.0374 + β[3] -0.1449 0.0613 0.1635 0.2610 0.4474 + β[4] 0.1640 0.2126 0.2385 0.2651 0.3212 + β[5] 0.1148 0.1431 0.1583 0.1748 0.2070 + ``` **Logistic Regression - with Laplace Prior** ```jldoctest examples julia> m2_9 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Laplace()); - + ,LogisticRegression(),Logit(),Prior_Laplace()); julia> m2_9.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.8658 0.4932 0.0049 0.0069 4709.3634 1.0001 ⋯ - β[1] -2.8704 0.3336 0.0033 0.0048 5134.2137 1.0000 ⋯ - β[2] 0.0273 0.0035 0.0000 0.0000 6552.4899 0.9999 ⋯ - β[3] 0.2106 0.1463 0.0015 0.0018 6941.0532 0.9999 ⋯ - β[4] 0.1776 0.0271 0.0003 0.0003 6854.0396 0.9999 ⋯ - β[5] 0.1687 0.0207 0.0002 0.0003 5680.7532 1.0000 ⋯ + λ 0.8729 0.5109 0.0051 0.0070 4168.8950 0.9999 ⋯ + β[1] -2.8745 0.3293 0.0033 0.0043 4525.5742 0.9999 ⋯ + β[2] 0.0273 0.0035 0.0000 0.0000 6601.7518 0.9999 ⋯ + β[3] 0.2103 0.1424 0.0014 0.0013 7660.9194 1.0000 ⋯ + β[4] 0.1767 0.0271 0.0003 0.0003 7474.8745 1.0001 ⋯ + β[5] 0.1690 0.0202 0.0002 0.0003 4860.4472 1.0001 ⋯ 1 column omitted julia> m2_9.quantiles @@ -513,28 +537,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.3334 0.5500 0.7447 1.0356 2.1764 - β[1] -3.5208 -3.0940 -2.8733 -2.6430 -2.2124 - β[2] 0.0205 0.0249 0.0273 0.0296 0.0341 - β[3] -0.0654 0.1072 0.2088 0.3097 0.5061 - β[4] 0.1263 0.1590 0.1770 0.1952 0.2320 - β[5] 0.1275 0.1549 0.1686 0.1828 0.2097 + λ 0.3280 0.5428 0.7460 1.0415 2.1841 + β[1] -3.5072 -3.1004 -2.8769 -2.6541 -2.2327 + β[2] 0.0206 0.0250 0.0273 0.0297 0.0342 + β[3] -0.0614 0.1128 0.2082 0.3091 0.4884 + β[4] 0.1252 0.1582 0.1762 0.1947 0.2305 + β[5] 0.1297 0.1552 0.1691 0.1827 0.2079 julia> m2_10 = 
@fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Laplace()); - + ,LogisticRegression(),Probit(),Prior_Laplace()); julia> m2_10.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.8658 0.4932 0.0049 0.0069 4709.3634 1.0001 ⋯ - β[1] -2.8704 0.3336 0.0033 0.0048 5134.2137 1.0000 ⋯ - β[2] 0.0273 0.0035 0.0000 0.0000 6552.4899 0.9999 ⋯ - β[3] 0.2106 0.1463 0.0015 0.0018 6941.0532 0.9999 ⋯ - β[4] 0.1776 0.0271 0.0003 0.0003 6854.0396 0.9999 ⋯ - β[5] 0.1687 0.0207 0.0002 0.0003 5680.7532 1.0000 ⋯ + λ 0.8677 0.5061 0.0051 0.0066 5223.5887 0.9999 ⋯ + β[1] -2.8772 0.3324 0.0033 0.0051 3975.5078 0.9999 ⋯ + β[2] 0.0273 0.0035 0.0000 0.0000 6116.3599 0.9999 ⋯ + β[3] 0.2075 0.1449 0.0014 0.0017 6674.4916 0.9999 ⋯ + β[4] 0.1772 0.0274 0.0003 0.0003 6721.6931 1.0000 ⋯ + β[5] 0.1693 0.0205 0.0002 0.0003 4354.2790 0.9999 ⋯ 1 column omitted julia> m2_10.quantiles @@ -542,28 +565,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.3334 0.5500 0.7447 1.0356 2.1764 - β[1] -3.5208 -3.0940 -2.8733 -2.6430 -2.2124 - β[2] 0.0205 0.0249 0.0273 0.0296 0.0341 - β[3] -0.0654 0.1072 0.2088 0.3097 0.5061 - β[4] 0.1263 0.1590 0.1770 0.1952 0.2320 - β[5] 0.1275 0.1549 0.1686 0.1828 0.2097 - + λ 0.3292 0.5522 0.7445 1.0321 2.1142 + β[1] -3.5240 -3.1023 -2.8816 -2.6542 -2.2191 + β[2] 0.0205 0.0250 0.0273 0.0297 0.0341 + β[3] -0.0676 0.1086 0.2058 0.3058 0.4905 + β[4] 0.1248 0.1584 0.1768 0.1960 0.2308 + β[5] 0.1296 0.1554 0.1693 0.1830 0.2104 julia> m2_11 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Laplace(),1.0); + ,LogisticRegression(),Cloglog(),Prior_Laplace(),1.0); julia> m2_11.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.9026 0.4653 0.0047 0.0064 4863.7878 1.0000 ⋯ - β[1] -2.8813 0.3311 0.0033 0.0045 5456.6584 1.0005 ⋯ - β[2] 0.0273 0.0035 0.0000 0.0000 7030.4545 1.0000 ⋯ - β[3] 0.2133 0.1413 0.0014 0.0016 8023.5642 1.0002 ⋯ - β[4] 0.1771 0.0267 0.0003 0.0003 8059.5351 1.0000 ⋯ - β[5] 0.1695 0.0203 0.0002 0.0003 6003.4945 1.0003 ⋯ + λ 0.9072 0.4890 0.0049 0.0069 4738.4116 1.0004 ⋯ + β[1] -2.8796 0.3286 0.0033 0.0041 5837.4510 1.0000 ⋯ + β[2] 0.0273 0.0035 0.0000 0.0000 7668.8459 1.0001 ⋯ + β[3] 0.2121 0.1436 0.0014 0.0015 7916.5774 1.0002 ⋯ + β[4] 0.1777 0.0268 0.0003 0.0003 8707.6308 0.9999 ⋯ + β[5] 0.1691 0.0204 0.0002 0.0002 6188.7181 0.9999 ⋯ 1 column omitted julia> m2_11.quantiles @@ -571,29 +593,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.3833 0.6002 0.7948 1.0682 2.1101 - β[1] -3.5374 -3.1016 -2.8841 -2.6589 -2.2367 - β[2] 0.0207 0.0249 0.0273 0.0297 0.0342 - β[3] -0.0606 0.1179 0.2129 0.3079 0.4917 - β[4] 0.1256 0.1591 0.1771 0.1948 0.2294 - β[5] 0.1296 0.1558 0.1695 0.1833 0.2092 - + λ 0.3746 0.5961 0.7889 1.0716 2.1763 + β[1] -3.5249 -3.1023 -2.8795 -2.6541 -2.2457 + β[2] 0.0207 0.0249 0.0273 0.0297 0.0341 + β[3] -0.0624 0.1129 0.2114 0.3114 0.4930 + β[4] 0.1253 0.1597 0.1772 0.1962 0.2305 + β[5] 0.1290 0.1553 0.1691 0.1827 0.2089 julia> m2_12 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Laplace(),1.0); - + ,LogisticRegression(),Cauchit(),Prior_Laplace(),1.0); julia> m2_12.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 
Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.9026 0.4653 0.0047 0.0064 4863.7878 1.0000 ⋯ - β[1] -2.8813 0.3311 0.0033 0.0045 5456.6584 1.0005 ⋯ - β[2] 0.0273 0.0035 0.0000 0.0000 7030.4545 1.0000 ⋯ - β[3] 0.2133 0.1413 0.0014 0.0016 8023.5642 1.0002 ⋯ - β[4] 0.1771 0.0267 0.0003 0.0003 8059.5351 1.0000 ⋯ - β[5] 0.1695 0.0203 0.0002 0.0003 6003.4945 1.0003 ⋯ + λ 0.8988 0.4514 0.0045 0.0063 5520.6699 1.0006 ⋯ + β[1] -2.8904 0.3304 0.0033 0.0043 4419.1261 0.9999 ⋯ + β[2] 0.0274 0.0034 0.0000 0.0000 5962.6203 0.9999 ⋯ + β[3] 0.2133 0.1421 0.0014 0.0017 6693.5568 1.0008 ⋯ + β[4] 0.1773 0.0271 0.0003 0.0003 7664.6326 0.9999 ⋯ + β[5] 0.1698 0.0203 0.0002 0.0003 4712.2368 1.0001 ⋯ 1 column omitted julia> m2_12.quantiles @@ -601,31 +621,31 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.3833 0.6002 0.7948 1.0682 2.1101 - β[1] -3.5374 -3.1016 -2.8841 -2.6589 -2.2367 - β[2] 0.0207 0.0249 0.0273 0.0297 0.0342 - β[3] -0.0606 0.1179 0.2129 0.3079 0.4917 - β[4] 0.1256 0.1591 0.1771 0.1948 0.2294 - β[5] 0.1296 0.1558 0.1695 0.1833 0.2092 + λ 0.3770 0.5991 0.7909 1.0748 2.0284 + β[1] -3.5400 -3.1123 -2.8919 -2.6659 -2.2510 + β[2] 0.0208 0.0250 0.0274 0.0297 0.0343 + β[3] -0.0553 0.1161 0.2109 0.3058 0.4987 + β[4] 0.1243 0.1587 0.1771 0.1953 0.2310 + β[5] 0.1308 0.1558 0.1695 0.1834 0.2095 + ``` **Logistic Regression - with Cauchy Prior** ```jldoctest examples julia> m2_13 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Cauchy(),1.0); - + ,LogisticRegression(),Logit(),Prior_Cauchy(),1.0); julia> m2_13.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.3100 0.2478 0.0025 0.0034 5605.2824 1.0013 ⋯ - β[1] -2.9090 0.3257 0.0033 0.0049 5376.8054 1.0008 ⋯ - β[2] 0.0279 0.0035 0.0000 0.0000 7004.5181 1.0006 ⋯ - β[3] 0.1768 0.1384 0.0014 0.0018 5821.5948 1.0000 ⋯ - β[4] 0.1770 0.0273 0.0003 0.0004 7017.1793 1.0000 ⋯ - β[5] 0.1723 0.0204 0.0002 0.0003 5482.9126 1.0003 ⋯ + λ 0.3060 0.2319 0.0023 0.0028 6670.6339 0.9999 ⋯ + β[1] -2.9196 0.3286 0.0033 0.0044 4636.5902 0.9999 ⋯ + β[2] 0.0280 0.0034 0.0000 0.0000 6407.9414 0.9999 ⋯ + β[3] 0.1791 0.1358 0.0014 0.0019 5612.7276 1.0000 ⋯ + β[4] 0.1773 0.0273 0.0003 0.0004 6538.4075 1.0014 ⋯ + β[5] 0.1725 0.0205 0.0002 0.0003 5017.4150 1.0000 ⋯ 1 column omitted julia> m2_13.quantiles @@ -633,29 +653,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.0650 0.1570 0.2446 0.3821 0.9595 - β[1] -3.5631 -3.1231 -2.9068 -2.6885 -2.2846 - β[2] 0.0212 0.0255 0.0278 0.0302 0.0347 - β[3] -0.0695 0.0765 0.1684 0.2695 0.4671 - β[4] 0.1240 0.1585 0.1770 0.1952 0.2313 - β[5] 0.1328 0.1585 0.1720 0.1861 0.2131 - + λ 0.0648 0.1582 0.2433 0.3780 0.9132 + β[1] -3.5551 -3.1405 -2.9220 -2.6936 -2.2841 + β[2] 0.0213 0.0257 0.0279 0.0303 0.0348 + β[3] -0.0580 0.0786 0.1723 0.2712 0.4634 + β[4] 0.1232 0.1586 0.1774 0.1957 0.2306 + β[5] 0.1326 0.1585 0.1724 0.1866 0.2122 julia> m2_14 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Cauchy(),2.0,30000); - + ,LogisticRegression(),Probit(),Prior_Cauchy(),2.0,30000); julia> m2_14.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.3100 0.2478 0.0025 0.0034 5605.2824 1.0013 ⋯ - β[1] -2.9090 0.3257 0.0033 0.0049 5376.8054 1.0008 ⋯ - β[2] 0.0279 0.0035 0.0000 0.0000 7004.5181 
1.0006 ⋯ - β[3] 0.1768 0.1384 0.0014 0.0018 5821.5948 1.0000 ⋯ - β[4] 0.1770 0.0273 0.0003 0.0004 7017.1793 1.0000 ⋯ - β[5] 0.1723 0.0204 0.0002 0.0003 5482.9126 1.0003 ⋯ + λ 0.3083 0.2464 0.0025 0.0030 6527.1307 0.9999 ⋯ + β[1] -2.9164 0.3268 0.0033 0.0054 4001.0802 0.9999 ⋯ + β[2] 0.0279 0.0034 0.0000 0.0000 5853.3102 0.9999 ⋯ + β[3] 0.1778 0.1384 0.0014 0.0017 6653.8555 0.9999 ⋯ + β[4] 0.1766 0.0270 0.0003 0.0004 5995.1423 1.0002 ⋯ + β[5] 0.1727 0.0202 0.0002 0.0003 4212.0545 1.0000 ⋯ 1 column omitted julia> m2_14.quantiles @@ -663,62 +681,88 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.0650 0.1570 0.2446 0.3821 0.9595 - β[1] -3.5631 -3.1231 -2.9068 -2.6885 -2.2846 - β[2] 0.0212 0.0255 0.0278 0.0302 0.0347 - β[3] -0.0695 0.0765 0.1684 0.2695 0.4671 - β[4] 0.1240 0.1585 0.1770 0.1952 0.2313 - β[5] 0.1328 0.1585 0.1720 0.1861 0.2131 - - - + λ 0.0642 0.1553 0.2436 0.3806 0.9498 + β[1] -3.5544 -3.1398 -2.9155 -2.6942 -2.2855 + β[2] 0.0211 0.0256 0.0280 0.0303 0.0348 + β[3] -0.0658 0.0766 0.1706 0.2721 0.4679 + β[4] 0.1239 0.1584 0.1769 0.1946 0.2298 + β[5] 0.1332 0.1592 0.1727 0.1860 0.2127 + julia> m2_15 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Cauchy(),1.0); + ,LogisticRegression(),Cloglog(),Prior_Cauchy(),1.0); julia> m2_15.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.3100 0.2478 0.0025 0.0034 5605.2824 1.0013 ⋯ - β[1] -2.9090 0.3257 0.0033 0.0049 5376.8054 1.0008 ⋯ - β[2] 0.0279 0.0035 0.0000 0.0000 7004.5181 1.0006 ⋯ - β[3] 0.1768 0.1384 0.0014 0.0018 5821.5948 1.0000 ⋯ - β[4] 0.1770 0.0273 0.0003 0.0004 7017.1793 1.0000 ⋯ - β[5] 0.1723 0.0204 0.0002 0.0003 5482.9126 1.0003 ⋯ + λ 0.3077 0.2368 0.0024 0.0031 5706.3441 1.0003 ⋯ + β[1] -2.9128 0.3295 0.0033 0.0042 5051.3410 0.9999 ⋯ + β[2] 0.0279 0.0035 0.0000 0.0000 6723.5109 0.9999 ⋯ + β[3] 0.1785 0.1376 0.0014 0.0017 6301.5177 0.9999 ⋯ + β[4] 0.1772 0.0270 0.0003 0.0003 7536.6524 0.9999 ⋯ + β[5] 0.1724 0.0203 0.0002 0.0002 5509.0935 0.9999 ⋯ 1 column omitted julia> m2_15.quantiles Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.0650 0.1570 0.2446 0.3821 0.9595 - β[1] -3.5631 -3.1231 -2.9068 -2.6885 -2.2846 - β[2] 0.0212 0.0255 0.0278 0.0302 0.0347 - β[3] -0.0695 0.0765 0.1684 0.2695 0.4671 - β[4] 0.1240 0.1585 0.1770 0.1952 0.2313 - β[5] 0.1328 0.1585 0.1720 0.1861 0.2131 + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.0647 0.1573 0.2431 0.3769 0.9521 + β[1] -3.5601 -3.1328 -2.9126 -2.6877 -2.2779 + β[2] 0.0213 0.0255 0.0278 0.0302 0.0349 + β[3] -0.0625 0.0791 0.1714 0.2699 0.4611 + β[4] 0.1243 0.1590 0.1768 0.1952 0.2306 + β[5] 0.1329 0.1587 0.1723 0.1860 0.2117 + +julia> m2_16 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout + ,LogisticRegression(),Cauchit(),Prior_Cauchy(),1.0); + +julia> m2_16.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.3049 0.2370 0.0024 0.0029 5684.3206 1.0000 ⋯ + β[1] -2.9134 0.3314 0.0033 0.0046 5120.8859 1.0000 ⋯ + β[2] 0.0279 0.0034 0.0000 0.0000 6977.9876 0.9999 ⋯ + β[3] 0.1752 0.1372 0.0014 0.0015 6830.2857 1.0000 ⋯ + β[4] 0.1770 0.0267 0.0003 0.0003 8103.9753 0.9999 ⋯ + β[5] 0.1726 0.0204 0.0002 0.0003 5655.1416 0.9999 ⋯ + 1 column omitted + +julia> m2_16.quantiles +Quantiles + 
parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 0.0667 0.1556 0.2404 0.3788 0.9164 + β[1] -3.5774 -3.1275 -2.9059 -2.6959 -2.2603 + β[2] 0.0212 0.0255 0.0279 0.0303 0.0347 + β[3] -0.0675 0.0764 0.1688 0.2667 0.4589 + β[4] 0.1254 0.1584 0.1769 0.1950 0.2298 + β[5] 0.1331 0.1588 0.1724 0.1864 0.2138 + ``` **Logistic Regression - with T-Dist Prior** ```jldoctest examples julia> m2_17 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_TDist(),1.0); - + ,LogisticRegression(),Logit(),Prior_TDist(),1.0); julia> m2_17.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.5359 0.3600 0.0036 0.0052 4597.0424 0.9999 ⋯ - ν 2.3246 15.5231 0.1552 0.2815 3060.4891 1.0002 ⋯ - β[1] -2.9360 0.3329 0.0033 0.0051 5185.3889 0.9999 ⋯ - β[2] 0.0279 0.0035 0.0000 0.0000 7333.0907 1.0000 ⋯ - β[3] 0.2057 0.1420 0.0014 0.0015 8778.7328 0.9999 ⋯ - β[4] 0.1770 0.0274 0.0003 0.0003 6953.1952 0.9999 ⋯ - β[5] 0.1726 0.0205 0.0002 0.0003 5407.3816 0.9999 ⋯ + λ 0.5289 0.3511 0.0035 0.0056 4448.5392 0.9999 ⋯ + ν 2.2155 10.7265 0.1073 0.1899 3067.8091 1.0003 ⋯ + β[1] -2.9351 0.3287 0.0033 0.0040 5364.8317 1.0002 ⋯ + β[2] 0.0278 0.0035 0.0000 0.0000 7461.5560 0.9999 ⋯ + β[3] 0.2065 0.1429 0.0014 0.0017 7208.6730 0.9999 ⋯ + β[4] 0.1768 0.0278 0.0003 0.0003 7523.2925 0.9999 ⋯ + β[5] 0.1727 0.0206 0.0002 0.0002 5830.2383 1.0002 ⋯ 1 column omitted julia> m2_17.quantiles @@ -726,61 +770,59 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.1714 0.3041 0.4354 0.6468 1.5183 - ν 0.3738 0.7694 1.1794 1.8559 7.8037 - β[1] -3.5985 -3.1605 -2.9304 -2.7117 -2.2988 - β[2] 0.0210 0.0254 0.0278 0.0303 0.0348 - β[3] -0.0641 0.1088 0.2014 0.3020 0.4906 - β[4] 0.1240 0.1583 0.1769 0.1955 0.2313 - β[5] 0.1331 0.1587 0.1725 0.1865 0.2134 + λ 0.1780 0.3040 0.4273 0.6348 1.4809 + ν 0.3755 0.7694 1.1673 1.8724 7.4220 + β[1] -3.5790 -3.1586 -2.9311 -2.7106 -2.3021 + β[2] 0.0210 0.0255 0.0278 0.0302 0.0350 + β[3] -0.0652 0.1084 0.2039 0.2996 0.4975 + β[4] 0.1230 0.1579 0.1765 0.1955 0.2328 + β[5] 0.1325 0.1587 0.1727 0.1864 0.2135 julia> m2_18 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_TDist(),1.0); - + ,LogisticRegression(),Probit(),Prior_TDist(),1.0); julia> m2_18.summaries Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.5359 0.3600 0.0036 0.0052 4597.0424 0.9999 ⋯ - ν 2.3246 15.5231 0.1552 0.2815 3060.4891 1.0002 ⋯ - β[1] -2.9360 0.3329 0.0033 0.0051 5185.3889 0.9999 ⋯ - β[2] 0.0279 0.0035 0.0000 0.0000 7333.0907 1.0000 ⋯ - β[3] 0.2057 0.1420 0.0014 0.0015 8778.7328 0.9999 ⋯ - β[4] 0.1770 0.0274 0.0003 0.0003 6953.1952 0.9999 ⋯ - β[5] 0.1726 0.0205 0.0002 0.0003 5407.3816 0.9999 ⋯ + parameters mean std naive_se mcse ess rhat e ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 4.8118 0.0029 0.0000 0.0003 21.0934 1.3204 ⋯ + ν 3.7561 0.0016 0.0000 0.0002 22.4348 1.1310 ⋯ + β[1] -22.6598 0.0004 0.0000 0.0000 22.2429 1.2323 ⋯ + β[2] 0.8228 0.0000 0.0000 0.0000 66.1383 1.1389 ⋯ + β[3] -2.0240 0.0004 0.0000 0.0000 21.0386 2.6936 ⋯ + β[4] -8.7439 0.0027 0.0000 0.0003 20.2520 2.8354 ⋯ + β[5] -6.9804 0.0062 0.0001 0.0006 20.2317 2.7320 ⋯ 1 column omitted julia> m2_18.quantiles Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 + 
parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.1714 0.3041 0.4354 0.6468 1.5183 - ν 0.3738 0.7694 1.1794 1.8559 7.8037 - β[1] -3.5985 -3.1605 -2.9304 -2.7117 -2.2988 - β[2] 0.0210 0.0254 0.0278 0.0303 0.0348 - β[3] -0.0641 0.1088 0.2014 0.3020 0.4906 - β[4] 0.1240 0.1583 0.1769 0.1955 0.2313 - β[5] 0.1331 0.1587 0.1725 0.1865 0.2134 + λ 4.8048 4.8110 4.8128 4.8138 4.8151 + ν 3.7527 3.7552 3.7562 3.7574 3.7584 + β[1] -22.6608 -22.6600 -22.6598 -22.6596 -22.6592 + β[2] 0.8228 0.8228 0.8228 0.8228 0.8229 + β[3] -2.0247 -2.0243 -2.0240 -2.0237 -2.0234 + β[4] -8.7480 -8.7470 -8.7436 -8.7415 -8.7392 + β[5] -6.9900 -6.9861 -6.9801 -6.9747 -6.9706 julia> m2_19 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_TDist(),1.0); - + ,LogisticRegression(),Cloglog(),Prior_TDist(),1.0); julia> m2_19.summaries Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.5359 0.3600 0.0036 0.0052 4597.0424 0.9999 ⋯ - ν 2.3246 15.5231 0.1552 0.2815 3060.4891 1.0002 ⋯ - β[1] -2.9360 0.3329 0.0033 0.0051 5185.3889 0.9999 ⋯ - β[2] 0.0279 0.0035 0.0000 0.0000 7333.0907 1.0000 ⋯ - β[3] 0.2057 0.1420 0.0014 0.0015 8778.7328 0.9999 ⋯ - β[4] 0.1770 0.0274 0.0003 0.0003 6953.1952 0.9999 ⋯ - β[5] 0.1726 0.0205 0.0002 0.0003 5407.3816 0.9999 ⋯ + parameters mean std naive_se mcse ess rhat es ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.7808 0.4602 0.0046 0.0453 23.5174 1.2619 ⋯ + ν 3.7505 4.1940 0.0419 0.4159 21.9030 1.2467 ⋯ + β[1] -2.7421 0.3762 0.0038 0.0363 26.8105 1.1348 ⋯ + β[2] 0.0415 0.0723 0.0007 0.0072 27.8089 1.0384 ⋯ + β[3] 0.1168 0.4182 0.0042 0.0408 26.9065 1.0372 ⋯ + β[4] -0.0110 0.8895 0.0089 0.0882 27.0833 1.0456 ⋯ + β[5] 0.1890 0.1184 0.0012 0.0112 29.1688 1.0274 ⋯ 1 column omitted julia> m2_19.quantiles @@ -788,30 +830,29 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.1714 0.3041 0.4354 0.6468 1.5183 - ν 0.3738 0.7694 1.1794 1.8559 7.8037 - β[1] -3.5985 -3.1605 -2.9304 -2.7117 -2.2988 - β[2] 0.0210 0.0254 0.0278 0.0303 0.0348 - β[3] -0.0641 0.1088 0.2014 0.3020 0.4906 - β[4] 0.1240 0.1583 0.1769 0.1955 0.2313 - β[5] 0.1331 0.1587 0.1725 0.1865 0.2134 + λ 0.2057 0.4125 0.6491 1.1041 1.8468 + ν 0.6845 1.3588 2.1210 3.7455 17.0191 + β[1] -3.3513 -3.0447 -2.7642 -2.4708 -2.0116 + β[2] 0.0195 0.0242 0.0269 0.0295 0.3964 + β[3] -1.6313 0.1010 0.2012 0.2958 0.4656 + β[4] -4.3298 0.1543 0.1750 0.1952 0.2313 + β[5] 0.1229 0.1497 0.1663 0.1821 0.6713 julia> m2_20 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_TDist(),1.0); - + ,LogisticRegression(),Cauchit(),Prior_TDist(),1.0); julia> m2_20.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - λ 0.5359 0.3600 0.0036 0.0052 4597.0424 0.9999 ⋯ - ν 2.3246 15.5231 0.1552 0.2815 3060.4891 1.0002 ⋯ - β[1] -2.9360 0.3329 0.0033 0.0051 5185.3889 0.9999 ⋯ - β[2] 0.0279 0.0035 0.0000 0.0000 7333.0907 1.0000 ⋯ - β[3] 0.2057 0.1420 0.0014 0.0015 8778.7328 0.9999 ⋯ - β[4] 0.1770 0.0274 0.0003 0.0003 6953.1952 0.9999 ⋯ - β[5] 0.1726 0.0205 0.0002 0.0003 5407.3816 0.9999 ⋯ + λ 0.5314 0.3468 0.0035 0.0044 4487.2447 1.0004 ⋯ + ν 1.9974 5.5248 0.0552 0.0980 3013.3594 0.9999 ⋯ + β[1] -2.9315 0.3305 0.0033 0.0040 5717.6278 1.0002 ⋯ + β[2] 0.0278 0.0035 0.0000 0.0000 7735.5418 1.0001 ⋯ + β[3] 0.2045 0.1403 
0.0014 0.0016 8011.0127 1.0006 ⋯ + β[4] 0.1772 0.0273 0.0003 0.0003 7454.0591 0.9999 ⋯ + β[5] 0.1724 0.0206 0.0002 0.0003 5544.3491 1.0011 ⋯ 1 column omitted julia> m2_20.quantiles @@ -819,33 +860,32 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - λ 0.1714 0.3041 0.4354 0.6468 1.5183 - ν 0.3738 0.7694 1.1794 1.8559 7.8037 - β[1] -3.5985 -3.1605 -2.9304 -2.7117 -2.2988 - β[2] 0.0210 0.0254 0.0278 0.0303 0.0348 - β[3] -0.0641 0.1088 0.2014 0.3020 0.4906 - β[4] 0.1240 0.1583 0.1769 0.1955 0.2313 - β[5] 0.1331 0.1587 0.1725 0.1865 0.2134 + λ 0.1763 0.3082 0.4331 0.6374 1.4639 + ν 0.3746 0.7738 1.1719 1.8817 7.2365 + β[1] -3.5830 -3.1509 -2.9295 -2.7051 -2.2834 + β[2] 0.0210 0.0255 0.0278 0.0302 0.0345 + β[3] -0.0670 0.1086 0.2025 0.2996 0.4815 + β[4] 0.1252 0.1586 0.1770 0.1952 0.2317 + β[5] 0.1327 0.1586 0.1724 0.1863 0.2136 ``` **Logistic Regression - with Uniform Prior** ```jldoctest examples julia> m2_21 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Uniform(),1.0); - + ,LogisticRegression(),Logit(),Prior_Uniform(),1.0); julia> m2_21.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - v 14.9507 144.9283 1.4493 2.1998 4323.3028 1.0006 ⋯ - β[1] -1.0886 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[2] 0.0135 0.0023 0.0000 0.0000 8486.4851 1.0001 ⋯ - β[3] 0.0494 0.1478 0.0015 0.0017 6222.2810 0.9999 ⋯ - β[4] 0.1719 0.0256 0.0003 0.0003 7382.3021 1.0000 ⋯ - β[5] 0.0832 0.0124 0.0001 0.0002 6599.2571 0.9999 ⋯ + v 13.7852 180.2962 1.8030 2.6824 4217.9790 1.0006 ⋯ + β[1] -0.6624 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[2] 0.0103 0.0023 0.0000 0.0000 8889.1797 0.9999 ⋯ + β[3] -0.0034 0.1519 0.0015 0.0019 7495.4943 0.9999 ⋯ + β[4] 0.1710 0.0259 0.0003 0.0003 7518.2286 1.0000 ⋯ + β[5] 0.0637 0.0126 0.0001 0.0002 6785.0123 1.0000 ⋯ 1 column omitted julia> m2_21.quantiles @@ -853,29 +893,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - v 1.3158 2.2473 3.8181 8.1392 74.7266 - β[1] -1.0886 -1.0886 -1.0886 -1.0886 -1.0886 - β[2] 0.0090 0.0119 0.0134 0.0150 0.0181 - β[3] -0.2424 -0.0513 0.0513 0.1512 0.3315 - β[4] 0.1221 0.1546 0.1715 0.1890 0.2228 - β[5] 0.0591 0.0750 0.0833 0.0914 0.1076 + v 0.8333 1.5379 2.6688 6.2389 61.2451 + β[1] -0.6624 -0.6624 -0.6624 -0.6624 -0.6624 + β[2] 0.0058 0.0087 0.0103 0.0118 0.0147 + β[3] -0.3015 -0.1034 -0.0040 0.0998 0.2916 + β[4] 0.1211 0.1531 0.1708 0.1884 0.2225 + β[5] 0.0383 0.0554 0.0639 0.0722 0.0882 julia> m2_22 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Uniform(),1.0); - - + ,LogisticRegression(),Probit(),Prior_Uniform(),1.0); julia> m2_22.summaries Summary Statistics parameters mean std naive_se mcse ess rhat ⋯ Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - v 14.9507 144.9283 1.4493 2.1998 4323.3028 1.0006 ⋯ - β[1] -1.0886 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[2] 0.0135 0.0023 0.0000 0.0000 8486.4851 1.0001 ⋯ - β[3] 0.0494 0.1478 0.0015 0.0017 6222.2810 0.9999 ⋯ - β[4] 0.1719 0.0256 0.0003 0.0003 7382.3021 1.0000 ⋯ - β[5] 0.0832 0.0124 0.0001 0.0002 6599.2571 0.9999 ⋯ + v 15.3310 222.0068 2.2201 4.0557 3034.3611 1.0003 ⋯ + β[1] -0.3434 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[2] 0.0099 0.0021 0.0000 0.0000 9458.2021 0.9999 ⋯ + β[3] -0.3434 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[4] 0.1789 0.0266 0.0003 0.0003 8784.2763 0.9999 ⋯ + β[5] 0.0608 0.0117 0.0001 
0.0001 9076.6331 0.9999 ⋯ 1 column omitted julia> m2_22.quantiles @@ -883,28 +921,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - v 1.3158 2.2473 3.8181 8.1392 74.7266 - β[1] -1.0886 -1.0886 -1.0886 -1.0886 -1.0886 - β[2] 0.0090 0.0119 0.0134 0.0150 0.0181 - β[3] -0.2424 -0.0513 0.0513 0.1512 0.3315 - β[4] 0.1221 0.1546 0.1715 0.1890 0.2228 - β[5] 0.0591 0.0750 0.0833 0.0914 0.1076 + v 0.5648 1.1828 2.2363 5.0195 54.2485 + β[1] -0.3434 -0.3434 -0.3434 -0.3434 -0.3434 + β[2] 0.0057 0.0085 0.0099 0.0113 0.0141 + β[3] -0.3434 -0.3434 -0.3434 -0.3434 -0.3434 + β[4] 0.1281 0.1610 0.1784 0.1968 0.2315 + β[5] 0.0378 0.0529 0.0609 0.0689 0.0832 julia> m2_23 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Uniform(),1.0); - + ,LogisticRegression(),Cloglog(),Prior_Uniform(),1.0); julia> m2_23.summaries Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - v 14.9507 144.9283 1.4493 2.1998 4323.3028 1.0006 ⋯ - β[1] -1.0886 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[2] 0.0135 0.0023 0.0000 0.0000 8486.4851 1.0001 ⋯ - β[3] 0.0494 0.1478 0.0015 0.0017 6222.2810 0.9999 ⋯ - β[4] 0.1719 0.0256 0.0003 0.0003 7382.3021 1.0000 ⋯ - β[5] 0.0832 0.0124 0.0001 0.0002 6599.2571 0.9999 ⋯ + v 7.3458 35.8188 0.3582 0.5261 4241.8175 0.9999 ⋯ + β[1] -0.3274 0.0000 0.0000 0.0000 NaN NaN ⋯ + β[2] 0.0097 0.0021 0.0000 0.0000 9364.4462 0.9999 ⋯ + β[3] -0.3274 0.0000 0.0000 0.0000 NaN NaN ⋯ + β[4] 0.1791 0.0261 0.0003 0.0002 9621.3881 0.9999 ⋯ + β[5] 0.0590 0.0113 0.0001 0.0001 9168.4454 0.9999 ⋯ 1 column omitted julia> m2_23.quantiles @@ -912,29 +949,27 @@ Quantiles parameters 2.5% 25.0% 50.0% 75.0% 97.5% Symbol Float64 Float64 Float64 Float64 Float64 - v 1.3158 2.2473 3.8181 8.1392 74.7266 - β[1] -1.0886 -1.0886 -1.0886 -1.0886 -1.0886 - β[2] 0.0090 0.0119 0.0134 0.0150 0.0181 - β[3] -0.2424 -0.0513 0.0513 0.1512 0.3315 - β[4] 0.1221 0.1546 0.1715 0.1890 0.2228 - β[5] 0.0591 0.0750 0.0833 0.0914 0.1076 - + v 0.5398 1.1335 2.0818 4.7264 43.0439 + β[1] -0.3274 -0.3274 -0.3274 -0.3274 -0.3274 + β[2] 0.0055 0.0083 0.0097 0.0111 0.0137 + β[3] -0.3274 -0.3274 -0.3274 -0.3274 -0.3274 + β[4] 0.1298 0.1612 0.1788 0.1962 0.2318 + β[5] 0.0370 0.0515 0.0590 0.0665 0.0812 julia> m2_24 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Uniform(),1.0); - + ,LogisticRegression(),Cauchit(),Prior_Uniform(),1.0); julia> m2_24.summaries Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - v 14.9507 144.9283 1.4493 2.1998 4323.3028 1.0006 ⋯ - β[1] -1.0886 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[2] 0.0135 0.0023 0.0000 0.0000 8486.4851 1.0001 ⋯ - β[3] 0.0494 0.1478 0.0015 0.0017 6222.2810 0.9999 ⋯ - β[4] 0.1719 0.0256 0.0003 0.0003 7382.3021 1.0000 ⋯ - β[5] 0.0832 0.0124 0.0001 0.0002 6599.2571 0.9999 ⋯ + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + v 50.7212 2454.3435 24.5434 39.2977 4005.7567 1.0001 ⋯ + β[1] -0.5073 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[2] 0.0123 0.0021 0.0000 0.0000 9294.7660 0.9999 ⋯ + β[3] -0.5073 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ + β[4] 0.1826 0.0263 0.0003 0.0003 8350.3257 0.9999 ⋯ + β[5] 0.0761 0.0116 0.0001 0.0001 8318.5904 1.0000 ⋯ 1 column 
omitted

julia> m2_24.quantiles
@@ -942,149 +977,429 @@ Quantiles
   parameters      2.5%     25.0%     50.0%     75.0%     97.5%
       Symbol   Float64   Float64   Float64   Float64   Float64

-           v    1.3158    2.2473    3.8181    8.1392   74.7266
-        β[1]   -1.0886   -1.0886   -1.0886   -1.0886   -1.0886
-        β[2]    0.0090    0.0119    0.0134    0.0150    0.0181
-        β[3]   -0.2424   -0.0513    0.0513    0.1512    0.3315
-        β[4]    0.1221    0.1546    0.1715    0.1890    0.2228
-        β[5]    0.0591    0.0750    0.0833    0.0914    0.1076
+           v    0.7552    1.5016    2.7477    6.3105   68.6481
+        β[1]   -0.5073   -0.5073   -0.5073   -0.5073   -0.5073
+        β[2]    0.0082    0.0109    0.0123    0.0137    0.0165
+        β[3]   -0.5073   -0.5073   -0.5073   -0.5073   -0.5073
+        β[4]    0.1307    0.1651    0.1824    0.1998    0.2345
+        β[5]    0.0533    0.0682    0.0760    0.0838    0.0985
+
 ```

 ## Example 3: Poisson Regression
 **Poisson Regression - Likelihood analysis**
 ```jldoctest examples
- sanction = dataset("Zelig", "sanction")
-
-m3_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression());
-
-m3_1.fit
-
-m3_1.LogLike
-
-m3_1.AIC
-
-m3_1.BIC
+julia> sanction = dataset("Zelig", "sanction")
+78×8 DataFrame
+ Row │ Mil    Coop   Target  Import  Export  Cost   Num    NCost
+     │ Int32  Int32  Int32   Int32   Int32   Int32  Int32  Cat…
+─────┼───────────────────────────────────────────────────────────────────
+   1 │     1      4       3       1       1      4     15  major loss
+   2 │     0      2       3       0       1      3      4  modest loss
+   3 │     0      1       3       1       0      2      1  little effect
+   4 │     1      1       3       1       1      2      1  little effect
+   5 │     0      1       3       1       1      2      1  little effect
+   6 │     0      1       3       0       1      2      1  little effect
+   7 │     1      2       2       0       1      2      3  little effect
+   8 │     0      1       3       0       0      2      3  little effect
+  ⋮  │   ⋮      ⋮      ⋮       ⋮       ⋮       ⋮      ⋮         ⋮
+  72 │     0      2       2       0       0      1      8  net gain
+  73 │     1      3       1       1       1      2     14  little effect
+  74 │     0      2       1       0       0      1      2  net gain
+  75 │     0      1       3       0       1      2      1  little effect
+  76 │     0      4       3       1       0      2     13  little effect
+  77 │     0      1       2       0       0      1      1  net gain
+  78 │     1      3       1       1       1      2     10  little effect
+                                                     63 rows omitted
+
+julia> m3_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression());
+
+julia> m3_1.fit
+─────────────────────────────────────────────────────────────────────────────────
+                         Coef.  Std. 
Error z Pr(>|z|) Lower 95% Upper 95% +───────────────────────────────────────────────────────────────────────────────── +(Intercept) -1.91392 0.261667 -7.31 <1e-12 -2.42678 -1.40106 +Target 0.157769 0.0653822 2.41 0.0158 0.0296218 0.285915 +Coop 1.15127 0.0561861 20.49 <1e-92 1.04114 1.26139 +NCost: major loss -0.324051 0.230055 -1.41 0.1590 -0.774951 0.126848 +NCost: modest loss 1.71973 0.100518 17.11 <1e-64 1.52272 1.91674 +NCost: net gain 0.463907 0.16992 2.73 0.0063 0.13087 0.796944 +───────────────────────────────────────────────────────────────────────────────── + +julia> m3_1.LogLike +-284.33693448347356 + +julia> m3_1.AIC +580.6738689669471 + +julia> m3_1.BIC +594.8141219270847 ``` **Poisson Regression with Ridge Prior** ```jldoctest examples +julia> m3_2 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Ridge()); + +julia> m3_2.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ -m3_2 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Ridge()) + λ 1.3047 0.4939 0.0049 0.0080 4614.5363 1.0003 ⋯ + α -1.7955 0.2534 0.0025 0.0039 4454.7752 1.0000 ⋯ + β[1] 0.1390 0.0654 0.0007 0.0008 6391.9793 0.9999 ⋯ + β[2] 1.1322 0.0556 0.0006 0.0008 5857.7599 1.0002 ⋯ + β[3] -0.3259 0.2278 0.0023 0.0028 7027.6031 1.0000 ⋯ + β[4] 1.6974 0.0996 0.0010 0.0012 7459.8841 0.9999 ⋯ + β[5] 0.4036 0.1676 0.0017 0.0024 6099.1495 1.0001 ⋯ + 1 column omitted -m3_2.summaries +julia> m3_2.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 -m3_2.quantiles + λ 0.7118 0.9816 1.2004 1.4923 2.5232 + α -2.2925 -1.9675 -1.7963 -1.6226 -1.2942 + β[1] 0.0123 0.0950 0.1384 0.1840 0.2668 + β[2] 1.0240 1.0943 1.1326 1.1705 1.2402 + β[3] -0.7938 -0.4734 -0.3194 -0.1702 0.1013 + β[4] 1.5019 1.6310 1.6971 1.7643 1.8944 + β[5] 0.0683 0.2928 0.4049 0.5185 0.7256 ``` **Poisson Regression with Laplace Prior** ```jldoctest examples +julia> m3_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Laplace()); -m3_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Laplace()); +julia> m3_3.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ -m3_3.summaries + λ 1.0855 0.5268 0.0053 0.0077 5271.8825 1.0005 ⋯ + α -1.7913 0.2674 0.0027 0.0040 4231.1524 1.0000 ⋯ + β[1] 0.1358 0.0662 0.0007 0.0009 5785.6833 1.0000 ⋯ + β[2] 1.1320 0.0567 0.0006 0.0008 5446.3525 0.9999 ⋯ + β[3] -0.2906 0.2195 0.0022 0.0026 7570.4683 0.9999 ⋯ + β[4] 1.7025 0.0998 0.0010 0.0011 7436.8894 1.0000 ⋯ + β[5] 0.3949 0.1730 0.0017 0.0021 6648.4939 1.0001 ⋯ + 1 column omitted + +julia> m3_3.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 -m3_3.quantiles + λ 0.4543 0.7290 0.9624 1.2917 2.4373 + α -2.3164 -1.9738 -1.7953 -1.6079 -1.2586 + β[1] 0.0048 0.0922 0.1364 0.1796 0.2644 + β[2] 1.0202 1.0935 1.1321 1.1708 1.2427 + β[3] -0.7434 -0.4336 -0.2820 -0.1369 0.1038 + β[4] 1.5084 1.6358 1.7020 1.7679 1.8996 + β[5] 0.0503 0.2775 0.3959 0.5131 0.7315 ``` - **Poisson Regression with Cauchy Prior** +**Poisson Regression with Cauchy Prior** ```jldoctest examples -m3_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Cauchy()); +julia> m3_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Cauchy()); -m3_4.summaries +julia> 
m3_4.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.8456 0.4396 0.0044 0.0047 6775.8311 1.0001 ⋯ + α -1.7969 0.2640 0.0026 0.0037 5127.9411 0.9999 ⋯ + β[1] 0.1388 0.0654 0.0007 0.0007 7076.7634 0.9999 ⋯ + β[2] 1.1314 0.0562 0.0006 0.0008 6216.6410 0.9999 ⋯ + β[3] -0.2934 0.2153 0.0022 0.0026 6756.2170 0.9999 ⋯ + β[4] 1.7055 0.0983 0.0010 0.0011 7390.4902 0.9999 ⋯ + β[5] 0.3928 0.1675 0.0017 0.0019 6509.6692 0.9999 ⋯ + 1 column omitted + +julia> m3_4.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 -m3_4.quantiles + λ 0.2957 0.5448 0.7464 1.0367 1.9870 + α -2.3106 -1.9751 -1.7946 -1.6238 -1.2737 + β[1] 0.0120 0.0944 0.1382 0.1830 0.2674 + β[2] 1.0237 1.0934 1.1317 1.1692 1.2417 + β[3] -0.7479 -0.4322 -0.2864 -0.1464 0.1089 + β[4] 1.5156 1.6389 1.7055 1.7721 1.8945 + β[5] 0.0585 0.2811 0.3952 0.5066 0.7126 ``` **Poisson Regression with TDist Prior** ```jldoctest examples -m3_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_TDist()); +julia> m3_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_TDist()); -m3_5.summaries +julia> m3_5.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 0.9972 0.4285 0.0043 0.0047 8603.3650 0.9999 ⋯ + ν 2.9413 5.2859 0.0529 0.0916 3402.3197 1.0000 ⋯ + α -1.8112 0.2604 0.0026 0.0031 5515.2271 0.9999 ⋯ + β[1] 0.1410 0.0655 0.0007 0.0007 7259.2984 0.9999 ⋯ + β[2] 1.1344 0.0557 0.0006 0.0006 7189.8249 0.9999 ⋯ + β[3] -0.3121 0.2224 0.0022 0.0026 7858.4358 0.9999 ⋯ + β[4] 1.7029 0.0998 0.0010 0.0011 7737.1039 1.0000 ⋯ + β[5] 0.4039 0.1692 0.0017 0.0019 6815.5865 1.0004 ⋯ + 1 column omitted + +julia> m3_5.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 -m3_5.quantiles + λ 0.3985 0.7021 0.9209 1.2049 2.0244 + ν 0.5626 1.1544 1.8296 3.0831 12.0174 + α -2.3161 -1.9881 -1.8143 -1.6302 -1.3049 + β[1] 0.0134 0.0961 0.1410 0.1859 0.2687 + β[2] 1.0236 1.0970 1.1348 1.1720 1.2409 + β[3] -0.7690 -0.4572 -0.3059 -0.1591 0.1062 + β[4] 1.5070 1.6357 1.7014 1.7695 1.8992 + β[5] 0.0714 0.2891 0.4055 0.5202 0.7335 ``` **Poisson Regression with Uniform Prior** ```jldoctest examples -m3_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Uniform()); +julia> m3_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Uniform()); -m3_6.summaries +julia> m3_6.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 31.7237 269.4096 2.6941 4.6703 3141.7386 1.0002 ⋯ + α -1.6312 0.0000 0.0000 0.0000 NaN NaN ⋯ + β[1] 0.1113 0.0484 0.0005 0.0005 7517.0556 0.9999 ⋯ + β[2] 1.1174 0.0351 0.0004 0.0004 7513.8645 1.0000 ⋯ + β[3] -0.3724 0.2208 0.0022 0.0020 10488.8748 0.9999 ⋯ + β[4] 1.6312 0.0000 0.0000 0.0000 NaN NaN ⋯ + β[5] 0.3333 0.1345 0.0013 0.0012 10310.8481 1.0000 ⋯ + 1 column omitted -m3_6.quantiles +julia> m3_6.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 2.3312 4.1421 7.0190 15.1146 160.2559 + α -1.6312 -1.6312 -1.6312 -1.6312 -1.6312 + β[1] 0.0177 0.0783 0.1108 0.1441 0.2055 + β[2] 1.0485 1.0932 1.1182 1.1415 1.1844 + β[3] -0.8226 -0.5151 -0.3640 -0.2234 0.0370 + β[4] 1.6312 1.6312 1.6312 1.6312 1.6312 + β[5] 
0.0619 0.2449 0.3343 0.4241 0.5893 ``` + ## Example 4: Negative Binomial Regression -**Negative Binomial Regression - Likelihood method** ```jldoctest examples -m4_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction, NegBinomRegression()); - -m4_1.fit +julia> sanction = dataset("Zelig", "sanction") +78×8 DataFrame + Row │ Mil Coop Target Import Export Cost Num NCost + │ Int32 Int32 Int32 Int32 Int32 Int32 Int32 Cat… +─────┼─────────────────────────────────────────────────────────────────── + 1 │ 1 4 3 1 1 4 15 major loss + 2 │ 0 2 3 0 1 3 4 modest loss + 3 │ 0 1 3 1 0 2 1 little effect + 4 │ 1 1 3 1 1 2 1 little effect + 5 │ 0 1 3 1 1 2 1 little effect + 6 │ 0 1 3 0 1 2 1 little effect + 7 │ 1 2 2 0 1 2 3 little effect + 8 │ 0 1 3 0 0 2 3 little effect + ⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ + 72 │ 0 2 2 0 0 1 8 net gain + 73 │ 1 3 1 1 1 2 14 little effect + 74 │ 0 2 1 0 0 1 2 net gain + 75 │ 0 1 3 0 1 2 1 little effect + 76 │ 0 4 3 1 0 2 13 little effect + 77 │ 0 1 2 0 0 1 1 net gain + 78 │ 1 3 1 1 1 2 10 little effect + 63 rows omitted -m4_1.AIC +``` -m4_1.BIC +**Negative Binomial Regression - Likelihood method** +```jldoctest examples +julia> m4_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression()); + +julia> m4_1.fit +───────────────────────────────────────────────────────────────────────────────── + Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95% +───────────────────────────────────────────────────────────────────────────────── +(Intercept) -1.10939 0.459677 -2.41 0.0158 -2.01034 -0.208444 +Target 0.0117398 0.142779 0.08 0.9345 -0.268101 0.291581 +Coop 1.0506 0.111556 9.42 <1e-20 0.831949 1.26924 +NCost: major loss -0.204244 0.508156 -0.40 0.6877 -1.20021 0.791723 +NCost: modest loss 1.27142 0.290427 4.38 <1e-04 0.702197 1.84065 +NCost: net gain 0.176797 0.254291 0.70 0.4869 -0.321604 0.675197 +───────────────────────────────────────────────────────────────────────────────── + +julia> m4_1.AIC +363.85804286542685 + +julia> m4_1.BIC +377.9982958255644 -m4_1.lambda_hat ``` **NegativeBinomial Regression with Ridge Prior** ```jldoctest examples +julia> m4_2 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Ridge()); -m4_2 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Ridge()); +julia> m4_2.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ -m4_2.summaries + λ 2.0413 0.4480 0.0045 0.0051 8307.4837 0.9999 ⋯ + α -1.0893 0.5193 0.0052 0.0087 3899.6757 0.9999 ⋯ + β[1] -0.0048 0.1616 0.0016 0.0023 5386.0221 0.9999 ⋯ + β[2] 1.0642 0.1311 0.0013 0.0018 5604.7062 0.9999 ⋯ + β[3] -0.1729 0.5552 0.0056 0.0058 8708.3320 1.0000 ⋯ + β[4] 1.2807 0.3178 0.0032 0.0034 8557.2897 0.9999 ⋯ + β[5] 0.1556 0.2840 0.0028 0.0036 6126.1145 0.9999 ⋯ + 1 column omitted + +julia> m4_2.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 -m4_2.quantiles + λ 1.3137 1.7227 1.9925 2.3068 3.0644 + α -2.1030 -1.4363 -1.0925 -0.7387 -0.0708 + β[1] -0.3276 -0.1114 -0.0035 0.1031 0.3128 + β[2] 0.8102 0.9757 1.0640 1.1518 1.3243 + β[3] -1.2103 -0.5564 -0.1824 0.1904 0.9515 + β[4] 0.6475 1.0692 1.2835 1.4937 1.9101 + β[5] -0.3947 -0.0331 0.1557 0.3436 0.7122 ``` **NegativeBinomial Regression with Laplace Prior** ```jldoctest examples -m4_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Laplace()); +julia> m4_3 = @fitmodel((Num ~ Target + Coop + NCost), 
sanction,NegBinomRegression(),Prior_Laplace()); -m4_3.summaries +julia> m4_3.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 2.1062 0.4582 0.0046 0.0057 5683.7921 1.0001 ⋯ + α -1.0101 0.5154 0.0052 0.0084 3795.0884 1.0002 ⋯ + β[1] -0.0204 0.1590 0.0016 0.0022 5324.1385 1.0001 ⋯ + β[2] 1.0495 0.1329 0.0013 0.0018 4926.4231 1.0002 ⋯ + β[3] -0.1473 0.5059 0.0051 0.0054 7542.6944 1.0000 ⋯ + β[4] 1.2814 0.3226 0.0032 0.0038 6496.8709 1.0000 ⋯ + β[5] 0.1210 0.2777 0.0028 0.0039 6228.0313 1.0000 ⋯ + 1 column omitted + +julia> m4_3.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 -m4_3.quantiles + λ 1.3411 1.7873 2.0594 2.3793 3.1542 + α -2.0197 -1.3645 -1.0063 -0.6555 -0.0143 + β[1] -0.3337 -0.1265 -0.0194 0.0874 0.2908 + β[2] 0.7878 0.9605 1.0493 1.1380 1.3108 + β[3] -1.1584 -0.4804 -0.1489 0.1745 0.8704 + β[4] 0.6527 1.0626 1.2827 1.4935 1.9379 + β[5] -0.4316 -0.0628 0.1198 0.3065 0.6654 ``` - **Negative Binomial Regression with Cauchy Prior** +**Negative Binomial Regression with Cauchy Prior** ```jldoctest examples -m4_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Cauchy()); +m4_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Cauchy()) m4_4.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + λ 2.0321 0.4414 0.0044 0.0046 7966.3509 1.0000 ⋯ + α -1.0429 0.5163 0.0052 0.0092 3481.2902 0.9999 ⋯ + β[1] -0.0115 0.1626 0.0016 0.0024 4862.1372 0.9999 ⋯ + β[2] 1.0545 0.1318 0.0013 0.0019 5091.0562 0.9999 ⋯ + β[3] -0.1563 0.5484 0.0055 0.0058 7173.1471 0.9999 ⋯ + β[4] 1.2755 0.3303 0.0033 0.0036 7331.3107 1.0000 ⋯ + β[5] 0.1370 0.2782 0.0028 0.0038 5766.5502 0.9999 ⋯ + 1 column omitted m4_4.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + λ 1.3087 1.7197 1.9826 2.2938 3.0432 + α -2.0846 -1.3881 -1.0360 -0.6882 -0.0508 + β[1] -0.3277 -0.1201 -0.0135 0.1004 0.3104 + β[2] 0.8001 0.9644 1.0524 1.1424 1.3192 + β[3] -1.1840 -0.5340 -0.1662 0.1949 0.9594 + β[4] 0.6424 1.0565 1.2674 1.4899 1.9446 + β[5] -0.4143 -0.0500 0.1407 0.3254 0.6775 ``` **Negative Binomial Regression with TDist Prior** ```jldoctest examples -m4_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_TDist()); +julia> m4_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_TDist()); +ERROR: DomainError with Dual{ForwardDiff.Tag{Turing.Essential.var"#f#4"{DynamicPPL.TypedVarInfo{NamedTuple{(:λ, :ν, :α, :β), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:λ, Setfield.IdentityLens}, Int64}, Vector{Distributions.InverseGamma{Float64}}, Vector{AbstractPPL.VarName{:λ, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ν, Setfield.IdentityLens}, Int64}, Vector{Distributions.InverseGamma{Float64}}, Vector{AbstractPPL.VarName{:ν, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, Setfield.IdentityLens}, Int64}, Vector{Distributions.LocationScale{Float64, Distributions.Continuous, Distributions.TDist{Float64}}}, Vector{AbstractPPL.VarName{:α, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, 
Setfield.IdentityLens}, Int64}, Vector{Distributions.Product{Distributions.Continuous, Distributions.LocationScale{Float64, Distributions.Continuous, Distributions.TDist{Float64}}, FillArrays.Fill{Distributions.LocationScale{Float64, Distributions.Continuous, Distributions.TDist{Float64}}, 1, Tuple{Base.OneTo{Int64}}}}}, Vector{AbstractPPL.VarName{:β, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{CRRao.var"#NegBinomReg#19"{Float64}, (:X, :y), (), (), Tuple{Matrix{Float64}, Vector{Int32}}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.Sampler{Turing.Inference.NUTS{Turing.Essential.ForwardDiffAD{0}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}}(0.0,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN): +NegativeBinomial: the condition zero(p) < p <= one(p) is not satisfied. -m4_5.summaries +julia> m4_5.summaries +ERROR: UndefVarError: m4_5 not defined -m4_5.quantiles +julia> m4_5.quantiles +ERROR: UndefVarError: m4_5 not defined ``` **Negative Binomial Regression with Uniform Prior** ```jldoctest examples -m4_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Uniform(),1.0); +julia> m4_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Uniform(),1.0); -m4_6.summaries +julia> m4_6.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ -m4_6.quantiles + λ 3.2065 0.4036 0.0040 0.0054 4236.5829 1.0011 ⋯ + α -2.5288 0.0000 0.0000 0.0000 NaN NaN ⋯ + β[1] 0.3328 0.0984 0.0010 0.0027 204.3503 1.0604 ⋯ + β[2] 1.3577 0.0936 0.0009 0.0037 85.4193 1.1485 ⋯ + β[3] -1.5072 1.0686 0.0107 0.0999 21.9705 3.2882 ⋯ + β[4] 1.0054 0.3172 0.0032 0.0167 49.1963 1.3045 ⋯ + β[5] 0.5403 0.2348 0.0023 0.0097 75.8215 1.1720 ⋯ + 1 column omitted -``` +julia> m4_6.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + λ 2.6474 2.9139 3.1328 3.4106 4.1948 + α -2.5288 -2.5288 -2.5288 -2.5288 -2.5288 + β[1] 0.1367 0.2674 0.3346 0.3987 0.5259 + β[2] 1.1746 1.2938 1.3580 1.4202 1.5452 + β[3] -2.5288 -2.5288 -1.5590 -0.5322 0.3807 + β[4] 0.3983 0.7860 1.0072 1.2188 1.6254 + β[5] 0.0954 0.3776 0.5400 0.6997 1.0010 +``` \ No newline at end of file From acc9ee0346fad16a22ecd4e80d90d862854a211e Mon Sep 17 00:00:00 2001 From: Siddhant Chaudhary Date: Thu, 19 May 2022 18:44:07 +0200 Subject: [PATCH 5/9] Only keeping tests for two models. 
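With most of the examples about to be dropped, it helps to know how the surviving doctests are exercised. Below is a minimal sketch of a local doctest run, assuming the standard Documenter.jl workflow; the exact docs project layout and Documenter version are assumptions, not taken from this patch.

```julia
# Run only the doctests embedded in the package's docstrings and manual.
# `doctest` is Documenter's doctest-only entry point; newer Documenter
# versions also accept `fix = true` to rewrite stale outputs in place.
using Documenter, CRRao

doctest(CRRao)
```

Because these examples sample with MCMC, their printed output is only reproducible when the random seed is fixed, so a doctest run is sensitive both to seeding and to the package versions pinned in the docs environment.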
--- docs/src/examples.md | 1236 ------------------------------------------ 1 file changed, 1236 deletions(-) diff --git a/docs/src/examples.md b/docs/src/examples.md index 9dab6a2..293acb3 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -166,1240 +166,4 @@ Quantiles β[2] -4.5039 -3.3021 -2.6899 -2.0502 -0.6440 β[3] -0.2071 0.9672 1.5988 2.2439 3.7647 -``` - - - **Linear Regression - Laplace Prior** - -```jldoctest examples -julia> m1_3 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Laplace()); - -julia> m1_3.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - v 4.3182 3.3442 0.0334 0.0490 3968.7204 0.9999 ⋯ - σ 2.6657 0.3792 0.0038 0.0056 4690.6502 1.0000 ⋯ - α 29.0672 5.1669 0.0517 0.0918 3367.7350 1.0001 ⋯ - β[1] -0.0399 0.0105 0.0001 0.0002 4116.7824 1.0005 ⋯ - β[2] -2.7069 0.9341 0.0093 0.0170 3286.4012 1.0002 ⋯ - β[3] 1.5082 0.9373 0.0094 0.0163 3601.6346 1.0002 ⋯ - 1 column omitted - -julia> m1_3.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - v 1.2224 2.3903 3.4576 5.1836 12.4068 - σ 2.0359 2.3975 2.6246 2.8949 3.5178 - α 18.0169 25.8440 29.3179 32.5968 38.4400 - β[1] -0.0609 -0.0466 -0.0396 -0.0330 -0.0199 - β[2] -4.4455 -3.3403 -2.7409 -2.1152 -0.7174 - β[3] -0.2418 0.8759 1.4648 2.1019 3.4821 - -``` - - **Linear Regression - Cauchy Prior** -```jldoctest examples -julia> m1_4 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Cauchy(),20000); - -julia> m1_4.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - σ 2.5855 0.3416 0.0024 0.0036 9218.6691 1.0001 ⋯ - α 30.3875 4.6394 0.0328 0.0678 4559.8857 1.0001 ⋯ - β[1] -0.0394 0.0099 0.0001 0.0001 7652.1219 1.0000 ⋯ - β[2] -2.8435 0.8542 0.0060 0.0116 4998.6993 1.0001 ⋯ - β[3] 1.2513 0.8428 0.0060 0.0120 5011.2306 1.0000 ⋯ - 1 column omitted - -julia> m1_4.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - σ 2.0120 2.3452 2.5484 2.7877 3.3675 - α 20.9628 27.4262 30.4704 33.5255 39.1492 - β[1] -0.0591 -0.0459 -0.0393 -0.0328 -0.0201 - β[2] -4.4925 -3.4133 -2.8494 -2.2897 -1.1335 - β[3] -0.3345 0.6759 1.2385 1.7936 2.9585 - -``` - - **Linear Regression - T-Distributed Prior** - -```jldoctest examples -julia> m1_5 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); - -julia> m1_5.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - ν 1.0539 0.5597 0.0056 0.0070 5800.1893 1.0003 ⋯ - σ 2.6265 0.3686 0.0037 0.0047 6165.5244 0.9999 ⋯ - α 30.2167 4.8679 0.0487 0.1012 2225.6405 0.9999 ⋯ - β[1] -0.0393 0.0103 0.0001 0.0002 3319.3510 1.0005 ⋯ - β[2] -2.8300 0.8976 0.0090 0.0187 2396.9552 1.0001 ⋯ - β[3] 1.2837 0.8841 0.0088 0.0179 2334.0136 0.9999 ⋯ - 1 column omitted - -julia> m1_5.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - ν 0.3731 0.6686 0.9233 1.2896 2.4911 - σ 2.0385 2.3621 2.5841 2.8463 3.4736 - α 20.4434 27.0806 30.3379 33.4157 39.6471 - β[1] -0.0597 -0.0461 -0.0393 -0.0324 -0.0192 - β[2] -4.5979 -3.4317 -2.8360 -2.2500 -1.0505 - β[3] -0.4012 0.6970 1.2552 1.8472 3.0717 - -``` - - **Linear Regression - Uniform Prior** -```jldoctest examples -julia> m1_6 = @fitmodel((MPG ~ HP + 
WT+Gear),df,LinearRegression(),Prior_TDist()); - -julia> m1_6.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - ν 1.0665 0.5900 0.0059 0.0081 5791.3987 1.0007 ⋯ - σ 2.6276 0.3678 0.0037 0.0056 3788.8270 0.9999 ⋯ - α 30.3304 4.7387 0.0474 0.0881 2697.4202 0.9999 ⋯ - β[1] -0.0394 0.0102 0.0001 0.0002 3969.3250 0.9999 ⋯ - β[2] -2.8421 0.8679 0.0087 0.0159 2813.1886 0.9999 ⋯ - β[3] 1.2646 0.8642 0.0086 0.0157 2858.6901 0.9999 ⋯ - 1 column omitted - -julia> m1_6.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - ν 0.3749 0.6698 0.9298 1.3095 2.5508 - σ 2.0306 2.3712 2.5893 2.8418 3.4723 - α 20.4677 27.3179 30.5001 33.5657 39.1292 - β[1] -0.0596 -0.0460 -0.0392 -0.0324 -0.0198 - β[2] -4.5194 -3.4288 -2.8534 -2.2704 -1.1239 - β[3] -0.3362 0.6886 1.2226 1.8201 3.0601 - -``` - -## Example 2: Logistic Regression -```jldoctest examples -julia> turnout = dataset("Zelig", "turnout") -2000×5 DataFrame - Row │ Race Age Educate Income Vote - │ Cat… Int32 Float64 Float64 Int32 -──────┼─────────────────────────────────────── - 1 │ white 60 14.0 3.3458 1 - 2 │ white 51 10.0 1.8561 0 - 3 │ white 24 12.0 0.6304 0 - 4 │ white 38 8.0 3.4183 1 - 5 │ white 25 12.0 2.7852 1 - 6 │ white 67 12.0 2.3866 1 - 7 │ white 40 12.0 4.2857 0 - 8 │ white 56 10.0 9.3205 1 - ⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮ - 1994 │ white 58 12.0 0.1936 0 - 1995 │ white 22 7.0 0.2364 0 - 1996 │ white 26 16.0 3.3834 0 - 1997 │ white 34 12.0 2.917 1 - 1998 │ white 51 16.0 7.8949 1 - 1999 │ white 22 10.0 2.4811 0 - 2000 │ white 59 10.0 0.5523 0 - 1985 rows omitted - -julia> m2_1 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Logit()); - -julia> m2_1.fit -──────────────────────────────────────────────────────────────────────────── - Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95% -──────────────────────────────────────────────────────────────────────────── -(Intercept) -3.03426 0.325927 -9.31 <1e-19 -3.67307 -2.39546 -Age 0.0283543 0.00346034 8.19 <1e-15 0.0215722 0.0351365 -Race: white 0.250798 0.146457 1.71 0.0868 -0.0362521 0.537847 -Income 0.177112 0.0271516 6.52 <1e-10 0.123896 0.230328 -Educate 0.175634 0.0203308 8.64 <1e-17 0.135786 0.215481 -──────────────────────────────────────────────────────────────────────────── - -julia> m2_1.modelClass -"LogisticReg" - -julia> m2_1.LogLike --1011.9906318515575 - -julia> m2_1.AIC -2033.981263703115 - -julia> m2_1.BIC -2061.9857760008254 - -julia> m2_2 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Probit()); - -julia> m2_2.fit -──────────────────────────────────────────────────────────────────────────── - Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95% -──────────────────────────────────────────────────────────────────────────── -(Intercept) -1.76141 0.188556 -9.34 <1e-20 -2.13097 -1.39185 -Age 0.0164973 0.00199897 8.25 <1e-15 0.0125794 0.0204152 -Race: white 0.162856 0.0876885 1.86 0.0633 -0.0090108 0.334722 -Income 0.0963117 0.0149675 6.43 <1e-09 0.066976 0.125647 -Educate 0.10417 0.0116713 8.93 <1e-18 0.0812949 0.127046 -──────────────────────────────────────────────────────────────────────────── - -julia> m2_2.BIC -2062.201026236795 - -julia> m2_3 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Cloglog()); - -julia> m2_3.fit -───────────────────────────────────────────────────────────────────────────── - Coef. Std. 
Error z Pr(>|z|) Lower 95% Upper 95% -───────────────────────────────────────────────────────────────────────────── -(Intercept) -1.94617 0.184123 -10.57 <1e-25 -2.30704 -1.58529 -Age 0.0147857 0.00184088 8.03 <1e-15 0.0111776 0.0183937 -Race: white 0.185139 0.087101 2.13 0.0335 0.014424 0.355854 -Income 0.0768268 0.0126411 6.08 <1e-08 0.0520506 0.101603 -Educate 0.0983976 0.0108857 9.04 <1e-18 0.077062 0.119733 -───────────────────────────────────────────────────────────────────────────── - -julia> m2_3.BIC -2064.69463374921 - -julia> m2_4 = @fitmodel((Vote ~ Age + Race +Income + Educate) - ,turnout,LogisticRegression(),Cauchit()); - -julia> m2_4.fit -──────────────────────────────────────────────────────────────────────────── - Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95% -──────────────────────────────────────────────────────────────────────────── -(Intercept) -3.16889 0.384429 -8.24 <1e-15 -3.92235 -2.41542 -Age 0.0304105 0.00413473 7.35 <1e-12 0.0223066 0.0385144 -Race: white 0.181839 0.144766 1.26 0.2091 -0.101898 0.465576 -Income 0.235267 0.038152 6.17 <1e-09 0.16049 0.310043 -Educate 0.169276 0.0240098 7.05 <1e-11 0.122217 0.216334 -──────────────────────────────────────────────────────────────────────────── - -julia> m2_4.BIC -2078.946461750509 - -``` - - **Logistic Regression - with Ridge Prior** - -```jldoctest examples -julia> m2_5 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Ridge()); - -julia> m2_5.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 1.5314 0.6655 0.0067 0.0088 4736.5514 1.0003 ⋯ - β[1] -2.8619 0.3365 0.0034 0.0047 4505.8853 1.0004 ⋯ - β[2] 0.0270 0.0035 0.0000 0.0000 6544.5015 1.0002 ⋯ - β[3] 0.2279 0.1446 0.0014 0.0015 6968.9017 1.0000 ⋯ - β[4] 0.1773 0.0274 0.0003 0.0004 6716.7864 0.9999 ⋯ - β[5] 0.1676 0.0208 0.0002 0.0003 5279.6598 1.0001 ⋯ - 1 column omitted - -julia> m2_5.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.7570 1.1039 1.3921 1.7719 3.2092 - β[1] -3.5306 -3.0918 -2.8651 -2.6338 -2.2062 - β[2] 0.0203 0.0247 0.0270 0.0295 0.0338 - β[3] -0.0540 0.1311 0.2280 0.3264 0.5073 - β[4] 0.1243 0.1587 0.1767 0.1956 0.2327 - β[5] 0.1273 0.1535 0.1675 0.1815 0.2085 - -julia> m2_6 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Ridge(),1.0); - -julia> m2_6.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.9025 0.3651 0.0037 0.0054 4480.4540 1.0000 ⋯ - β[1] -1.6606 0.1915 0.0019 0.0030 4852.0370 0.9999 ⋯ - β[2] 0.0158 0.0020 0.0000 0.0000 7009.3493 1.0001 ⋯ - β[3] 0.1496 0.0856 0.0009 0.0009 7761.4054 0.9999 ⋯ - β[4] 0.0967 0.0148 0.0001 0.0001 7444.0661 1.0000 ⋯ - β[5] 0.0994 0.0118 0.0001 0.0002 5268.9316 0.9999 ⋯ - 1 column omitted - -julia> m2_6.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.4607 0.6606 0.8182 1.0396 1.8407 - β[1] -2.0383 -1.7904 -1.6583 -1.5311 -1.2863 - β[2] 0.0118 0.0144 0.0158 0.0171 0.0197 - β[3] -0.0173 0.0925 0.1488 0.2073 0.3180 - β[4] 0.0679 0.0865 0.0967 0.1066 0.1257 - β[5] 0.0768 0.0914 0.0993 0.1072 0.1231 - -julia> m2_7 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Ridge(),1.0); - -julia> m2_7.summaries -Summary Statistics - parameters mean std 
naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.9928 0.3745 0.0037 0.0054 5503.9612 0.9999 ⋯ - β[1] -1.8666 0.1845 0.0018 0.0026 5165.8451 1.0001 ⋯ - β[2] 0.0142 0.0018 0.0000 0.0000 7814.5837 1.0002 ⋯ - β[3] 0.1730 0.0860 0.0009 0.0009 6881.1222 0.9999 ⋯ - β[4] 0.0771 0.0120 0.0001 0.0001 7628.3991 1.0000 ⋯ - β[5] 0.0947 0.0109 0.0001 0.0002 5465.7776 1.0000 ⋯ - 1 column omitted - -julia> m2_7.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.5248 0.7395 0.9092 1.1465 1.9612 - β[1] -2.2325 -1.9919 -1.8664 -1.7424 -1.5047 - β[2] 0.0107 0.0130 0.0142 0.0154 0.0178 - β[3] 0.0038 0.1149 0.1732 0.2303 0.3426 - β[4] 0.0542 0.0687 0.0770 0.0851 0.1006 - β[5] 0.0728 0.0875 0.0947 0.1021 0.1162 - -julia> m2_8 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Ridge(),1.0); - -julia> m2_8.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 1.5164 0.6110 0.0061 0.0088 5073.8844 0.9999 ⋯ - β[1] -2.9665 0.3808 0.0038 0.0055 3900.2655 1.0001 ⋯ - β[2] 0.0286 0.0043 0.0000 0.0001 4943.5487 1.0002 ⋯ - β[3] 0.1604 0.1508 0.0015 0.0018 6891.3045 0.9999 ⋯ - β[4] 0.2395 0.0397 0.0004 0.0005 6038.7928 0.9999 ⋯ - β[5] 0.1592 0.0237 0.0002 0.0003 4328.8957 1.0000 ⋯ - 1 column omitted - -julia> m2_8.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.7799 1.1154 1.3821 1.7577 3.0617 - β[1] -3.7283 -3.2133 -2.9612 -2.7075 -2.2404 - β[2] 0.0205 0.0257 0.0286 0.0315 0.0374 - β[3] -0.1449 0.0613 0.1635 0.2610 0.4474 - β[4] 0.1640 0.2126 0.2385 0.2651 0.3212 - β[5] 0.1148 0.1431 0.1583 0.1748 0.2070 - -``` - - **Logistic Regression - with Laplace Prior** -```jldoctest examples -julia> m2_9 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Laplace()); - -julia> m2_9.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.8729 0.5109 0.0051 0.0070 4168.8950 0.9999 ⋯ - β[1] -2.8745 0.3293 0.0033 0.0043 4525.5742 0.9999 ⋯ - β[2] 0.0273 0.0035 0.0000 0.0000 6601.7518 0.9999 ⋯ - β[3] 0.2103 0.1424 0.0014 0.0013 7660.9194 1.0000 ⋯ - β[4] 0.1767 0.0271 0.0003 0.0003 7474.8745 1.0001 ⋯ - β[5] 0.1690 0.0202 0.0002 0.0003 4860.4472 1.0001 ⋯ - 1 column omitted - -julia> m2_9.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.3280 0.5428 0.7460 1.0415 2.1841 - β[1] -3.5072 -3.1004 -2.8769 -2.6541 -2.2327 - β[2] 0.0206 0.0250 0.0273 0.0297 0.0342 - β[3] -0.0614 0.1128 0.2082 0.3091 0.4884 - β[4] 0.1252 0.1582 0.1762 0.1947 0.2305 - β[5] 0.1297 0.1552 0.1691 0.1827 0.2079 - -julia> m2_10 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Laplace()); - -julia> m2_10.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.8677 0.5061 0.0051 0.0066 5223.5887 0.9999 ⋯ - β[1] -2.8772 0.3324 0.0033 0.0051 3975.5078 0.9999 ⋯ - β[2] 0.0273 0.0035 0.0000 0.0000 6116.3599 0.9999 ⋯ - β[3] 0.2075 0.1449 0.0014 0.0017 6674.4916 0.9999 ⋯ - β[4] 0.1772 0.0274 0.0003 0.0003 6721.6931 1.0000 ⋯ - β[5] 0.1693 0.0205 0.0002 0.0003 4354.2790 0.9999 ⋯ - 1 column omitted - -julia> m2_10.quantiles -Quantiles - 
parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.3292 0.5522 0.7445 1.0321 2.1142 - β[1] -3.5240 -3.1023 -2.8816 -2.6542 -2.2191 - β[2] 0.0205 0.0250 0.0273 0.0297 0.0341 - β[3] -0.0676 0.1086 0.2058 0.3058 0.4905 - β[4] 0.1248 0.1584 0.1768 0.1960 0.2308 - β[5] 0.1296 0.1554 0.1693 0.1830 0.2104 - -julia> m2_11 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Laplace(),1.0); - -julia> m2_11.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.9072 0.4890 0.0049 0.0069 4738.4116 1.0004 ⋯ - β[1] -2.8796 0.3286 0.0033 0.0041 5837.4510 1.0000 ⋯ - β[2] 0.0273 0.0035 0.0000 0.0000 7668.8459 1.0001 ⋯ - β[3] 0.2121 0.1436 0.0014 0.0015 7916.5774 1.0002 ⋯ - β[4] 0.1777 0.0268 0.0003 0.0003 8707.6308 0.9999 ⋯ - β[5] 0.1691 0.0204 0.0002 0.0002 6188.7181 0.9999 ⋯ - 1 column omitted - -julia> m2_11.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.3746 0.5961 0.7889 1.0716 2.1763 - β[1] -3.5249 -3.1023 -2.8795 -2.6541 -2.2457 - β[2] 0.0207 0.0249 0.0273 0.0297 0.0341 - β[3] -0.0624 0.1129 0.2114 0.3114 0.4930 - β[4] 0.1253 0.1597 0.1772 0.1962 0.2305 - β[5] 0.1290 0.1553 0.1691 0.1827 0.2089 - -julia> m2_12 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Laplace(),1.0); - -julia> m2_12.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.8988 0.4514 0.0045 0.0063 5520.6699 1.0006 ⋯ - β[1] -2.8904 0.3304 0.0033 0.0043 4419.1261 0.9999 ⋯ - β[2] 0.0274 0.0034 0.0000 0.0000 5962.6203 0.9999 ⋯ - β[3] 0.2133 0.1421 0.0014 0.0017 6693.5568 1.0008 ⋯ - β[4] 0.1773 0.0271 0.0003 0.0003 7664.6326 0.9999 ⋯ - β[5] 0.1698 0.0203 0.0002 0.0003 4712.2368 1.0001 ⋯ - 1 column omitted - -julia> m2_12.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.3770 0.5991 0.7909 1.0748 2.0284 - β[1] -3.5400 -3.1123 -2.8919 -2.6659 -2.2510 - β[2] 0.0208 0.0250 0.0274 0.0297 0.0343 - β[3] -0.0553 0.1161 0.2109 0.3058 0.4987 - β[4] 0.1243 0.1587 0.1771 0.1953 0.2310 - β[5] 0.1308 0.1558 0.1695 0.1834 0.2095 - -``` - - **Logistic Regression - with Cauchy Prior** -```jldoctest examples -julia> m2_13 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Cauchy(),1.0); - -julia> m2_13.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.3060 0.2319 0.0023 0.0028 6670.6339 0.9999 ⋯ - β[1] -2.9196 0.3286 0.0033 0.0044 4636.5902 0.9999 ⋯ - β[2] 0.0280 0.0034 0.0000 0.0000 6407.9414 0.9999 ⋯ - β[3] 0.1791 0.1358 0.0014 0.0019 5612.7276 1.0000 ⋯ - β[4] 0.1773 0.0273 0.0003 0.0004 6538.4075 1.0014 ⋯ - β[5] 0.1725 0.0205 0.0002 0.0003 5017.4150 1.0000 ⋯ - 1 column omitted - -julia> m2_13.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.0648 0.1582 0.2433 0.3780 0.9132 - β[1] -3.5551 -3.1405 -2.9220 -2.6936 -2.2841 - β[2] 0.0213 0.0257 0.0279 0.0303 0.0348 - β[3] -0.0580 0.0786 0.1723 0.2712 0.4634 - β[4] 0.1232 0.1586 0.1774 0.1957 0.2306 - β[5] 0.1326 0.1585 0.1724 0.1866 0.2122 - -julia> m2_14 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - 
,LogisticRegression(),Probit(),Prior_Cauchy(),2.0,30000); - -julia> m2_14.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.3083 0.2464 0.0025 0.0030 6527.1307 0.9999 ⋯ - β[1] -2.9164 0.3268 0.0033 0.0054 4001.0802 0.9999 ⋯ - β[2] 0.0279 0.0034 0.0000 0.0000 5853.3102 0.9999 ⋯ - β[3] 0.1778 0.1384 0.0014 0.0017 6653.8555 0.9999 ⋯ - β[4] 0.1766 0.0270 0.0003 0.0004 5995.1423 1.0002 ⋯ - β[5] 0.1727 0.0202 0.0002 0.0003 4212.0545 1.0000 ⋯ - 1 column omitted - -julia> m2_14.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.0642 0.1553 0.2436 0.3806 0.9498 - β[1] -3.5544 -3.1398 -2.9155 -2.6942 -2.2855 - β[2] 0.0211 0.0256 0.0280 0.0303 0.0348 - β[3] -0.0658 0.0766 0.1706 0.2721 0.4679 - β[4] 0.1239 0.1584 0.1769 0.1946 0.2298 - β[5] 0.1332 0.1592 0.1727 0.1860 0.2127 - -julia> m2_15 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Cauchy(),1.0); - -julia> m2_15.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.3077 0.2368 0.0024 0.0031 5706.3441 1.0003 ⋯ - β[1] -2.9128 0.3295 0.0033 0.0042 5051.3410 0.9999 ⋯ - β[2] 0.0279 0.0035 0.0000 0.0000 6723.5109 0.9999 ⋯ - β[3] 0.1785 0.1376 0.0014 0.0017 6301.5177 0.9999 ⋯ - β[4] 0.1772 0.0270 0.0003 0.0003 7536.6524 0.9999 ⋯ - β[5] 0.1724 0.0203 0.0002 0.0002 5509.0935 0.9999 ⋯ - 1 column omitted - -julia> m2_15.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.0647 0.1573 0.2431 0.3769 0.9521 - β[1] -3.5601 -3.1328 -2.9126 -2.6877 -2.2779 - β[2] 0.0213 0.0255 0.0278 0.0302 0.0349 - β[3] -0.0625 0.0791 0.1714 0.2699 0.4611 - β[4] 0.1243 0.1590 0.1768 0.1952 0.2306 - β[5] 0.1329 0.1587 0.1723 0.1860 0.2117 - -julia> m2_16 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Cauchy(),1.0); - -julia> m2_16.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.3049 0.2370 0.0024 0.0029 5684.3206 1.0000 ⋯ - β[1] -2.9134 0.3314 0.0033 0.0046 5120.8859 1.0000 ⋯ - β[2] 0.0279 0.0034 0.0000 0.0000 6977.9876 0.9999 ⋯ - β[3] 0.1752 0.1372 0.0014 0.0015 6830.2857 1.0000 ⋯ - β[4] 0.1770 0.0267 0.0003 0.0003 8103.9753 0.9999 ⋯ - β[5] 0.1726 0.0204 0.0002 0.0003 5655.1416 0.9999 ⋯ - 1 column omitted - -julia> m2_16.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.0667 0.1556 0.2404 0.3788 0.9164 - β[1] -3.5774 -3.1275 -2.9059 -2.6959 -2.2603 - β[2] 0.0212 0.0255 0.0279 0.0303 0.0347 - β[3] -0.0675 0.0764 0.1688 0.2667 0.4589 - β[4] 0.1254 0.1584 0.1769 0.1950 0.2298 - β[5] 0.1331 0.1588 0.1724 0.1864 0.2138 - -``` - - **Logistic Regression - with T-Dist Prior** -```jldoctest examples -julia> m2_17 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_TDist(),1.0); - -julia> m2_17.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.5289 0.3511 0.0035 0.0056 4448.5392 0.9999 ⋯ - ν 2.2155 10.7265 0.1073 0.1899 3067.8091 1.0003 ⋯ - β[1] -2.9351 0.3287 0.0033 0.0040 5364.8317 1.0002 ⋯ - β[2] 0.0278 0.0035 0.0000 0.0000 7461.5560 0.9999 ⋯ - β[3] 0.2065 0.1429 0.0014 
0.0017 7208.6730 0.9999 ⋯ - β[4] 0.1768 0.0278 0.0003 0.0003 7523.2925 0.9999 ⋯ - β[5] 0.1727 0.0206 0.0002 0.0002 5830.2383 1.0002 ⋯ - 1 column omitted - -julia> m2_17.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.1780 0.3040 0.4273 0.6348 1.4809 - ν 0.3755 0.7694 1.1673 1.8724 7.4220 - β[1] -3.5790 -3.1586 -2.9311 -2.7106 -2.3021 - β[2] 0.0210 0.0255 0.0278 0.0302 0.0350 - β[3] -0.0652 0.1084 0.2039 0.2996 0.4975 - β[4] 0.1230 0.1579 0.1765 0.1955 0.2328 - β[5] 0.1325 0.1587 0.1727 0.1864 0.2135 - -julia> m2_18 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_TDist(),1.0); - -julia> m2_18.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat e ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 4.8118 0.0029 0.0000 0.0003 21.0934 1.3204 ⋯ - ν 3.7561 0.0016 0.0000 0.0002 22.4348 1.1310 ⋯ - β[1] -22.6598 0.0004 0.0000 0.0000 22.2429 1.2323 ⋯ - β[2] 0.8228 0.0000 0.0000 0.0000 66.1383 1.1389 ⋯ - β[3] -2.0240 0.0004 0.0000 0.0000 21.0386 2.6936 ⋯ - β[4] -8.7439 0.0027 0.0000 0.0003 20.2520 2.8354 ⋯ - β[5] -6.9804 0.0062 0.0001 0.0006 20.2317 2.7320 ⋯ - 1 column omitted - -julia> m2_18.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 4.8048 4.8110 4.8128 4.8138 4.8151 - ν 3.7527 3.7552 3.7562 3.7574 3.7584 - β[1] -22.6608 -22.6600 -22.6598 -22.6596 -22.6592 - β[2] 0.8228 0.8228 0.8228 0.8228 0.8229 - β[3] -2.0247 -2.0243 -2.0240 -2.0237 -2.0234 - β[4] -8.7480 -8.7470 -8.7436 -8.7415 -8.7392 - β[5] -6.9900 -6.9861 -6.9801 -6.9747 -6.9706 - -julia> m2_19 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_TDist(),1.0); - -julia> m2_19.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat es ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.7808 0.4602 0.0046 0.0453 23.5174 1.2619 ⋯ - ν 3.7505 4.1940 0.0419 0.4159 21.9030 1.2467 ⋯ - β[1] -2.7421 0.3762 0.0038 0.0363 26.8105 1.1348 ⋯ - β[2] 0.0415 0.0723 0.0007 0.0072 27.8089 1.0384 ⋯ - β[3] 0.1168 0.4182 0.0042 0.0408 26.9065 1.0372 ⋯ - β[4] -0.0110 0.8895 0.0089 0.0882 27.0833 1.0456 ⋯ - β[5] 0.1890 0.1184 0.0012 0.0112 29.1688 1.0274 ⋯ - 1 column omitted - -julia> m2_19.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.2057 0.4125 0.6491 1.1041 1.8468 - ν 0.6845 1.3588 2.1210 3.7455 17.0191 - β[1] -3.3513 -3.0447 -2.7642 -2.4708 -2.0116 - β[2] 0.0195 0.0242 0.0269 0.0295 0.3964 - β[3] -1.6313 0.1010 0.2012 0.2958 0.4656 - β[4] -4.3298 0.1543 0.1750 0.1952 0.2313 - β[5] 0.1229 0.1497 0.1663 0.1821 0.6713 - -julia> m2_20 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_TDist(),1.0); - -julia> m2_20.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.5314 0.3468 0.0035 0.0044 4487.2447 1.0004 ⋯ - ν 1.9974 5.5248 0.0552 0.0980 3013.3594 0.9999 ⋯ - β[1] -2.9315 0.3305 0.0033 0.0040 5717.6278 1.0002 ⋯ - β[2] 0.0278 0.0035 0.0000 0.0000 7735.5418 1.0001 ⋯ - β[3] 0.2045 0.1403 0.0014 0.0016 8011.0127 1.0006 ⋯ - β[4] 0.1772 0.0273 0.0003 0.0003 7454.0591 0.9999 ⋯ - β[5] 0.1724 0.0206 0.0002 0.0003 5544.3491 1.0011 ⋯ - 1 column omitted - -julia> m2_20.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 
Float64 Float64 Float64 - - λ 0.1763 0.3082 0.4331 0.6374 1.4639 - ν 0.3746 0.7738 1.1719 1.8817 7.2365 - β[1] -3.5830 -3.1509 -2.9295 -2.7051 -2.2834 - β[2] 0.0210 0.0255 0.0278 0.0302 0.0345 - β[3] -0.0670 0.1086 0.2025 0.2996 0.4815 - β[4] 0.1252 0.1586 0.1770 0.1952 0.2317 - β[5] 0.1327 0.1586 0.1724 0.1863 0.2136 - -``` - - **Logistic Regression - with Uniform Prior** -```jldoctest examples -julia> m2_21 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Logit(),Prior_Uniform(),1.0); - -julia> m2_21.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - v 13.7852 180.2962 1.8030 2.6824 4217.9790 1.0006 ⋯ - β[1] -0.6624 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[2] 0.0103 0.0023 0.0000 0.0000 8889.1797 0.9999 ⋯ - β[3] -0.0034 0.1519 0.0015 0.0019 7495.4943 0.9999 ⋯ - β[4] 0.1710 0.0259 0.0003 0.0003 7518.2286 1.0000 ⋯ - β[5] 0.0637 0.0126 0.0001 0.0002 6785.0123 1.0000 ⋯ - 1 column omitted - -julia> m2_21.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - v 0.8333 1.5379 2.6688 6.2389 61.2451 - β[1] -0.6624 -0.6624 -0.6624 -0.6624 -0.6624 - β[2] 0.0058 0.0087 0.0103 0.0118 0.0147 - β[3] -0.3015 -0.1034 -0.0040 0.0998 0.2916 - β[4] 0.1211 0.1531 0.1708 0.1884 0.2225 - β[5] 0.0383 0.0554 0.0639 0.0722 0.0882 - -julia> m2_22 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Probit(),Prior_Uniform(),1.0); - -julia> m2_22.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - v 15.3310 222.0068 2.2201 4.0557 3034.3611 1.0003 ⋯ - β[1] -0.3434 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[2] 0.0099 0.0021 0.0000 0.0000 9458.2021 0.9999 ⋯ - β[3] -0.3434 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[4] 0.1789 0.0266 0.0003 0.0003 8784.2763 0.9999 ⋯ - β[5] 0.0608 0.0117 0.0001 0.0001 9076.6331 0.9999 ⋯ - 1 column omitted - -julia> m2_22.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - v 0.5648 1.1828 2.2363 5.0195 54.2485 - β[1] -0.3434 -0.3434 -0.3434 -0.3434 -0.3434 - β[2] 0.0057 0.0085 0.0099 0.0113 0.0141 - β[3] -0.3434 -0.3434 -0.3434 -0.3434 -0.3434 - β[4] 0.1281 0.1610 0.1784 0.1968 0.2315 - β[5] 0.0378 0.0529 0.0609 0.0689 0.0832 - -julia> m2_23 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cloglog(),Prior_Uniform(),1.0); - -julia> m2_23.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - v 7.3458 35.8188 0.3582 0.5261 4241.8175 0.9999 ⋯ - β[1] -0.3274 0.0000 0.0000 0.0000 NaN NaN ⋯ - β[2] 0.0097 0.0021 0.0000 0.0000 9364.4462 0.9999 ⋯ - β[3] -0.3274 0.0000 0.0000 0.0000 NaN NaN ⋯ - β[4] 0.1791 0.0261 0.0003 0.0002 9621.3881 0.9999 ⋯ - β[5] 0.0590 0.0113 0.0001 0.0001 9168.4454 0.9999 ⋯ - 1 column omitted - -julia> m2_23.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - v 0.5398 1.1335 2.0818 4.7264 43.0439 - β[1] -0.3274 -0.3274 -0.3274 -0.3274 -0.3274 - β[2] 0.0055 0.0083 0.0097 0.0111 0.0137 - β[3] -0.3274 -0.3274 -0.3274 -0.3274 -0.3274 - β[4] 0.1298 0.1612 0.1788 0.1962 0.2318 - β[5] 0.0370 0.0515 0.0590 0.0665 0.0812 - -julia> m2_24 = @fitmodel((Vote ~ Age + Race +Income + Educate),turnout - ,LogisticRegression(),Cauchit(),Prior_Uniform(),1.0); - 
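The `Logit()`, `Probit()`, `Cloglog()`, and `Cauchit()` variants fitted above differ only in the inverse link that maps the linear predictor to a probability. A minimal sketch of the four inverse links, using their standard definitions (only `Distributions` is assumed):

```julia
# Inverse link functions for the four logistic-regression variants.
# Each maps z = x'β from the real line into (0, 1).
using Distributions

invlogit(z)   = 1 / (1 + exp(-z))   # Logit: logistic CDF
invprobit(z)  = cdf(Normal(), z)    # Probit: standard normal CDF
invcloglog(z) = 1 - exp(-exp(z))    # Cloglog: complementary log-log
invcauchit(z) = cdf(Cauchy(), z)    # Cauchit: standard Cauchy CDF

invlogit(0.5), invprobit(0.5), invcloglog(0.5), invcauchit(0.5)
# (0.6225, 0.6915, 0.8077, 0.6476)
```

Because each link scales the linear predictor differently, coefficients from these four fits are not directly comparable in magnitude, only in sign and relative size.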
-julia> m2_24.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - v 50.7212 2454.3435 24.5434 39.2977 4005.7567 1.0001 ⋯ - β[1] -0.5073 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[2] 0.0123 0.0021 0.0000 0.0000 9294.7660 0.9999 ⋯ - β[3] -0.5073 0.0000 0.0000 0.0000 20.5530 0.9999 ⋯ - β[4] 0.1826 0.0263 0.0003 0.0003 8350.3257 0.9999 ⋯ - β[5] 0.0761 0.0116 0.0001 0.0001 8318.5904 1.0000 ⋯ - 1 column omitted - -julia> m2_24.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - v 0.7552 1.5016 2.7477 6.3105 68.6481 - β[1] -0.5073 -0.5073 -0.5073 -0.5073 -0.5073 - β[2] 0.0082 0.0109 0.0123 0.0137 0.0165 - β[3] -0.5073 -0.5073 -0.5073 -0.5073 -0.5073 - β[4] 0.1307 0.1651 0.1824 0.1998 0.2345 - β[5] 0.0533 0.0682 0.0760 0.0838 0.0985 - -``` - -## Example 3: Poisson Regression - - **Poisson Regression - Likelihood analysis** -```jldoctest examples -sanction = dataset("Zelig", "sanction") -78×8 DataFrame - Row │ Mil Coop Target Import Export Cost Num NCost - │ Int32 Int32 Int32 Int32 Int32 Int32 Int32 Cat… -─────┼─────────────────────────────────────────────────────────────────── - 1 │ 1 4 3 1 1 4 15 major loss - 2 │ 0 2 3 0 1 3 4 modest loss - 3 │ 0 1 3 1 0 2 1 little effect - 4 │ 1 1 3 1 1 2 1 little effect - 5 │ 0 1 3 1 1 2 1 little effect - 6 │ 0 1 3 0 1 2 1 little effect - 7 │ 1 2 2 0 1 2 3 little effect - 8 │ 0 1 3 0 0 2 3 little effect - ⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ - 72 │ 0 2 2 0 0 1 8 net gain - 73 │ 1 3 1 1 1 2 14 little effect - 74 │ 0 2 1 0 0 1 2 net gain - 75 │ 0 1 3 0 1 2 1 little effect - 76 │ 0 4 3 1 0 2 13 little effect - 77 │ 0 1 2 0 0 1 1 net gain - 78 │ 1 3 1 1 1 2 10 little effect - 63 rows omitted - -julia> m3_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression()); - -julia> m3_1.fit -───────────────────────────────────────────────────────────────────────────────── - Coef. Std. 
Error z Pr(>|z|) Lower 95% Upper 95% -───────────────────────────────────────────────────────────────────────────────── -(Intercept) -1.91392 0.261667 -7.31 <1e-12 -2.42678 -1.40106 -Target 0.157769 0.0653822 2.41 0.0158 0.0296218 0.285915 -Coop 1.15127 0.0561861 20.49 <1e-92 1.04114 1.26139 -NCost: major loss -0.324051 0.230055 -1.41 0.1590 -0.774951 0.126848 -NCost: modest loss 1.71973 0.100518 17.11 <1e-64 1.52272 1.91674 -NCost: net gain 0.463907 0.16992 2.73 0.0063 0.13087 0.796944 -───────────────────────────────────────────────────────────────────────────────── - -julia> m3_1.LogLike --284.33693448347356 - -julia> m3_1.AIC -580.6738689669471 - -julia> m3_1.BIC -594.8141219270847 - -``` - - **Poisson Regression with Ridge Prior** -```jldoctest examples -julia> m3_2 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Ridge()); - -julia> m3_2.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 1.3047 0.4939 0.0049 0.0080 4614.5363 1.0003 ⋯ - α -1.7955 0.2534 0.0025 0.0039 4454.7752 1.0000 ⋯ - β[1] 0.1390 0.0654 0.0007 0.0008 6391.9793 0.9999 ⋯ - β[2] 1.1322 0.0556 0.0006 0.0008 5857.7599 1.0002 ⋯ - β[3] -0.3259 0.2278 0.0023 0.0028 7027.6031 1.0000 ⋯ - β[4] 1.6974 0.0996 0.0010 0.0012 7459.8841 0.9999 ⋯ - β[5] 0.4036 0.1676 0.0017 0.0024 6099.1495 1.0001 ⋯ - 1 column omitted - -julia> m3_2.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.7118 0.9816 1.2004 1.4923 2.5232 - α -2.2925 -1.9675 -1.7963 -1.6226 -1.2942 - β[1] 0.0123 0.0950 0.1384 0.1840 0.2668 - β[2] 1.0240 1.0943 1.1326 1.1705 1.2402 - β[3] -0.7938 -0.4734 -0.3194 -0.1702 0.1013 - β[4] 1.5019 1.6310 1.6971 1.7643 1.8944 - β[5] 0.0683 0.2928 0.4049 0.5185 0.7256 - -``` - - **Poisson Regression with Laplace Prior** -```jldoctest examples -julia> m3_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Laplace()); - -julia> m3_3.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 1.0855 0.5268 0.0053 0.0077 5271.8825 1.0005 ⋯ - α -1.7913 0.2674 0.0027 0.0040 4231.1524 1.0000 ⋯ - β[1] 0.1358 0.0662 0.0007 0.0009 5785.6833 1.0000 ⋯ - β[2] 1.1320 0.0567 0.0006 0.0008 5446.3525 0.9999 ⋯ - β[3] -0.2906 0.2195 0.0022 0.0026 7570.4683 0.9999 ⋯ - β[4] 1.7025 0.0998 0.0010 0.0011 7436.8894 1.0000 ⋯ - β[5] 0.3949 0.1730 0.0017 0.0021 6648.4939 1.0001 ⋯ - 1 column omitted - -julia> m3_3.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.4543 0.7290 0.9624 1.2917 2.4373 - α -2.3164 -1.9738 -1.7953 -1.6079 -1.2586 - β[1] 0.0048 0.0922 0.1364 0.1796 0.2644 - β[2] 1.0202 1.0935 1.1321 1.1708 1.2427 - β[3] -0.7434 -0.4336 -0.2820 -0.1369 0.1038 - β[4] 1.5084 1.6358 1.7020 1.7679 1.8996 - β[5] 0.0503 0.2775 0.3959 0.5131 0.7315 - -``` - -**Poisson Regression with Cauchy Prior** -```jldoctest examples -julia> m3_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Cauchy()); - -julia> m3_4.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.8456 0.4396 0.0044 0.0047 6775.8311 1.0001 ⋯ - α -1.7969 0.2640 0.0026 0.0037 5127.9411 0.9999 ⋯ - β[1] 0.1388 0.0654 0.0007 0.0007 7076.7634 0.9999 ⋯ - β[2] 1.1314 0.0562 0.0006 0.0008 6216.6410 0.9999 ⋯ - β[3] -0.2934 0.2153 
0.0022 0.0026 6756.2170 0.9999 ⋯ - β[4] 1.7055 0.0983 0.0010 0.0011 7390.4902 0.9999 ⋯ - β[5] 0.3928 0.1675 0.0017 0.0019 6509.6692 0.9999 ⋯ - 1 column omitted - -julia> m3_4.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.2957 0.5448 0.7464 1.0367 1.9870 - α -2.3106 -1.9751 -1.7946 -1.6238 -1.2737 - β[1] 0.0120 0.0944 0.1382 0.1830 0.2674 - β[2] 1.0237 1.0934 1.1317 1.1692 1.2417 - β[3] -0.7479 -0.4322 -0.2864 -0.1464 0.1089 - β[4] 1.5156 1.6389 1.7055 1.7721 1.8945 - β[5] 0.0585 0.2811 0.3952 0.5066 0.7126 - -``` - - **Poisson Regression with TDist Prior** -```jldoctest examples -julia> m3_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_TDist()); - -julia> m3_5.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 0.9972 0.4285 0.0043 0.0047 8603.3650 0.9999 ⋯ - ν 2.9413 5.2859 0.0529 0.0916 3402.3197 1.0000 ⋯ - α -1.8112 0.2604 0.0026 0.0031 5515.2271 0.9999 ⋯ - β[1] 0.1410 0.0655 0.0007 0.0007 7259.2984 0.9999 ⋯ - β[2] 1.1344 0.0557 0.0006 0.0006 7189.8249 0.9999 ⋯ - β[3] -0.3121 0.2224 0.0022 0.0026 7858.4358 0.9999 ⋯ - β[4] 1.7029 0.0998 0.0010 0.0011 7737.1039 1.0000 ⋯ - β[5] 0.4039 0.1692 0.0017 0.0019 6815.5865 1.0004 ⋯ - 1 column omitted - -julia> m3_5.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 0.3985 0.7021 0.9209 1.2049 2.0244 - ν 0.5626 1.1544 1.8296 3.0831 12.0174 - α -2.3161 -1.9881 -1.8143 -1.6302 -1.3049 - β[1] 0.0134 0.0961 0.1410 0.1859 0.2687 - β[2] 1.0236 1.0970 1.1348 1.1720 1.2409 - β[3] -0.7690 -0.4572 -0.3059 -0.1591 0.1062 - β[4] 1.5070 1.6357 1.7014 1.7695 1.8992 - β[5] 0.0714 0.2891 0.4055 0.5202 0.7335 - -``` - - **Poisson Regression with Uniform Prior** -```jldoctest examples -julia> m3_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,PoissonRegression(),Prior_Uniform()); - -julia> m3_6.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 31.7237 269.4096 2.6941 4.6703 3141.7386 1.0002 ⋯ - α -1.6312 0.0000 0.0000 0.0000 NaN NaN ⋯ - β[1] 0.1113 0.0484 0.0005 0.0005 7517.0556 0.9999 ⋯ - β[2] 1.1174 0.0351 0.0004 0.0004 7513.8645 1.0000 ⋯ - β[3] -0.3724 0.2208 0.0022 0.0020 10488.8748 0.9999 ⋯ - β[4] 1.6312 0.0000 0.0000 0.0000 NaN NaN ⋯ - β[5] 0.3333 0.1345 0.0013 0.0012 10310.8481 1.0000 ⋯ - 1 column omitted - -julia> m3_6.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 2.3312 4.1421 7.0190 15.1146 160.2559 - α -1.6312 -1.6312 -1.6312 -1.6312 -1.6312 - β[1] 0.0177 0.0783 0.1108 0.1441 0.2055 - β[2] 1.0485 1.0932 1.1182 1.1415 1.1844 - β[3] -0.8226 -0.5151 -0.3640 -0.2234 0.0370 - β[4] 1.6312 1.6312 1.6312 1.6312 1.6312 - β[5] 0.0619 0.2449 0.3343 0.4241 0.5893 - -``` - -## Example 4: Negative Binomial Regression - -```jldoctest examples -julia> sanction = dataset("Zelig", "sanction") -78×8 DataFrame - Row │ Mil Coop Target Import Export Cost Num NCost - │ Int32 Int32 Int32 Int32 Int32 Int32 Int32 Cat… -─────┼─────────────────────────────────────────────────────────────────── - 1 │ 1 4 3 1 1 4 15 major loss - 2 │ 0 2 3 0 1 3 4 modest loss - 3 │ 0 1 3 1 0 2 1 little effect - 4 │ 1 1 3 1 1 2 1 little effect - 5 │ 0 1 3 1 1 2 1 little effect - 6 │ 0 1 3 0 1 2 1 little effect - 7 │ 1 2 2 0 1 2 3 little effect - 8 │ 0 1 3 0 0 2 3 little effect 
- ⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ - 72 │ 0 2 2 0 0 1 8 net gain - 73 │ 1 3 1 1 1 2 14 little effect - 74 │ 0 2 1 0 0 1 2 net gain - 75 │ 0 1 3 0 1 2 1 little effect - 76 │ 0 4 3 1 0 2 13 little effect - 77 │ 0 1 2 0 0 1 1 net gain - 78 │ 1 3 1 1 1 2 10 little effect - 63 rows omitted - -``` - -**Negative Binomial Regression - Likelihood method** -```jldoctest examples -julia> m4_1 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression()); - -julia> m4_1.fit -───────────────────────────────────────────────────────────────────────────────── - Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95% -───────────────────────────────────────────────────────────────────────────────── -(Intercept) -1.10939 0.459677 -2.41 0.0158 -2.01034 -0.208444 -Target 0.0117398 0.142779 0.08 0.9345 -0.268101 0.291581 -Coop 1.0506 0.111556 9.42 <1e-20 0.831949 1.26924 -NCost: major loss -0.204244 0.508156 -0.40 0.6877 -1.20021 0.791723 -NCost: modest loss 1.27142 0.290427 4.38 <1e-04 0.702197 1.84065 -NCost: net gain 0.176797 0.254291 0.70 0.4869 -0.321604 0.675197 -───────────────────────────────────────────────────────────────────────────────── - -julia> m4_1.AIC -363.85804286542685 - -julia> m4_1.BIC -377.9982958255644 - -``` - - **NegativeBinomial Regression with Ridge Prior** -```jldoctest examples -julia> m4_2 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Ridge()); - -julia> m4_2.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 2.0413 0.4480 0.0045 0.0051 8307.4837 0.9999 ⋯ - α -1.0893 0.5193 0.0052 0.0087 3899.6757 0.9999 ⋯ - β[1] -0.0048 0.1616 0.0016 0.0023 5386.0221 0.9999 ⋯ - β[2] 1.0642 0.1311 0.0013 0.0018 5604.7062 0.9999 ⋯ - β[3] -0.1729 0.5552 0.0056 0.0058 8708.3320 1.0000 ⋯ - β[4] 1.2807 0.3178 0.0032 0.0034 8557.2897 0.9999 ⋯ - β[5] 0.1556 0.2840 0.0028 0.0036 6126.1145 0.9999 ⋯ - 1 column omitted - -julia> m4_2.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 1.3137 1.7227 1.9925 2.3068 3.0644 - α -2.1030 -1.4363 -1.0925 -0.7387 -0.0708 - β[1] -0.3276 -0.1114 -0.0035 0.1031 0.3128 - β[2] 0.8102 0.9757 1.0640 1.1518 1.3243 - β[3] -1.2103 -0.5564 -0.1824 0.1904 0.9515 - β[4] 0.6475 1.0692 1.2835 1.4937 1.9101 - β[5] -0.3947 -0.0331 0.1557 0.3436 0.7122 - -``` - - **NegativeBinomial Regression with Laplace Prior** -```jldoctest examples -julia> m4_3 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Laplace()); - -julia> m4_3.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 2.1062 0.4582 0.0046 0.0057 5683.7921 1.0001 ⋯ - α -1.0101 0.5154 0.0052 0.0084 3795.0884 1.0002 ⋯ - β[1] -0.0204 0.1590 0.0016 0.0022 5324.1385 1.0001 ⋯ - β[2] 1.0495 0.1329 0.0013 0.0018 4926.4231 1.0002 ⋯ - β[3] -0.1473 0.5059 0.0051 0.0054 7542.6944 1.0000 ⋯ - β[4] 1.2814 0.3226 0.0032 0.0038 6496.8709 1.0000 ⋯ - β[5] 0.1210 0.2777 0.0028 0.0039 6228.0313 1.0000 ⋯ - 1 column omitted - -julia> m4_3.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 1.3411 1.7873 2.0594 2.3793 3.1542 - α -2.0197 -1.3645 -1.0063 -0.6555 -0.0143 - β[1] -0.3337 -0.1265 -0.0194 0.0874 0.2908 - β[2] 0.7878 0.9605 1.0493 1.1380 1.3108 - β[3] -1.1584 -0.4804 -0.1489 0.1745 0.8704 - β[4] 0.6527 1.0626 1.2827 1.4935 1.9379 - β[5] -0.4316 -0.0628 0.1198 0.3065 0.6654 - -``` - - 
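The two priors used next, Cauchy and Student-t, are far heavier-tailed than the Ridge (Gaussian) and Laplace priors above. The sketch below compares upper-tail mass at unit scale; unit scale is an assumption made for comparability, not CRRao's default hyperparameters.

```julia
# P(X > 4) under each prior family, all standardized to unit scale.
using Distributions

ccdf(Normal(), 4.0)   # ≈ 3.2e-5   Gaussian tail (Ridge-type prior)
ccdf(Laplace(), 4.0)  # ≈ 0.0092   Laplace tail
ccdf(TDist(2), 4.0)   # ≈ 0.029    Student-t tail, ν = 2
ccdf(Cauchy(), 4.0)   # ≈ 0.078    Cauchy tail, over 2000x the Gaussian mass
```

Heavy tails keep large coefficients plausible a priori, but they also let the sampler wander into extreme regions of parameter space, which is exactly what derails the TDist run below.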
-**Negative Binomial Regression with Cauchy Prior** -```jldoctest examples -m4_4 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Cauchy()) - -m4_4.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 2.0321 0.4414 0.0044 0.0046 7966.3509 1.0000 ⋯ - α -1.0429 0.5163 0.0052 0.0092 3481.2902 0.9999 ⋯ - β[1] -0.0115 0.1626 0.0016 0.0024 4862.1372 0.9999 ⋯ - β[2] 1.0545 0.1318 0.0013 0.0019 5091.0562 0.9999 ⋯ - β[3] -0.1563 0.5484 0.0055 0.0058 7173.1471 0.9999 ⋯ - β[4] 1.2755 0.3303 0.0033 0.0036 7331.3107 1.0000 ⋯ - β[5] 0.1370 0.2782 0.0028 0.0038 5766.5502 0.9999 ⋯ - 1 column omitted - -m4_4.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 1.3087 1.7197 1.9826 2.2938 3.0432 - α -2.0846 -1.3881 -1.0360 -0.6882 -0.0508 - β[1] -0.3277 -0.1201 -0.0135 0.1004 0.3104 - β[2] 0.8001 0.9644 1.0524 1.1424 1.3192 - β[3] -1.1840 -0.5340 -0.1662 0.1949 0.9594 - β[4] 0.6424 1.0565 1.2674 1.4899 1.9446 - β[5] -0.4143 -0.0500 0.1407 0.3254 0.6775 - -``` - - **Negative Binomial Regression with TDist Prior** -```jldoctest examples -julia> m4_5 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_TDist()); -ERROR: DomainError with Dual{ForwardDiff.Tag{Turing.Essential.var"#f#4"{DynamicPPL.TypedVarInfo{NamedTuple{(:λ, :ν, :α, :β), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:λ, Setfield.IdentityLens}, Int64}, Vector{Distributions.InverseGamma{Float64}}, Vector{AbstractPPL.VarName{:λ, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ν, Setfield.IdentityLens}, Int64}, Vector{Distributions.InverseGamma{Float64}}, Vector{AbstractPPL.VarName{:ν, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, Setfield.IdentityLens}, Int64}, Vector{Distributions.LocationScale{Float64, Distributions.Continuous, Distributions.TDist{Float64}}}, Vector{AbstractPPL.VarName{:α, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, Setfield.IdentityLens}, Int64}, Vector{Distributions.Product{Distributions.Continuous, Distributions.LocationScale{Float64, Distributions.Continuous, Distributions.TDist{Float64}}, FillArrays.Fill{Distributions.LocationScale{Float64, Distributions.Continuous, Distributions.TDist{Float64}}, 1, Tuple{Base.OneTo{Int64}}}}}, Vector{AbstractPPL.VarName{:β, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{CRRao.var"#NegBinomReg#19"{Float64}, (:X, :y), (), (), Tuple{Matrix{Float64}, Vector{Int32}}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.Sampler{Turing.Inference.NUTS{Turing.Essential.ForwardDiffAD{0}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}}(0.0,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN): -NegativeBinomial: the condition zero(p) < p <= one(p) is not satisfied. 
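The `DomainError` above has a simple mechanism. With a log link, the negative-binomial likelihood is typically evaluated with success probability p = r/(r + μ), where μ = exp(Xβ); this parameterization is an assumption about the model internals, not read from CRRao's source. Under a heavy-tailed prior, NUTS can propose coefficients so extreme that μ overflows, forcing p to 0 and violating `zero(p) < p <= one(p)`:

```julia
# Sketch of the failure mode behind the DomainError, assuming the
# mean parameterization p = r/(r + μ) with a log link μ = exp(x'β).
using Distributions

r = 1.0
μ = exp(800.0)   # an extreme proposal: exp overflows, μ == Inf
p = r / (r + μ)  # p == 0.0, outside the required (0, 1] domain
# NegativeBinomial(r, p)   # uncommenting reproduces the DomainError
```

The `UndefVarError`s that follow are a direct consequence: the sampler aborted, so the assignment to `m4_5` never completed.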
- -julia> m4_5.summaries -ERROR: UndefVarError: m4_5 not defined - -julia> m4_5.quantiles -ERROR: UndefVarError: m4_5 not defined - -``` - - **Negative Binomial Regression with Uniform Prior** -```jldoctest examples -julia> m4_6 = @fitmodel((Num ~ Target + Coop + NCost), sanction,NegBinomRegression(),Prior_Uniform(),1.0); - -julia> m4_6.summaries -Summary Statistics - parameters mean std naive_se mcse ess rhat ⋯ - Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ - - λ 3.2065 0.4036 0.0040 0.0054 4236.5829 1.0011 ⋯ - α -2.5288 0.0000 0.0000 0.0000 NaN NaN ⋯ - β[1] 0.3328 0.0984 0.0010 0.0027 204.3503 1.0604 ⋯ - β[2] 1.3577 0.0936 0.0009 0.0037 85.4193 1.1485 ⋯ - β[3] -1.5072 1.0686 0.0107 0.0999 21.9705 3.2882 ⋯ - β[4] 1.0054 0.3172 0.0032 0.0167 49.1963 1.3045 ⋯ - β[5] 0.5403 0.2348 0.0023 0.0097 75.8215 1.1720 ⋯ - 1 column omitted - -julia> m4_6.quantiles -Quantiles - parameters 2.5% 25.0% 50.0% 75.0% 97.5% - Symbol Float64 Float64 Float64 Float64 Float64 - - λ 2.6474 2.9139 3.1328 3.4106 4.1948 - α -2.5288 -2.5288 -2.5288 -2.5288 -2.5288 - β[1] 0.1367 0.2674 0.3346 0.3987 0.5259 - β[2] 1.1746 1.2938 1.3580 1.4202 1.5452 - β[3] -2.5288 -2.5288 -1.5590 -0.5322 0.3807 - β[4] 0.3983 0.7860 1.0072 1.2188 1.6254 - β[5] 0.0954 0.3776 0.5400 0.6997 1.0010 - ``` \ No newline at end of file From 49ea30ec264adba5abc06c9b805de66186afa146 Mon Sep 17 00:00:00 2001 From: Siddhant Chaudhary Date: Thu, 19 May 2022 18:52:02 +0200 Subject: [PATCH 6/9] Fixing architecture in documentation CI. --- .github/workflows/documentation.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 2d7148c..ab47929 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -16,7 +16,7 @@ jobs: - uses: julia-actions/setup-julia@latest with: version: '1.7.2' - arch: x86_64 + arch: x64 - name: Install documentation dependencies run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Build and deploy From 5501184247929600cc2b1404d85823e40780b605 Mon Sep 17 00:00:00 2001 From: Siddhant Chaudhary Date: Thu, 19 May 2022 19:11:17 +0200 Subject: [PATCH 7/9] Adding all doctests for the first set of models. 
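One quick way to sanity-check the summary tables these doctests pin down: the `naive_se` column is the posterior standard deviation divided by the square root of the number of retained draws, while `mcse` additionally accounts for autocorrelation through `ess`. A back-of-the-envelope check against the tables above, assuming 10,000 retained draws for the fit in question (the reported std/naive_se ratios are consistent with that, but the draw count is an assumption):

```julia
# naive_se = std / sqrt(N): recover N from a (std, naive_se) pair.
std_λ, naive_se_λ = 0.4480, 0.0045   # λ row of m4_2.summaries above

N = (std_λ / naive_se_λ)^2           # ≈ 9911, i.e. ≈ 10_000 draws
std_λ / sqrt(10_000)                 # ≈ 0.00448, printed as 0.0045
```

When `ess` collapses and `rhat` climbs, as in the Uniform-prior fits above, `mcse` is the honest uncertainty to read, not `naive_se`.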
--- docs/src/examples.md | 124 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/docs/src/examples.md b/docs/src/examples.md index 293acb3..17fbcf3 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -166,4 +166,128 @@ Quantiles β[2] -4.5039 -3.3021 -2.6899 -2.0502 -0.6440 β[3] -0.2071 0.9672 1.5988 2.2439 3.7647 +``` + +**Linear Regression - Laplace Prior** + +```jldoctest examples +julia> m1_3 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Laplace()); + +julia> m1_3.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + v 4.3182 3.3442 0.0334 0.0490 3968.7204 0.9999 ⋯ + σ 2.6657 0.3792 0.0038 0.0056 4690.6502 1.0000 ⋯ + α 29.0672 5.1669 0.0517 0.0918 3367.7350 1.0001 ⋯ + β[1] -0.0399 0.0105 0.0001 0.0002 4116.7824 1.0005 ⋯ + β[2] -2.7069 0.9341 0.0093 0.0170 3286.4012 1.0002 ⋯ + β[3] 1.5082 0.9373 0.0094 0.0163 3601.6346 1.0002 ⋯ + 1 column omitted + +julia> m1_3.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + v 1.2224 2.3903 3.4576 5.1836 12.4068 + σ 2.0359 2.3975 2.6246 2.8949 3.5178 + α 18.0169 25.8440 29.3179 32.5968 38.4400 + β[1] -0.0609 -0.0466 -0.0396 -0.0330 -0.0199 + β[2] -4.4455 -3.3403 -2.7409 -2.1152 -0.7174 + β[3] -0.2418 0.8759 1.4648 2.1019 3.4821 + +``` + + **Linear Regression - Cauchy Prior** +```jldoctest examples +julia> m1_4 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Cauchy(),20000); + +julia> m1_4.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + σ 2.5855 0.3416 0.0024 0.0036 9218.6691 1.0001 ⋯ + α 30.3875 4.6394 0.0328 0.0678 4559.8857 1.0001 ⋯ + β[1] -0.0394 0.0099 0.0001 0.0001 7652.1219 1.0000 ⋯ + β[2] -2.8435 0.8542 0.0060 0.0116 4998.6993 1.0001 ⋯ + β[3] 1.2513 0.8428 0.0060 0.0120 5011.2306 1.0000 ⋯ + 1 column omitted + +julia> m1_4.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + σ 2.0120 2.3452 2.5484 2.7877 3.3675 + α 20.9628 27.4262 30.4704 33.5255 39.1492 + β[1] -0.0591 -0.0459 -0.0393 -0.0328 -0.0201 + β[2] -4.4925 -3.4133 -2.8494 -2.2897 -1.1335 + β[3] -0.3345 0.6759 1.2385 1.7936 2.9585 + +``` + + **Linear Regression - T-Distributed Prior** + +```jldoctest examples +julia> m1_5 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); + +julia> m1_5.summaries +Summary Statistics + parameters mean std naive_se mcse ess rhat ⋯ + Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯ + + ν 1.0539 0.5597 0.0056 0.0070 5800.1893 1.0003 ⋯ + σ 2.6265 0.3686 0.0037 0.0047 6165.5244 0.9999 ⋯ + α 30.2167 4.8679 0.0487 0.1012 2225.6405 0.9999 ⋯ + β[1] -0.0393 0.0103 0.0001 0.0002 3319.3510 1.0005 ⋯ + β[2] -2.8300 0.8976 0.0090 0.0187 2396.9552 1.0001 ⋯ + β[3] 1.2837 0.8841 0.0088 0.0179 2334.0136 0.9999 ⋯ + 1 column omitted + +julia> m1_5.quantiles +Quantiles + parameters 2.5% 25.0% 50.0% 75.0% 97.5% + Symbol Float64 Float64 Float64 Float64 Float64 + + ν 0.3731 0.6686 0.9233 1.2896 2.4911 + σ 2.0385 2.3621 2.5841 2.8463 3.4736 + α 20.4434 27.0806 30.3379 33.4157 39.6471 + β[1] -0.0597 -0.0461 -0.0393 -0.0324 -0.0192 + β[2] -4.5979 -3.4317 -2.8360 -2.2500 -1.0505 + β[3] -0.4012 0.6970 1.2552 1.8472 3.0717 + +``` + + **Linear Regression - Uniform Prior** +```jldoctest examples +julia> m1_6 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist()); + 
+julia> m1_6.summaries
+Summary Statistics
+  parameters      mean       std   naive_se      mcse         ess      rhat   ⋯
+      Symbol   Float64   Float64    Float64   Float64     Float64   Float64   ⋯
+
+           ν    1.0665    0.5900     0.0059    0.0081   5791.3987    1.0007   ⋯
+           σ    2.6276    0.3678     0.0037    0.0056   3788.8270    0.9999   ⋯
+           α   30.3304    4.7387     0.0474    0.0881   2697.4202    0.9999   ⋯
+        β[1]   -0.0394    0.0102     0.0001    0.0002   3969.3250    0.9999   ⋯
+        β[2]   -2.8421    0.8679     0.0087    0.0159   2813.1886    0.9999   ⋯
+        β[3]    1.2646    0.8642     0.0086    0.0157   2858.6901    0.9999   ⋯
+                                                                1 column omitted
+
+julia> m1_6.quantiles
+Quantiles
+  parameters      2.5%     25.0%     50.0%     75.0%     97.5%
+      Symbol   Float64   Float64   Float64   Float64   Float64
+
+           ν    0.3749    0.6698    0.9298    1.3095    2.5508
+           σ    2.0306    2.3712    2.5893    2.8418    3.4723
+           α   20.4677   27.3179   30.5001   33.5657   39.1292
+        β[1]   -0.0596   -0.0460   -0.0392   -0.0324   -0.0198
+        β[2]   -4.5194   -3.4288   -2.8534   -2.2704   -1.1239
+        β[3]   -0.3362    0.6886    1.2226    1.8201    3.0601
+
 ```
\ No newline at end of file

From 0866c866e1edfced360c4862ecea34075398074d Mon Sep 17 00:00:00 2001
From: ShouvikGhosh2048
Date: Tue, 24 May 2022 13:21:13 +0530
Subject: [PATCH 8/9] Revert back to two examples to check GH actions (along
 with a closing triple quote)

---
 docs/src/examples.md | 124 -------------------------------------------
 1 file changed, 124 deletions(-)

diff --git a/docs/src/examples.md b/docs/src/examples.md
index 17fbcf3..293acb3 100644
--- a/docs/src/examples.md
+++ b/docs/src/examples.md
@@ -166,128 +166,4 @@ Quantiles
         β[2]   -4.5039   -3.3021   -2.6899   -2.0502   -0.6440
         β[3]   -0.2071    0.9672    1.5988    2.2439    3.7647
 
-```
-
-**Linear Regression - Laplace Prior**
-
-```jldoctest examples
-julia> m1_3 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Laplace());
-
-julia> m1_3.summaries
-Summary Statistics
-  parameters      mean       std   naive_se      mcse         ess      rhat   ⋯
-      Symbol   Float64   Float64    Float64   Float64     Float64   Float64   ⋯
-
-           v    4.3182    3.3442     0.0334    0.0490   3968.7204    0.9999   ⋯
-           σ    2.6657    0.3792     0.0038    0.0056   4690.6502    1.0000   ⋯
-           α   29.0672    5.1669     0.0517    0.0918   3367.7350    1.0001   ⋯
-        β[1]   -0.0399    0.0105     0.0001    0.0002   4116.7824    1.0005   ⋯
-        β[2]   -2.7069    0.9341     0.0093    0.0170   3286.4012    1.0002   ⋯
-        β[3]    1.5082    0.9373     0.0094    0.0163   3601.6346    1.0002   ⋯
-                                                                1 column omitted
-
-julia> m1_3.quantiles
-Quantiles
-  parameters      2.5%     25.0%     50.0%     75.0%     97.5%
-      Symbol   Float64   Float64   Float64   Float64   Float64
-
-           v    1.2224    2.3903    3.4576    5.1836   12.4068
-           σ    2.0359    2.3975    2.6246    2.8949    3.5178
-           α   18.0169   25.8440   29.3179   32.5968   38.4400
-        β[1]   -0.0609   -0.0466   -0.0396   -0.0330   -0.0199
-        β[2]   -4.4455   -3.3403   -2.7409   -2.1152   -0.7174
-        β[3]   -0.2418    0.8759    1.4648    2.1019    3.4821
-
-```
-
-**Linear Regression - Cauchy Prior**
-```jldoctest examples
-julia> m1_4 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_Cauchy(),20000);
-
-julia> m1_4.summaries
-Summary Statistics
-  parameters      mean       std   naive_se      mcse         ess      rhat   ⋯
-      Symbol   Float64   Float64    Float64   Float64     Float64   Float64   ⋯
-
-           σ    2.5855    0.3416     0.0024    0.0036   9218.6691    1.0001   ⋯
-           α   30.3875    4.6394     0.0328    0.0678   4559.8857    1.0001   ⋯
-        β[1]   -0.0394    0.0099     0.0001    0.0001   7652.1219    1.0000   ⋯
-        β[2]   -2.8435    0.8542     0.0060    0.0116   4998.6993    1.0001   ⋯
-        β[3]    1.2513    0.8428     0.0060    0.0120   5011.2306    1.0000   ⋯
-                                                                1 column omitted
-
-julia> m1_4.quantiles
-Quantiles
-  parameters      2.5%     25.0%     50.0%     75.0%     97.5%
-      Symbol   Float64   Float64   Float64   Float64   Float64
-
-           σ    2.0120    2.3452    2.5484    2.7877    3.3675
-           α   20.9628   27.4262   30.4704   33.5255   39.1492
-        β[1]   -0.0591   -0.0459   -0.0393   -0.0328   -0.0201
-        β[2]   -4.4925   -3.4133   -2.8494   -2.2897   -1.1335
-        β[3]   -0.3345    0.6759    1.2385    1.7936    2.9585
-
-```
-
-**Linear Regression - T-Distributed Prior**
-
-```jldoctest examples
-julia> m1_5 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist());
-
-julia> m1_5.summaries
-Summary Statistics
-  parameters      mean       std   naive_se      mcse         ess      rhat   ⋯
-      Symbol   Float64   Float64    Float64   Float64     Float64   Float64   ⋯
-
-           ν    1.0539    0.5597     0.0056    0.0070   5800.1893    1.0003   ⋯
-           σ    2.6265    0.3686     0.0037    0.0047   6165.5244    0.9999   ⋯
-           α   30.2167    4.8679     0.0487    0.1012   2225.6405    0.9999   ⋯
-        β[1]   -0.0393    0.0103     0.0001    0.0002   3319.3510    1.0005   ⋯
-        β[2]   -2.8300    0.8976     0.0090    0.0187   2396.9552    1.0001   ⋯
-        β[3]    1.2837    0.8841     0.0088    0.0179   2334.0136    0.9999   ⋯
-                                                                1 column omitted
-
-julia> m1_5.quantiles
-Quantiles
-  parameters      2.5%     25.0%     50.0%     75.0%     97.5%
-      Symbol   Float64   Float64   Float64   Float64   Float64
-
-           ν    0.3731    0.6686    0.9233    1.2896    2.4911
-           σ    2.0385    2.3621    2.5841    2.8463    3.4736
-           α   20.4434   27.0806   30.3379   33.4157   39.6471
-        β[1]   -0.0597   -0.0461   -0.0393   -0.0324   -0.0192
-        β[2]   -4.5979   -3.4317   -2.8360   -2.2500   -1.0505
-        β[3]   -0.4012    0.6970    1.2552    1.8472    3.0717
-
-```
-
-**Linear Regression - T-Distributed Prior (second run)**
-```jldoctest examples
-julia> m1_6 = @fitmodel((MPG ~ HP + WT+Gear),df,LinearRegression(),Prior_TDist());
-
-julia> m1_6.summaries
-Summary Statistics
-  parameters      mean       std   naive_se      mcse         ess      rhat   ⋯
-      Symbol   Float64   Float64    Float64   Float64     Float64   Float64   ⋯
-
-           ν    1.0665    0.5900     0.0059    0.0081   5791.3987    1.0007   ⋯
-           σ    2.6276    0.3678     0.0037    0.0056   3788.8270    0.9999   ⋯
-           α   30.3304    4.7387     0.0474    0.0881   2697.4202    0.9999   ⋯
-        β[1]   -0.0394    0.0102     0.0001    0.0002   3969.3250    0.9999   ⋯
-        β[2]   -2.8421    0.8679     0.0087    0.0159   2813.1886    0.9999   ⋯
-        β[3]    1.2646    0.8642     0.0086    0.0157   2858.6901    0.9999   ⋯
-                                                                1 column omitted
-
-julia> m1_6.quantiles
-Quantiles
-  parameters      2.5%     25.0%     50.0%     75.0%     97.5%
-      Symbol   Float64   Float64   Float64   Float64   Float64
-
-           ν    0.3749    0.6698    0.9298    1.3095    2.5508
-           σ    2.0306    2.3712    2.5893    2.8418    3.4723
-           α   20.4677   27.3179   30.5001   33.5657   39.1292
-        β[1]   -0.0596   -0.0460   -0.0392   -0.0324   -0.0198
-        β[2]   -4.5194   -3.4288   -2.8534   -2.2704   -1.1239
-        β[3]   -0.3362    0.6886    1.2226    1.8201    3.0601
-
 ```
\ No newline at end of file

From d56013d4d91595e712e3192c659378f6013d9fae Mon Sep 17 00:00:00 2001
From: Siddhant Chaudhary
Date: Tue, 24 May 2022 14:03:17 +0530
Subject: [PATCH 9/9] Adding newline before ending ticks.

---
 docs/src/examples.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/src/examples.md b/docs/src/examples.md
index 293acb3..607db4c 100644
--- a/docs/src/examples.md
+++ b/docs/src/examples.md
@@ -7,6 +7,7 @@ julia> Logging.disable_logging(Logging.Warn); CRRao.setprogress!(false);
 
 julia> CRRao.set_rng(StableRNG(1234))
 StableRNGs.LehmerRNG(state=0x000000000000000000000000000009a5)
+
 ```
 
 ## Example 1: Linear Regression
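Every seed change in the series above invalidates all of the recorded tables at once, so the doctest outputs have to be regenerated wholesale before each push. Below is a minimal sketch (not part of the patch series) of how that check and regeneration could be run locally, assuming the standard Documenter.jl doctest workflow and only the package names already visible in these patches:

```julia
# Minimal sketch, not part of the patch series: re-checking the jldoctest
# blocks locally with Documenter.jl before relying on GH actions.
using Documenter, CRRao

# Make `using CRRao` implicit in every doctest block of the package docs.
DocMeta.setdocmeta!(CRRao, :DocTestSetup, :(using CRRao); recursive = true)

# Compare each recorded output against a fresh run; this errors on any
# mismatch, which is what the CI job above would catch.
doctest(CRRao)

# After an intentional seed change (e.g. CRRao.set_rng(StableRNG(1234))),
# rewrite the stale outputs in place instead of editing them by hand.
doctest(CRRao; fix = true)
```

StableRNG is a natural choice for seeding here because its stream is stable across Julia releases, which is what lets byte-for-byte outputs like the tables above survive CI upgrades.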