diff --git "a/36/replication_package/lucid_code.html" "b/36/replication_package/lucid_code.html"
new file mode 100644
--- /dev/null
+++ "b/36/replication_package/lucid_code.html"
@@ -0,0 +1,4116 @@
+Replication Code for Lucid Survey Data Analysis

1 Introduction

+

This markdown file contains the code necessary to run the analysis using data collected from the 2018 Lucid survey (Studies 1 and 2).

+

When compiling this file, verify that it is saved in the same folder as the other replication materials; R Markdown sets the working directory to this folder by default. When running the code line by line, set the working directory to the folder containing these materials.
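
For line-by-line runs, a minimal sketch (the path is hypothetical; point it at your local copy of the materials):

setwd("~/replication_package") # hypothetical path; adjust to where these materials live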

+
+
+

2 Setup

+
# Clear working environment
+rm(list = ls())
+
+# Log output
+sink(file = "lucid_code_log.txt") 
+
+# Create folder to store figures
+dir.create("lucid_figures")
+
+# Install required packages
+#install.packages(c("grDevices", "ggplot2", "dplyr", "estimatr", "psych", "knitr"))
+
+# Load packages
+library(grDevices)  # for plotting
+library(ggplot2)    # for plotting
+library(dplyr)      # for data cleaning
+library(estimatr)   # for robust standard errors
+library(psych)      # for calculating measures of internal consistency
+library(knitr)      # for compiling markdown
+
+# Load post functions (see README file for description)
+source("post.R")
+source("postSim.R")
+
+# Function for running lm with a common set of controls
+quick_lm <- function(outcome = my_outcome, keepers = my_keepers, 
+                     new = NULL, data = my_data){
+  keepers <- paste(keepers, collapse = " + ") # character string of IVs separated by "+"
+  # only append `new` when extra regressors are supplied; otherwise the
+  # trailing " + " would make the formula unparseable
+  rhs <- if (is.null(new)) keepers else paste(keepers, paste(new, collapse = " + "), sep = " + ")
+  model <- paste0(outcome, " ~ ", rhs)
+  fit <- lm(as.formula(model), data = data)
+  fit
+}
+
+# Function for running glm (logit) with the same common set of controls
+quick_glm <- function(outcome = my_outcome, keepers = my_keepers, 
+                      new = NULL, data = my_data){
+  keepers <- paste(keepers, collapse = " + ") # character string of IVs separated by "+"
+  rhs <- if (is.null(new)) keepers else paste(keepers, paste(new, collapse = " + "), sep = " + ")
+  model <- paste0(outcome, " ~ ", rhs)
+  fit <- glm(as.formula(model), data = data, family = binomial(link = "logit"))
+  fit
+}
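+
+# Usage sketch (a hypothetical call, not part of the original pipeline;
+# the outcome and regressors are variables created during cleaning below):
+# fit <- quick_lm(outcome = "exp1_correct", keepers = c("pid7", "ideo7"),
+#                 new = "trait_index.s", data = data)
+# summary(fit)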
+
+# Function to make colors in base R plots transparent
+makeTransparent<-function(someColor, alpha=100)
+{
+  newColor<-col2rgb(someColor)
+  apply(newColor, 2, function(curcoldata){rgb(red=curcoldata[1], green=curcoldata[2],
+                                              blue=curcoldata[3],alpha=alpha, 
+                                              maxColorValue=255)})
+}
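+# e.g., makeTransparent("red", alpha = 100) returns a hex color that is
+# roughly 40% opaque (alpha runs on a 0-255 scale)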
+
+# Standard error
+std_error <- function(var){
+  sd(var, na.rm = T) / sqrt(length(var[!is.na(var)]))
+}
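+# e.g., std_error(c(2, 4, 6, NA)) computes the SE over the 3 non-missing values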
+
+# Function to put variables on a 0-1 scale
+rescale_01 <- function(x, max){
+  (x-1)/(max-1)
+}
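+# e.g., for a 7-point item: rescale_01(c(1, 4, 7), max = 7) returns 0, 0.5, 1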
+
+# Set seed
+set.seed(4453)
+
+# Set number of simulations for post-estimation to 10000
+nsims <- 10000
+
+# Import data
+data_original <- read.csv("lucid_data.csv", na.strings = c("", "NA")) # treat empty cells and "NA" as missing
+
+
+

3 Data Cleaning

+
+

3.1 Attention Checks

+

As discussed in the manuscript, respondents who failed at least one of the two attention checks were prevented from completing the remainder of the survey. Additionally, respondents who took the survey on a mobile device (despite being instructed that they would be unable to complete it on one) were screened out. 1.4% of respondents were removed for using a mobile device, 5.9% for failing the Trump attention check (correctly identifying Trump as the president of the United States), and 6.9% for failing the numeracy attention check (correctly adding 2 + 2).

+
## 
+##                  numeracy qualtrics_detected_mobile                     trump 
+##                       145                        29                       124
+
## 
+##                  numeracy qualtrics_detected_mobile                     trump 
+##                  6.859035                  1.371807                  5.865658 
+##                      <NA> 
+##                 85.903500
+
## [1] 1816
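
The chunk producing the output above is not echoed. A minimal sketch of the screening step, assuming a hypothetical column screen_fail that records why a respondent was removed (NA = completed the survey):

+# table(data_original$screen_fail)                                      # counts by removal reason
+# 100 * prop.table(table(data_original$screen_fail, useNA = "always"))  # percentages
+# data <- data_original[is.na(data_original$screen_fail), ]             # keep completes only
+# nrow(data)                                                            # 1816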
+
+
+

3.2 Party ID and Ideology

+
# Political Party ------------------------------------------------------------------------
+data$pid7 <- ifelse(data$pid_gen == 1, data$demstr, 
+                    ifelse(data$pid_gen == 2, data$repstr,
+                           ifelse(data$pid_gen > 2, data$pidlean, NA)))
+
+# Indicator for Republican (vs. Democrat) ------------------------------------------------
+# where Republican = 1, Democrat = 0, and Independent = NA
+data$rep_dem <- ifelse(data$pid7 < 4, 0, 
+                       ifelse(data$pid7 > 4, 1, NA))
+
+table(data$pid7, data$rep_dem, useNA = "always")   
+
##       
+##          0   1 <NA>
+##   1    360   0    0
+##   2    242   0    0
+##   3    122   0    0
+##   4      0   0  264
+##   5      0 130    0
+##   6      0 189    0
+##   7      0 274    0
+##   <NA>   0   0  235
+
# Ideology -------------------------------------------------------------------------------
+data$ideo7 <- data$ideo
+
+data$ideo7[data$ideo7 == -99] <- NA
+
+table(data$ideo, data$ideo7, useNA = "always")
+
##       
+##          1   2   3   4   5   6   7 <NA>
+##   -99    0   0   0   0   0   0   0    7
+##   1    117   0   0   0   0   0   0    0
+##   2      0 249   0   0   0   0   0    0
+##   3      0   0 172   0   0   0   0    0
+##   4      0   0   0 574   0   0   0    0
+##   5      0   0   0   0 164   0   0    0
+##   6      0   0   0   0   0 280   0    0
+##   7      0   0   0   0   0   0 144    0
+##   <NA>   0   0   0   0   0   0   0  109
+
# Indicator for Conservative (vs. Liberal) -----------------------------------------------
+data$con_lib <- ifelse(data$ideo7 < 4, 0,
+                       ifelse(data$ideo7 > 4, 1, NA))
+
+table(data$ideo7, data$con_lib, useNA = "always")                                  
+
##       
+##          0   1 <NA>
+##   1    117   0    0
+##   2    249   0    0
+##   3    172   0    0
+##   4      0   0  574
+##   5      0 164    0
+##   6      0 280    0
+##   7      0 144    0
+##   <NA>   0   0  116
+
+
+

3.3 Experiment 1 (Evidence Interpretation Task)

+

Respondents were randomly assigned to receive evidence about one of five salient political issues: a concealed handgun ban, a minimum wage increase, affirmative action, sanctuary cities, or abortion. Respondents were also randomized to receive evidence that supported either the liberal or the conservative position on the assigned issue, which was achieved by swapping the words “increase” and “decrease” in the column names of the evidence table (see Appendix for full question wording).

+
+

3.3.1 Create Variables Indicating Issue and Evidence Direction Condition Assignment

+
# Create variable for evidence direction condition ---------------------------------------
+data$exp1_condition <- 
+  ifelse(data$DO.BR.FL_133 == "Direction Experiments--Condition A", "A", 
+         ifelse(data$DO.BR.FL_133 == "Direction Experiments-- Condition B", "B", "foo"))
+
+table(data$exp1_condition, data$DO.BR.FL_133, useNA = "always")
+
##       
+##        Direction Experiments-- Condition B Direction Experiments--Condition A
+##   A                                      0                                786
+##   B                                    784                                  0
+##   <NA>                                   0                                  0
+##       
+##        <NA>
+##   A       0
+##   B       0
+##   <NA>  246
+
# Create variable for issue condition assignment -----------------------------------------
+table(data$pol_issue, useNA = "always") # distribution of issue assignment variable
+
## 
+##                    abortion          affirmative action 
+##                         364                         366 
+## carrying concealed handguns    raising the minimum wage 
+##                         364                         357 
+##            sanctuary cities                        <NA> 
+##                         365                           0
+
# Create character version of issue assignment variable
+data$pol_issue.ch <- as.character(data$pol_issue)
+
+table(data$pol_issue, data$pol_issue.ch, useNA = "always")
+
##                              
+##                               abortion affirmative action
+##   abortion                         364                  0
+##   affirmative action                 0                366
+##   carrying concealed handguns        0                  0
+##   raising the minimum wage           0                  0
+##   sanctuary cities                   0                  0
+##   <NA>                               0                  0
+##                              
+##                               carrying concealed handguns
+##   abortion                                              0
+##   affirmative action                                    0
+##   carrying concealed handguns                         364
+##   raising the minimum wage                              0
+##   sanctuary cities                                      0
+##   <NA>                                                  0
+##                              
+##                               raising the minimum wage sanctuary cities <NA>
+##   abortion                                           0                0    0
+##   affirmative action                                 0                0    0
+##   carrying concealed handguns                        0                0    0
+##   raising the minimum wage                         357                0    0
+##   sanctuary cities                                   0              365    0
+##   <NA>                                               0                0    0
+
# drop original pol_issue variable
+data <- select(data, -c(pol_issue)) 
+
+# create final issue assignment variable with abbreviated level names
+data$exp1_issue <-
+  ifelse(data$pol_issue.ch == "abortion", "abort", 
+      ifelse(data$pol_issue.ch == "affirmative action", "aa", 
+            ifelse(data$pol_issue.ch == "sanctuary cities", "imm",
+                    ifelse(data$pol_issue.ch == "raising the minimum wage", "wage", 
+                          ifelse(data$pol_issue.ch == "carrying concealed handguns", "gun", 
+                                 "foo")))))
+
+table(data$exp1_issue, data$pol_issue.ch, useNA = "always")
+
##        
+##         abortion affirmative action carrying concealed handguns
+##   aa           0                366                           0
+##   abort      364                  0                           0
+##   gun          0                  0                         364
+##   imm          0                  0                           0
+##   wage         0                  0                           0
+##   <NA>         0                  0                           0
+##        
+##         raising the minimum wage sanctuary cities <NA>
+##   aa                           0                0    0
+##   abort                        0                0    0
+##   gun                          0                0    0
+##   imm                          0              365    0
+##   wage                       357                0    0
+##   <NA>                         0                0    0
+
# remove pol_issue.ch
+data <- select(data,-c(pol_issue.ch)) 
+
+# Create indicator for whether evidence points toward liberal position -------------------
+data$exp1_liberal_evidence <- 
+  ifelse(data$exp1_condition == "A" & data$exp1_issue %in% c("gun", "wage", "imm"), 1,
+         ifelse(data$exp1_condition == "B" & data$exp1_issue %in% c("aa", "abort"), 1, 0))
+
+table(data$exp1_liberal_evidence, data$exp1_condition, data$exp1_issue)
+
## , ,  = aa
+## 
+##    
+##       A   B
+##   0 160   0
+##   1   0 164
+## 
+## , ,  = abort
+## 
+##    
+##       A   B
+##   0 164   0
+##   1   0 151
+## 
+## , ,  = gun
+## 
+##    
+##       A   B
+##   0   0 153
+##   1 159   0
+## 
+## , ,  = imm
+## 
+##    
+##       A   B
+##   0   0 158
+##   1 150   0
+## 
+## , ,  = wage
+## 
+##    
+##       A   B
+##   0   0 158
+##   1 153   0
+
+
+

3.3.2 Combine Outcome Measure Columns Into Single Variable

+

Which survey questions a respondent answered depends on the experimental condition to which they were assigned. For instance, a respondent assigned to the gun control condition received questions about gun control and not, for instance, immigration. Responses to the experimental outcomes are therefore recorded in different columns of the data set. To combine responses to the different outcome measures into a single column, we sum across all outcome measures (regardless of whether the respondent was assigned to answer each measure). Because the rowSums() function ignores missing values (i.e., NAs), and the outcome measures a respondent was not assigned to answer contain NAs, the column containing the sum holds the appropriate outcome response for each respondent.

+

There are 10 columns in which the experimental outcome can be recorded, as there are 10 experimental conditions: 5 (issue condition = abortion, affirmative action, gun control, minimum wage, or immigration) X 2 (evidence direction condition = A or B). In the code below we use the following shorthand to refer to each issue: abort = abortion, aa = affirmative action, gun = gun control, wage = minimum wage, imm = immigration.

+
# Combine Experiment 1 Outcomes Into Single Variable (ex)
+# Create vector of outcome variable names from survey 
+# (A = direction condition A, B = direction condition B)
+exp1_A <- c("exp1_abort_A", "exp1_aa_A", "exp1_gun_A", "exp1_wage_A", "exp1_imm_A") 
+exp1_B <- c("exp1_abort_B", "exp1_aa_B", "exp1_gun_B", "exp1_wage_B", "exp1_imm_B")
+
+# all outcome variables are integer vectors
+apply(data[,c(exp1_A, exp1_B)], 2, class)
+
## exp1_abort_A    exp1_aa_A   exp1_gun_A  exp1_wage_A   exp1_imm_A exp1_abort_B 
+##    "integer"    "integer"    "integer"    "integer"    "integer"    "integer" 
+##    exp1_aa_B   exp1_gun_B  exp1_wage_B   exp1_imm_B 
+##    "integer"    "integer"    "integer"    "integer"
+
# 4 unique values of outcome variables
+apply(data[,c(exp1_A, exp1_B)], 2, unique)
+
## $exp1_abort_A
+## [1] NA  1  2
+## 
+## $exp1_aa_A
+## [1]   2  NA   1 -99
+## 
+## $exp1_gun_A
+## [1]  NA   2   1 -99
+## 
+## $exp1_wage_A
+## [1]  NA   2   1 -99
+## 
+## $exp1_imm_A
+## [1]  NA   1   2 -99
+## 
+## $exp1_abort_B
+## [1] NA  1  2
+## 
+## $exp1_aa_B
+## [1]  NA   1   2 -99
+## 
+## $exp1_gun_B
+## [1]  NA   2   1 -99
+## 
+## $exp1_wage_B
+## [1]  NA   2   1 -99
+## 
+## $exp1_imm_B
+## [1]  NA   1   2 -99
+
# Sum across columns for respondents in evidence direction condition A
+data$response_1A <- rowSums(data[,exp1_A], na.rm=T) 
+data$response_1A[data$response_1A == -99] <- NA # recode skipped responses as NA
+data$response_1A[data$response_1A == 0] <- NA # recode 0 as NA, since NA + NA = 0
+head(data[1:200,c(exp1_A, "response_1A")]) # verify method
+
##    exp1_abort_A exp1_aa_A exp1_gun_A exp1_wage_A exp1_imm_A response_1A
+## 3            NA         2         NA          NA         NA           2
+## 5            NA        NA         NA          NA         NA          NA
+## 9            NA         1         NA          NA         NA           1
+## 10           NA        NA         NA          NA         NA          NA
+## 15           NA        NA         NA          NA         NA          NA
+## 18           NA        NA          2          NA         NA           2
+
# Sum across columns for respondents in evidence direction condition B
+data$response_1B <- rowSums(data[,exp1_B], na.rm=T)
+data$response_1B[data$response_1B == -99] <- NA # recode skipped, but seen responses as NA
+data$response_1B[data$response_1B == 0] <- NA # recode 0 as NA, since NA + NA = 0
+head(data[1:200,c(exp1_B, "response_1B")]) # verify method
+
##    exp1_abort_B exp1_aa_B exp1_gun_B exp1_wage_B exp1_imm_B response_1B
+## 3            NA        NA         NA          NA         NA          NA
+## 5             1        NA         NA          NA         NA           1
+## 9            NA        NA         NA          NA         NA          NA
+## 10           NA        NA          2          NA         NA           2
+## 15           NA        NA          1          NA         NA           1
+## 18           NA        NA         NA          NA         NA          NA
+
# Create one response variable for experiment 1, combining direction conditions A and B.
+data$exp1_response <- 
+  ifelse(data$exp1_condition == "A", data$response_1A, 
+         ifelse(data$exp1_condition == "B", data$response_1B, 9999))
+
+# Verify method
+table(data$exp1_response, data$response_1A, useNA = "always")
+
##       
+##          1   2 <NA>
+##   1    493   0  403
+##   2      0 280  371
+##   <NA>   0   0  269
+
table(data$exp1_response, data$response_1B, useNA = "always")
+
##       
+##          1   2 <NA>
+##   1    403   0  493
+##   2      0 371  280
+##   <NA>   0   0  269
+
+
+

3.3.3 Create Indicator for Correct Response for Experiment 1

+

1 indicates correct interpretation of the evidence and 0 indicates incorrect interpretation. In direction condition A, the 2nd response option is correct. In direction condition B, the 1st response option is correct.

+
data$exp1_correct <- 
+  ifelse(data$exp1_condition == "A" & data$exp1_response == 2, 1,
+         ifelse(data$exp1_condition == "A" & data$exp1_response == 1, 0,  
+                ifelse(data$exp1_condition == "B" & data$exp1_response == 2, 0,
+                       ifelse(data$exp1_condition == "B" & data$exp1_response == 1, 1, 
+                              9999)))) 
+table(data$exp1_correct, useNA = "always")
+
## 
+##    0    1 <NA> 
+##  864  683  269
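
Equivalently, the same indicator can be computed more compactly (a sketch, not the original code):

+# data$exp1_correct <- as.numeric(ifelse(data$exp1_condition == "A",
+#                                        data$exp1_response == 2,   # option 2 correct in A
+#                                        data$exp1_response == 1))  # option 1 correct in B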
+
+
+
+

3.4 Experiment 2

+

For Experiment 2, respondents were randomly assigned to receive evidence about one of the four issues they were not assigned in Experiment 1 (i.e., if a respondent received information about gun control in Experiment 1, they were randomly assigned to receive information about a minimum wage increase, affirmative action, sanctuary cities, or abortion in Experiment 2). Respondents were again randomized to receive evidence that supported either the liberal or the conservative position on the assigned issue. Below we follow the same approach as in Experiment 1.

+
+

3.4.1 Create Variables Indicating Issue and Evidence Direction Condition Assignment

+
# Create variable for experimental condition
+data$exp2_condition <- 
+  ifelse(data$DO.BR.FL_137 == "Quality Experiments-- Condition A", "A", 
+         ifelse(data$DO.BR.FL_137 == "Quality Experiments-- Condition B", "B", NA))
+
+table(data$DO.BR.FL_137, data$exp2_condition)
+
##                                    
+##                                       A   B
+##   Quality Experiments-- Condition A 778   0
+##   Quality Experiments-- Condition B   0 781
+
# Create variable indicating the issue each respondent received
+data$pol_issue_2.ch <- as.character(data$pol_issue_2)
+
+table(data$pol_issue_2, data$pol_issue_2.ch)
+
##                              
+##                               abortion affirmative action
+##   abortion                         353                  0
+##   affirmative action                 0                369
+##   carrying concealed handguns        0                  0
+##   raising the minimum wage           0                  0
+##   sanctuary cities                   0                  0
+##                              
+##                               carrying concealed handguns
+##   abortion                                              0
+##   affirmative action                                    0
+##   carrying concealed handguns                         362
+##   raising the minimum wage                              0
+##   sanctuary cities                                      0
+##                              
+##                               raising the minimum wage sanctuary cities
+##   abortion                                           0                0
+##   affirmative action                                 0                0
+##   carrying concealed handguns                        0                0
+##   raising the minimum wage                         361                0
+##   sanctuary cities                                   0              371
+
# Drop original pol_issue variable
+data <- select(data, -c(pol_issue_2)) 
+
+data$exp2_issue <- 
+  ifelse(data$pol_issue_2.ch == "abortion", "abort", 
+         ifelse(data$pol_issue_2.ch == "affirmative action", "aa", 
+                ifelse(data$pol_issue_2.ch == "sanctuary cities", "imm", 
+                       ifelse(data$pol_issue_2.ch == "raising the minimum wage", "wage", 
+                              ifelse(data$pol_issue_2.ch == "carrying concealed handguns", 
+                                     "gun", NA)))))
+
+table(data$pol_issue_2.ch, data$exp2_issue)
+
##                              
+##                                aa abort gun imm wage
+##   abortion                      0   353   0   0    0
+##   affirmative action          369     0   0   0    0
+##   carrying concealed handguns   0     0 362   0    0
+##   raising the minimum wage      0     0   0   0  361
+##   sanctuary cities              0     0   0 371    0
+
# Drop original pol_issue.ch variable
+data <- select(data, -c(pol_issue_2.ch)) 
+
+# Create indicator for whether evidence points toward liberal position
+data$exp2_liberal_evidence <- ifelse(data$exp2_condition == "A" & 
+                                       data$exp2_issue %in% c("aa", "abort"), 1,
+                                     ifelse(data$exp2_condition == "B" &
+                                        data$exp2_issue %in% c("gun", "wage", "imm"), 1, 0))
+
+
+

3.4.2 Aggregate Outcome Responses Across Conditions

+

Following the same procedure used in Experiment 1, we create common response variables for Experiment 2 below. Because Experiment 2 has two outcomes, the ‘sample size’ and ‘causal claim’ measures, we create two such variables.

+
# Exp 2 (Condition A, Sample Size)
+exp2_A_sample   <- c("exp2_abort_A_1", "exp2_aa_A_1", "exp2_gun_A_1", 
+                     "exp2_wage_A_1", "exp2_imm_A_1") 
+
+# Exp 2 (Condition B, Sample Size)
+exp2_B_sample   <- c("exp2_abort_B_1", "exp2_aa_B_1", "exp2_gun_B_1", 
+                     "exp2_wage_B_1", "exp2_imm_B_1")
+
+# Exp 2 (Condition A, Causal)
+exp2_A_causal <- c("exp2_abort_A_2", "exp2_aa_A_2", "exp2_gun_A_2", 
+                   "exp2_wage_A_2", "exp2_imm_A_2") 
+
+# Exp 2 (Condition B, Causal)
+exp2_B_causal <- c("exp2_abort_B_2", "exp2_aa_B_2", "exp2_gun_B_2", 
+                   "exp2_wage_B_2", "exp2_imm_B_2") 
+
+# Collapse response variable for sample size question
+data$exp2_sample <- rowSums(data[,c(exp2_A_sample, exp2_B_sample)], na.rm=T)
+data$exp2_sample[data$exp2_sample == 0] <- NA
+data$exp2_sample[data$exp2_sample == -99] <- NA
+
+head(data[1:200,c(exp2_A_sample, exp2_B_sample, "exp2_sample")]) # verify method
+
##    exp2_abort_A_1 exp2_aa_A_1 exp2_gun_A_1 exp2_wage_A_1 exp2_imm_A_1
+## 3              NA          NA           NA            NA           NA
+## 5              NA          NA           NA             4           NA
+## 9              NA          NA           NA            NA           NA
+## 10             NA          NA           NA            NA           NA
+## 15             NA          NA           NA             3           NA
+## 18             NA           6           NA            NA           NA
+##    exp2_abort_B_1 exp2_aa_B_1 exp2_gun_B_1 exp2_wage_B_1 exp2_imm_B_1
+## 3              NA          NA            4            NA           NA
+## 5              NA          NA           NA            NA           NA
+## 9              NA          NA           NA             4           NA
+## 10              4          NA           NA            NA           NA
+## 15             NA          NA           NA            NA           NA
+## 18             NA          NA           NA            NA           NA
+##    exp2_sample
+## 3            4
+## 5            4
+## 9            4
+## 10           4
+## 15           3
+## 18           6
+
# Reverse code sample size outcome, such that high values indicate size is sufficient 
+# (note: causal outcome is already coded such that higher values indicate that a causal
+# claim can be made)
+table(data$exp2_sample)
+
## 
+##   1   2   3   4   5   6   7 
+##  69  79  93 698 246 159 207
+
data$exp2_goodSample <- 8 - data$exp2_sample
+table(data$exp2_sample, data$exp2_goodSample) 
+
##    
+##       1   2   3   4   5   6   7
+##   1   0   0   0   0   0   0  69
+##   2   0   0   0   0   0  79   0
+##   3   0   0   0   0  93   0   0
+##   4   0   0   0 698   0   0   0
+##   5   0   0 246   0   0   0   0
+##   6   0 159   0   0   0   0   0
+##   7 207   0   0   0   0   0   0
+
# Create response variable for causal inference outcome
+data$exp2_goodCausal <- rowSums(data[,c(exp2_A_causal, exp2_B_causal)], na.rm=T)
+data$exp2_goodCausal[data$exp2_goodCausal == 0] <- NA #recode unseen skipped responses as NA
+data$exp2_goodCausal[data$exp2_goodCausal == -99] <- NA #recode seen skipped responses as NA
+
+head(data[1:200,c(exp2_A_causal, exp2_B_causal, "exp2_goodCausal")]) # verify method
+
##    exp2_abort_A_2 exp2_aa_A_2 exp2_gun_A_2 exp2_wage_A_2 exp2_imm_A_2
+## 3              NA          NA           NA            NA           NA
+## 5              NA          NA           NA             5           NA
+## 9              NA          NA           NA            NA           NA
+## 10             NA          NA           NA            NA           NA
+## 15             NA          NA           NA             6           NA
+## 18             NA           7           NA            NA           NA
+##    exp2_abort_B_2 exp2_aa_B_2 exp2_gun_B_2 exp2_wage_B_2 exp2_imm_B_2
+## 3              NA          NA            7            NA           NA
+## 5              NA          NA           NA            NA           NA
+## 9              NA          NA           NA             5           NA
+## 10              4          NA           NA            NA           NA
+## 15             NA          NA           NA            NA           NA
+## 18             NA          NA           NA            NA           NA
+##    exp2_goodCausal
+## 3                7
+## 5                5
+## 9                5
+## 10               4
+## 15               6
+## 18               7
+
+
+
+

3.5 Create Congeniality Variables For Experiments 1 and 2

+

Now that we have collapsed the outcome variables into a single outcome variable for Experiment 1 and two outcome variables for Experiment 2 (the sample size and causal claim outcomes), we next determine whether each respondent received congenial or uncongenial information, based upon both the direction of the information they were randomly assigned to receive and their prior beliefs. As explained in the manuscript and appendix, we operationalize prior beliefs separately in three ways: the respondent’s party identification, self-reported ideology, and position on the issue they received information about. For each operationalization, we create two congeniality variables: a continuous version (e.g., 1 = very uncongenial, 7 = very congenial) and a binary version (e.g., 0 = uncongenial, 1 = congenial). For the binary measures, moderates and non-leaning Independents (for the ideology and party ID operationalizations, respectively) are coded as NA; for the continuous measures, they are coded as 4, the midpoint of the 1-7 scale. For example, under the party ID operationalization, a strong Democrat (pid7 = 1) who receives liberal evidence scores 7 (very congenial) on the continuous measure, while the same respondent receiving conservative evidence scores 1. Issue positions are measured on a 6-point scale, so the issue position operationalization of congeniality does not face a similar midpoint issue.

+
+

3.5.1 Experiment 1

+
# Party ID -------------------------------------------
+
+# Binary
+data$exp1_congenial_pid_binary <- 
+  ifelse(data$pid7 < 4 & data$exp1_liberal_evidence == 1, 1, 
+         ifelse(data$pid7 > 4 & data$exp1_liberal_evidence == 0, 1,
+                ifelse(data$pid7 == 4, NA, 0)))
+head(data[,c("exp1_congenial_pid_binary", "exp1_liberal_evidence", "pid7")]) # verify
+
##    exp1_congenial_pid_binary exp1_liberal_evidence pid7
+## 3                          1                     0    5
+## 5                          0                     1    6
+## 9                         NA                     0    4
+## 10                         0                     0    1
+## 15                        NA                     0    4
+## 18                         1                     1    2
+
# Continuous
+data$exp1_congenial_pid_cont <- 
+  ifelse(data$exp1_liberal_evidence == 1, 8 - data$pid7, 
+         ifelse(data$exp1_liberal_evidence == 0, data$pid7, NA))
+
+data$exp1_congenial_pid_cont.s <- as.numeric(scale(data$exp1_congenial_pid_cont))
+
+head(data[,c("exp1_congenial_pid_cont", "exp1_liberal_evidence", "pid7")]) # verify
+
##    exp1_congenial_pid_cont exp1_liberal_evidence pid7
+## 3                        5                     0    5
+## 5                        2                     1    6
+## 9                        4                     0    4
+## 10                       1                     0    1
+## 15                       4                     0    4
+## 18                       6                     1    2
+
# Ideology ---------------------------------------------------
+
+# Binary
+data$exp1_congenial_ideo_binary <- 
+  ifelse(data$ideo7 < 4 & data$exp1_liberal_evidence == 1, 1, 
+         ifelse(data$ideo7 > 4 & data$exp1_liberal_evidence == 0, 1, 
+                ifelse(data$ideo7 == 4, NA, 0))) 
+data[1:10,c("exp1_congenial_ideo_binary", "exp1_liberal_evidence", "ideo7")] # verify
+
##    exp1_congenial_ideo_binary exp1_liberal_evidence ideo7
+## 3                           0                     0     1
+## 5                           0                     1     5
+## 9                           0                     0     1
+## 10                          0                     0     1
+## 15                          0                     0     3
+## 18                          1                     1     3
+## 19                          1                     1     3
+## 23                          1                     1     2
+## 25                          1                     1     3
+## 28                         NA                     0     4
+
# Continuous
+data$exp1_congenial_ideo_cont <- 
+  ifelse(data$exp1_liberal_evidence == 1, 8 - data$ideo7,
+         ifelse(data$exp1_liberal_evidence == 0, data$ideo7, NA))
+
+data$exp1_congenial_ideo_cont.s <- as.numeric(scale(data$exp1_congenial_ideo_cont))
+
+data[1:10,c("exp1_congenial_ideo_cont", "exp1_liberal_evidence", "ideo7")] # verify
+
##    exp1_congenial_ideo_cont exp1_liberal_evidence ideo7
+## 3                         1                     0     1
+## 5                         3                     1     5
+## 9                         1                     0     1
+## 10                        1                     0     1
+## 15                        3                     0     3
+## 18                        5                     1     3
+## 19                        5                     1     3
+## 23                        6                     1     2
+## 25                        5                     1     3
+## 28                        4                     0     4
+
# Issue Position -------------------------------------------------------------------------
+
+# Continuous
+
+# distributions of issue position variables (note that skipped responses = -99)
+apply(data[,c("issue_gun", "issue_aa", "issue_abort", "issue_imm", "issue_wage")], 
+      2, function(x) table(x, useNA = "always"))
+
##      issue_gun issue_aa issue_abort issue_imm issue_wage
+## -99          3        5           3         3          4
+## 1          581      394         566       435        198
+## 2          172      186         185       181        116
+## 3          189      270         211       215        180
+## 4          220      337         191       203        228
+## 5          242      240         183       178        261
+## 6          312      287         380       504        732
+## <NA>        97       97          97        97         97
+
# recode NAs
+data$issue_gun[data$issue_gun == -99] <- NA
+data$issue_aa[data$issue_aa == -99] <- NA
+data$issue_abort[data$issue_abort == -99] <- NA
+data$issue_imm[data$issue_imm == -99] <- NA
+data$issue_wage[data$issue_wage == -99] <- NA
+
+# verify
+apply(data[,c("issue_gun", "issue_aa", "issue_abort", "issue_imm", "issue_wage")], 
+      2, function(x) table(x, useNA = "always"))
+
##      issue_gun issue_aa issue_abort issue_imm issue_wage
+## 1          581      394         566       435        198
+## 2          172      186         185       181        116
+## 3          189      270         211       215        180
+## 4          220      337         191       203        228
+## 5          242      240         183       178        261
+## 6          312      287         380       504        732
+## <NA>       100      102         100       100        101
+
# recode such that higher values indicate liberal positions
+data$gun_lib <- 7 - data$issue_gun
+data$wage_lib <- data$issue_wage # already coded in correct direction
+data$aa_lib <- data$issue_aa # already coded in correct direction
+data$imm_lib <- 7 - data$issue_imm
+data$abort_lib <- 7 - data$issue_abort
+
+issue_vars <- c("gun_lib", "wage_lib", "aa_lib", "imm_lib", "abort_lib")
+
+psych::alpha(data[,issue_vars]) # verify all load in same direction
+
## Warning in cor.smooth(r): Matrix was not positive definite, smoothing was done
+
## Warning in cor.smooth(R): Matrix was not positive definite, smoothing was done
+
## In smc, smcs < 0 were set to .0
+## In smc, smcs < 0 were set to .0
+
## Warning in cor.smooth(R): Matrix was not positive definite, smoothing was done
+
## In smc, smcs < 0 were set to .0
+
## Warning in cor.smooth(R): Matrix was not positive definite, smoothing was done
+
## In smc, smcs < 0 were set to .0
+
## Warning in cor.smooth(R): Matrix was not positive definite, smoothing was done
+
## In smc, smcs < 0 were set to .0
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, issue_vars])
+## 
+##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean  sd median_r
+##       0.78      0.78    0.69      0.42 3.6 0.0085  3.9 1.3     0.35
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.76 0.78 0.8 
+## 
+##  Reliability if an item is dropped:
+##            raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
+## gun_lib         0.77      0.78    0.62      0.47 3.6   0.0091 0.070  0.38
+## wage_lib        0.69      0.69    0.65      0.35 2.2   0.0120 0.012  0.33
+## aa_lib          0.78      0.78    0.65      0.47 3.6   0.0093 0.073  0.35
+## imm_lib         0.76      0.76    0.60      0.45 3.2   0.0098 0.080  0.37
+## wage_lib.1      0.69      0.69    0.65      0.35 2.2   0.0120 0.012  0.33
+## 
+##  Item statistics 
+##               n raw.r std.r r.cor r.drop mean  sd
+## gun_lib    1716  0.66  0.65  0.55   0.45  3.8 1.9
+## wage_lib   1715  0.83  0.84  0.68   0.71  4.4 1.8
+## aa_lib     1714  0.63  0.64  0.51   0.43  3.4 1.8
+## imm_lib    1716  0.71  0.69  0.61   0.50  3.4 2.0
+## wage_lib.1 1715  0.83  0.84  0.68   0.71  4.4 1.8
+## 
+## Non missing response frequency for each item
+##               1    2    3    4    5    6 miss
+## gun_lib    0.18 0.14 0.13 0.11 0.10 0.34 0.06
+## wage_lib   0.12 0.07 0.10 0.13 0.15 0.43 0.06
+## aa_lib     0.23 0.11 0.16 0.20 0.14 0.17 0.06
+## imm_lib    0.29 0.10 0.12 0.13 0.11 0.25 0.06
+## wage_lib.1 0.12 0.07 0.10 0.13 0.15 0.43 0.06
+
# Initialize continuous issue congeniality measure
+data$exp1_congenial_issue_cont <- NA
+
+# note that the logic of the creation of this continuous congeniality measure is 
+# similar to that of the continuous pid and ideology congeniality measures, though
+# in this case the issue positions are coded such that liberal values are larger
+# (for pid and ideology, larger values indicated more conservative views). This difference 
+# is taken into account below. 
+
+data$exp1_congenial_issue_cont <- 
+  ifelse(data$exp1_issue == "gun" & data$exp1_liberal_evidence == 1, data$gun_lib,
+  ifelse(data$exp1_issue == "gun" & data$exp1_liberal_evidence == 0, 7 - data$gun_lib,
+  
+  ifelse(data$exp1_issue == "wage" & data$exp1_liberal_evidence == 1, data$wage_lib, 
+  ifelse(data$exp1_issue == "wage" & data$exp1_liberal_evidence == 0, 7 - data$wage_lib,
+         
+  ifelse(data$exp1_issue == "aa" & data$exp1_liberal_evidence == 1, data$aa_lib, 
+  ifelse(data$exp1_issue == "aa" & data$exp1_liberal_evidence == 0, 7 - data$aa_lib,
+   
+  ifelse(data$exp1_issue == "imm" & data$exp1_liberal_evidence == 1, data$imm_lib, 
+  ifelse(data$exp1_issue == "imm" & data$exp1_liberal_evidence == 0, 7 - data$imm_lib,
+                                    
+  ifelse(data$exp1_issue == "abort" & data$exp1_liberal_evidence == 1, data$abort_lib, 
+  ifelse(data$exp1_issue == "abort" & data$exp1_liberal_evidence == 0, 7 - data$abort_lib, 
+         NA))))))))))
+
+data$exp1_congenial_issue_cont.s <- as.numeric(scale(data$exp1_congenial_issue_cont))
+
+
+# Binary
+
+data$exp1_congenial_issue_binary <- ifelse(data$exp1_congenial_issue_cont >= 4, 1, 
+                                          ifelse(data$exp1_congenial_issue_cont < 4, 0, NA))
+
+table(data$exp1_congenial_issue_binary, data$exp1_congenial_issue_cont) # verify
+
##    
+##       1   2   3   4   5   6
+##   0 398 195 205   0   0   0
+##   1   0   0   0 182 190 396
+
# Check correlations between continuous pid, ideology, and issue measures of congeniality
+
+cor(data$exp1_congenial_pid_cont.s, 
+    data$exp1_congenial_ideo_cont.s, 
+    use = "complete.obs")
+
## [1] 0.6472544
+
cor(data$exp1_congenial_pid_cont.s, 
+    data$exp1_congenial_issue_cont.s, 
+    use = "complete.obs")
+
## [1] 0.4415604
+
cor(data$exp1_congenial_ideo_cont.s, 
+    data$exp1_congenial_issue_cont.s, 
+    use = "complete.obs")
+
## [1] 0.4017106
+
+
+

3.5.2 Experiment 2

+
# Party ID -------------------------------------------------------------------------------
+
+# Binary
+
+data$exp2_congenial_pid_binary <- 
+  ifelse(data$pid7 < 4 & data$exp2_liberal_evidence == 1, 1, 
+         ifelse(data$pid7 > 4 & data$exp2_liberal_evidence == 0, 1, 
+                ifelse(data$pid7 == 4, NA, 0)))
+
+# Continuous
+
+data$exp2_congenial_pid_cont <- 
+  ifelse(data$exp2_liberal_evidence == 1, 8 - data$pid7, 
+         ifelse(data$exp2_liberal_evidence == 0, data$pid7, NA))
+
+data$exp2_congenial_pid_cont.s <- as.numeric(scale(data$exp2_congenial_pid_cont))
+
+
+# Ideology -------------------------------------------------------------------------------
+
+# Binary
+
+data$exp2_congenial_ideo_binary <- 
+  ifelse(data$ideo7 < 4 & data$exp2_liberal_evidence == 1, 1, 
+         ifelse(data$ideo7 > 4 & data$exp2_liberal_evidence == 0, 1, 
+                ifelse(data$ideo7 == 4, NA, 0))) 
+
+# Continuous ideo congeniality measure ---------------------------------------------------
+
+data$exp2_congenial_ideo_cont <- 
+  ifelse(data$exp2_liberal_evidence == 1, 8 - data$ideo7, 
+         ifelse(data$exp2_liberal_evidence == 0, data$ideo7, NA))
+
+data$exp2_congenial_ideo_cont.s <- as.numeric(scale(data$exp2_congenial_ideo_cont))
+
+# Issue Position -------------------------------------------------------------------------
+
+# Continuous
+
+# Initialize continuous congeniality measure
+data$exp2_congenial_issue_cont <- NA
+
+data$exp2_congenial_issue_cont <- 
+  ifelse(data$exp2_issue == "gun" & data$exp2_liberal_evidence == 1, data$gun_lib, 
+  ifelse(data$exp2_issue == "gun" & data$exp2_liberal_evidence == 0, 7 - data$gun_lib,
+                                               
+  ifelse(data$exp2_issue == "wage" & data$exp2_liberal_evidence == 1, data$wage_lib, 
+  ifelse(data$exp2_issue == "wage" & data$exp2_liberal_evidence == 0, 7 - data$wage_lib,
+                                                             
+  ifelse(data$exp2_issue == "aa" & data$exp2_liberal_evidence == 1, data$aa_lib, 
+  ifelse(data$exp2_issue == "aa" & data$exp2_liberal_evidence == 0, 7 - data$aa_lib,
+                                                                           
+  ifelse(data$exp2_issue == "imm" & data$exp2_liberal_evidence == 1, data$imm_lib, 
+  ifelse(data$exp2_issue == "imm" & data$exp2_liberal_evidence == 0, 7 - data$imm_lib,
+                                                                                         
+  ifelse(data$exp2_issue == "abort" & data$exp2_liberal_evidence == 1, data$abort_lib, 
+  ifelse(data$exp2_issue == "abort" & data$exp2_liberal_evidence == 0, 7 - data$abort_lib, 
+         NA))))))))))
+
+data$exp2_congenial_issue_cont.s <- as.numeric(scale(data$exp2_congenial_issue_cont))
+
+# Binary
+
+data$exp2_congenial_issue_binary <- 
+  ifelse(data$exp2_congenial_issue_cont >= 4, 1,
+         ifelse(data$exp2_congenial_issue_cont < 4, 0, NA))
+
+
+
+

3.6 Standardize Outcomes for Experiment 2

+
# Unstandardized version of DVs
+data$exp2_goodSample_uns <- data$exp2_goodSample
+data$exp2_goodCausal_uns <- data$exp2_goodCausal
+
+# Drop original variables
+data <- data %>% dplyr::select(-exp2_goodSample, -exp2_goodCausal)
+
+colnames(data)[grep(colnames(data), pattern = "good")] # verify
+
## [1] "exp2_goodSample_uns" "exp2_goodCausal_uns"
+
# Standardize DVs
+data$exp2_goodSample <- as.numeric(scale(data$exp2_goodSample_uns))
+data$exp2_goodCausal <- as.numeric(scale(data$exp2_goodCausal_uns))
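+
+# scale() centers each DV at its mean and divides by its SD, so the
+# standardized versions have mean 0 and SD 1; the tables below map
+# standardized values back to raw responses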
+
+table(data$exp2_goodSample, data$exp2_goodSample_uns)
+
##                     
+##                        1   2   3   4   5   6   7
+##   -1.70055014543059  207   0   0   0   0   0   0
+##   -1.02873321329538    0 159   0   0   0   0   0
+##   -0.356916281160165   0   0 246   0   0   0   0
+##   0.314900650975048    0   0   0 698   0   0   0
+##   0.986717583110261    0   0   0   0  93   0   0
+##   1.65853451524547     0   0   0   0   0  79   0
+##   2.33035144738069     0   0   0   0   0   0  69
+
table(data$exp2_goodCausal, data$exp2_goodCausal_uns)
+
##                     
+##                        1   2   3   4   5   6   7
+##   -2.32254579302489   85   0   0   0   0   0   0
+##   -1.71020223496628    0  90   0   0   0   0   0
+##   -1.09785867690767    0   0  96   0   0   0   0
+##   -0.485515118849059   0   0   0 338   0   0   0
+##   0.12682843920955     0   0   0   0 352   0   0
+##   0.739171997268159    0   0   0   0   0 348   0
+##   1.35151555532677     0   0   0   0   0   0 236
+
+
+

3.7 Put Continuous Congeniality Measures on 0-1 Scale

+
data$exp1_congenial_issue_cont.01 <- (data$exp1_congenial_issue_cont - 1)/5
+data$exp1_congenial_ideo_cont.01 <- (data$exp1_congenial_ideo_cont - 1)/6
+data$exp1_congenial_pid_cont.01 <- (data$exp1_congenial_pid_cont - 1)/6
+
+data$exp2_congenial_issue_cont.01 <- (data$exp2_congenial_issue_cont - 1)/5
+data$exp2_congenial_ideo_cont.01 <- (data$exp2_congenial_ideo_cont - 1)/6
+data$exp2_congenial_pid_cont.01 <- (data$exp2_congenial_pid_cont - 1)/6
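+
+# note: these divisions reproduce the rescale_01() helper from Setup by hand,
+# e.g. rescale_01(data$exp1_congenial_issue_cont, max = 6) gives the same result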
+
+with(data,table(exp1_congenial_pid_cont.01, exp1_congenial_pid_cont))
+
##                           exp1_congenial_pid_cont
+## exp1_congenial_pid_cont.01   1   2   3   4   5   6   7
+##          0                 320   0   0   0   0   0   0
+##          0.166666666666667   0 227   0   0   0   0   0
+##          0.333333333333333   0   0 108   0   0   0   0
+##          0.5                 0   0   0 261   0   0   0
+##          0.666666666666667   0   0   0   0 143   0   0
+##          0.833333333333333   0   0   0   0   0 202   0
+##          1                   0   0   0   0   0   0 309
+
with(data,table(exp1_congenial_ideo_cont.01, exp1_congenial_ideo_cont))
+
##                            exp1_congenial_ideo_cont
+## exp1_congenial_ideo_cont.01   1   2   3   4   5   6   7
+##           0                 107   0   0   0   0   0   0
+##           0.166666666666667   0 256   0   0   0   0   0
+##           0.333333333333333   0   0 153   0   0   0   0
+##           0.5                 0   0   0 519   0   0   0
+##           0.666666666666667   0   0   0   0 161   0   0
+##           0.833333333333333   0   0   0   0   0 232   0
+##           1                   0   0   0   0   0   0 138
+
with(data,table(exp1_congenial_issue_cont.01, exp1_congenial_issue_cont))
+
##                             exp1_congenial_issue_cont
+## exp1_congenial_issue_cont.01   1   2   3   4   5   6
+##                          0   398   0   0   0   0   0
+##                          0.2   0 195   0   0   0   0
+##                          0.4   0   0 205   0   0   0
+##                          0.6   0   0   0 182   0   0
+##                          0.8   0   0   0   0 190   0
+##                          1     0   0   0   0   0 396
+
with(data,table(exp2_congenial_pid_cont.01, exp2_congenial_pid_cont))
+
##                           exp2_congenial_pid_cont
+## exp2_congenial_pid_cont.01   1   2   3   4   5   6   7
+##          0                 324   0   0   0   0   0   0
+##          0.166666666666667   0 204   0   0   0   0   0
+##          0.333333333333333   0   0 123   0   0   0   0
+##          0.5                 0   0   0 260   0   0   0
+##          0.666666666666667   0   0   0   0 128   0   0
+##          0.833333333333333   0   0   0   0   0 220   0
+##          1                   0   0   0   0   0   0 300
+
with(data,table(exp2_congenial_ideo_cont.01, exp2_congenial_ideo_cont))
+
##                            exp2_congenial_ideo_cont
+## exp2_congenial_ideo_cont.01   1   2   3   4   5   6   7
+##           0                 125   0   0   0   0   0   0
+##           0.166666666666667   0 232   0   0   0   0   0
+##           0.333333333333333   0   0 159   0   0   0   0
+##           0.5                 0   0   0 516   0   0   0
+##           0.666666666666667   0   0   0   0 152   0   0
+##           0.833333333333333   0   0   0   0   0 253   0
+##           1                   0   0   0   0   0   0 118
+
with(data,table(exp2_congenial_issue_cont.01, exp2_congenial_issue_cont))
+
##                             exp2_congenial_issue_cont
+## exp2_congenial_issue_cont.01   1   2   3   4   5   6
+##                          0   413   0   0   0   0   0
+##                          0.2   0 155   0   0   0   0
+##                          0.4   0   0 196   0   0   0
+##                          0.6   0   0   0 204   0   0
+##                          0.8   0   0   0   0 175   0
+##                          1     0   0   0   0   0 413
+
+
+

3.8 Need for Closure

+
# Original coding: low values = open, high values = closed
+
+nfc_vars <- paste("nfc", 1:10, sep = "_")
+apply(data[,nfc_vars], 2, function(x) table(x, useNA = "always"))
+
##      nfc_1 nfc_2 nfc_3 nfc_4 nfc_5 nfc_6 nfc_7 nfc_8 nfc_9 nfc_10
+## -99      5     4     1     2     3     5     7     4    16      2
+## 1       70   137    56    48   169    78    65    40    31     48
+## 2       85   178   109    87   258   123    90    73    71    101
+## 3      163   309   197   182   374   285   223   176   155    241
+## 4      415   409   437   409   446   490   538   492   478    489
+## 5      472   285   428   449   221   306   360   452   443    366
+## 6      368   256   350   400   106   290   294   340   383    330
+## <NA>   238   238   238   239   239   239   239   239   239    239
+
data[,nfc_vars][data[,nfc_vars] == -99] <- NA
+
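+# rowMeans(..., na.rm = TRUE) averages over the items each respondent
+# answered, so partial responders still receive a scale score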
+data$nfc_mean <- rowMeans(data[,nfc_vars], na.rm=T)
+
+data$nfc_mean.s <- as.numeric(scale(data$nfc_mean))
+
+psych::alpha(data[,nfc_vars])
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, nfc_vars])
+## 
+##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
+##       0.84      0.84    0.85      0.35 5.4 0.0056  4.2 0.85     0.34
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.83 0.84 0.85 
+## 
+##  Reliability if an item is dropped:
+##        raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
+## nfc_1       0.83      0.84    0.84      0.36 5.1   0.0060 0.0128  0.34
+## nfc_2       0.83      0.83    0.84      0.36 5.1   0.0059 0.0132  0.35
+## nfc_3       0.82      0.82    0.83      0.34 4.7   0.0063 0.0102  0.33
+## nfc_4       0.82      0.82    0.83      0.34 4.6   0.0064 0.0130  0.32
+## nfc_5       0.84      0.85    0.85      0.38 5.5   0.0056 0.0101  0.35
+## nfc_6       0.83      0.83    0.83      0.35 4.9   0.0061 0.0133  0.33
+## nfc_7       0.83      0.83    0.83      0.35 4.9   0.0061 0.0135  0.34
+## nfc_8       0.82      0.82    0.82      0.34 4.6   0.0064 0.0089  0.33
+## nfc_9       0.82      0.82    0.82      0.34 4.6   0.0064 0.0079  0.33
+## nfc_10      0.82      0.82    0.82      0.34 4.5   0.0065 0.0126  0.32
+## 
+##  Item statistics 
+##           n raw.r std.r r.cor r.drop mean  sd
+## nfc_1  1573  0.58  0.58  0.51   0.46  4.4 1.3
+## nfc_2  1574  0.61  0.59  0.52   0.47  3.8 1.5
+## nfc_3  1577  0.68  0.68  0.65   0.58  4.3 1.3
+## nfc_4  1575  0.70  0.70  0.66   0.60  4.5 1.3
+## nfc_5  1574  0.49  0.48  0.38   0.35  3.4 1.4
+## nfc_6  1572  0.63  0.63  0.57   0.52  4.1 1.4
+## nfc_7  1570  0.63  0.63  0.57   0.53  4.2 1.3
+## nfc_8  1573  0.69  0.70  0.69   0.60  4.4 1.2
+## nfc_9  1561  0.71  0.72  0.71   0.62  4.5 1.2
+## nfc_10 1575  0.72  0.72  0.69   0.64  4.3 1.3
+## 
+## Non missing response frequency for each item
+##           1    2    3    4    5    6 miss
+## nfc_1  0.04 0.05 0.10 0.26 0.30 0.23 0.13
+## nfc_2  0.09 0.11 0.20 0.26 0.18 0.16 0.13
+## nfc_3  0.04 0.07 0.12 0.28 0.27 0.22 0.13
+## nfc_4  0.03 0.06 0.12 0.26 0.29 0.25 0.13
+## nfc_5  0.11 0.16 0.24 0.28 0.14 0.07 0.13
+## nfc_6  0.05 0.08 0.18 0.31 0.19 0.18 0.13
+## nfc_7  0.04 0.06 0.14 0.34 0.23 0.19 0.14
+## nfc_8  0.03 0.05 0.11 0.31 0.29 0.22 0.13
+## nfc_9  0.02 0.05 0.10 0.31 0.28 0.25 0.14
+## nfc_10 0.03 0.06 0.15 0.31 0.23 0.21 0.13
+
+
+

3.9 5-item Mini IPIP

+
pip_vars_original <- paste("pip", 1:5, sep = "_")
+
+apply(data[,pip_vars_original], 2, function(x) table(x, useNA = "always"))
+
##      pip_1 pip_2 pip_3 pip_4 pip_5
+## -99     28    10    34    30     6
+## 1       42   245   248   553    30
+## 2      129   427   414   504    85
+## 3      305   511   500   217   276
+## 4      636   281   313   196   736
+## 5      435   101    65    74   441
+## <NA>   241   241   242   242   242
+
# Recode NAs
+data[,pip_vars_original][data[,pip_vars_original] == -99] <- NA
+
+# Verify
+apply(data[,pip_vars_original], 2, function(x) table(x, useNA = "always"))
+
##      pip_1 pip_2 pip_3 pip_4 pip_5
+## 1       42   245   248   553    30
+## 2      129   427   414   504    85
+## 3      305   511   500   217   276
+## 4      636   281   313   196   736
+## 5      435   101    65    74   441
+## <NA>   269   251   276   272   248
+
# Reverse code two of the PIP variables
+
+# pip_1: 5 =  have vivid imagination 
+data$pip_1.r <- 6 - data$pip_1
+table(data$pip_1, data$pip_1.r) #check
+
##    
+##       1   2   3   4   5
+##   1   0   0   0   0  42
+##   2   0   0   0 129   0
+##   3   0   0 305   0   0
+##   4   0 636   0   0   0
+##   5 435   0   0   0   0
+
# pip_5: 5 = love to think up new ways of doing things
+data$pip_5.r <- 6 - data$pip_5
+table(data$pip_5, data$pip_5.r) #check
+
##    
+##       1   2   3   4   5
+##   1   0   0   0   0  30
+##   2   0   0   0  85   0
+##   3   0   0 276   0   0
+##   4   0 736   0   0   0
+##   5 441   0   0   0   0
+
# Variables that do not need to be recoded:
+# pip_2: 5 = not interested in abstract ideas
+# pip_3: 5 = have difficulty understanding abstract ideas
+# pip_4: 5 = do not have good imagination
+
+pip_vars <- c("pip_1.r", "pip_2", "pip_3", "pip_4", "pip_5.r")
+
+psych::alpha(data[,pip_vars]) # verify that items load in same direction
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, pip_vars])
+## 
+##   raw_alpha std.alpha G6(smc) average_r S/N  ase mean   sd median_r
+##       0.73      0.73    0.71      0.35 2.7 0.01  2.4 0.74     0.31
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.71 0.73 0.75 
+## 
+##  Reliability if an item is dropped:
+##         raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
+## pip_1.r      0.68      0.68    0.64      0.35 2.1    0.012 0.0108  0.31
+## pip_2        0.69      0.69    0.64      0.36 2.3    0.012 0.0067  0.35
+## pip_3        0.66      0.67    0.62      0.33 2.0    0.013 0.0107  0.31
+## pip_4        0.67      0.67    0.63      0.33 2.0    0.013 0.0159  0.28
+## pip_5.r      0.70      0.70    0.67      0.36 2.3    0.012 0.0168  0.34
+## 
+##  Item statistics 
+##            n raw.r std.r r.cor r.drop mean   sd
+## pip_1.r 1547  0.68  0.69  0.59   0.48  2.2 1.01
+## pip_2   1565  0.68  0.67  0.56   0.46  2.7 1.12
+## pip_3   1540  0.73  0.72  0.63   0.53  2.7 1.09
+## pip_4   1544  0.74  0.72  0.62   0.52  2.2 1.19
+## pip_5.r 1568  0.63  0.66  0.52   0.44  2.1 0.92
+## 
+## Non missing response frequency for each item
+##            1    2    3    4    5 miss
+## pip_1.r 0.28 0.41 0.20 0.08 0.03 0.15
+## pip_2   0.16 0.27 0.33 0.18 0.06 0.14
+## pip_3   0.16 0.27 0.32 0.20 0.04 0.15
+## pip_4   0.36 0.33 0.14 0.13 0.05 0.15
+## pip_5.r 0.28 0.47 0.18 0.05 0.02 0.14
+
# Create mean mini-IPIP score
+data$pip_mean <- rowMeans(data[,pip_vars], na.rm=T)
+
+
+

3.10 Schwartz Values

+
# Direction of original coding: higher values correspond to...
+# sv1 = open
+# sv2 = closed
+# sv3 = closed
+# sv4 = open
+# sv5 = closed
+
+sv_vars_original <- c("sv_1", "sv_2", "sv_3", "sv_4", "sv_5")
+
+apply(data[,sv_vars_original], 2, table)
+
##     sv_1 sv_2 sv_3 sv_4 sv_5
+## -99    7    3    7    4    6
+## 1     91   74  149  412  206
+## 2    273  215  284  361  284
+## 3    502  396  436  390  376
+## 4    409  439  376  259  364
+## 5    289  444  318  144  334
+
data[,sv_vars_original][data[,sv_vars_original] == -99] <- NA 
+
+apply(data[,sv_vars_original], 2, table) # verify
+
##   sv_1 sv_2 sv_3 sv_4 sv_5
+## 1   91   74  149  412  206
+## 2  273  215  284  361  284
+## 3  502  396  436  390  376
+## 4  409  439  376  259  364
+## 5  289  444  318  144  334
+
# Variables that need to be recoded are 1 and 4:
+data$sv_1.r <- 6 - data$sv_1
+table(data$sv_1, data$sv_1.r)
+
##    
+##       1   2   3   4   5
+##   1   0   0   0   0  91
+##   2   0   0   0 273   0
+##   3   0   0 502   0   0
+##   4   0 409   0   0   0
+##   5 289   0   0   0   0
+
data$sv_4.r <- 6 - data$sv_4
+table(data$sv_4, data$sv_4.r)
+
##    
+##       1   2   3   4   5
+##   1   0   0   0   0 412
+##   2   0   0   0 361   0
+##   3   0   0 390   0   0
+##   4   0 259   0   0   0
+##   5 144   0   0   0   0
+
sv_vars <- c("sv_1.r", "sv_2", "sv_3", "sv_4.r", "sv_5")
+
+psych::alpha(data[,sv_vars])
+
## Warning in psych::alpha(data[, sv_vars]): Some items were negatively correlated with the total scale and probably 
+## should be reversed.  
+## To do this, run the function again with the 'check.keys=TRUE' option
+
## Some items ( sv_1.r ) were negatively correlated with the total scale and 
+## probably should be reversed.  
+## To do this, run the function again with the 'check.keys=TRUE' option
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, sv_vars])
+## 
+##   raw_alpha std.alpha G6(smc) average_r  S/N   ase mean   sd median_r
+##       0.43      0.43    0.48      0.13 0.76 0.021  3.2 0.68      0.1
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.39 0.43 0.47 
+## 
+##  Reliability if an item is dropped:
+##        raw_alpha std.alpha G6(smc) average_r  S/N alpha se var.r   med.r
+## sv_1.r      0.48      0.49    0.47     0.194 0.96    0.020 0.033  0.2040
+## sv_2        0.30      0.30    0.34     0.096 0.42    0.027 0.047 -0.0027
+## sv_3        0.30      0.30    0.34     0.098 0.44    0.027 0.040  0.0497
+## sv_4.r      0.39      0.37    0.39     0.130 0.60    0.023 0.056  0.0979
+## sv_5        0.40      0.39    0.43     0.140 0.65    0.023 0.053  0.1034
+## 
+##  Item statistics 
+##           n raw.r std.r r.cor r.drop mean  sd
+## sv_1.r 1564  0.40  0.42  0.17  0.066  2.7 1.1
+## sv_2   1568  0.62  0.63  0.51  0.338  3.6 1.2
+## sv_3   1563  0.63  0.63  0.51  0.326  3.3 1.2
+## sv_4.r 1566  0.56  0.56  0.39  0.210  3.4 1.3
+## sv_5   1564  0.56  0.53  0.32  0.199  3.2 1.3
+## 
+## Non missing response frequency for each item
+##           1    2    3    4    5 miss
+## sv_1.r 0.18 0.26 0.32 0.17 0.06 0.14
+## sv_2   0.05 0.14 0.25 0.28 0.28 0.14
+## sv_3   0.10 0.18 0.28 0.24 0.20 0.14
+## sv_4.r 0.09 0.17 0.25 0.23 0.26 0.14
+## sv_5   0.13 0.18 0.24 0.23 0.21 0.14
+
# Create mean variable
+data$sv_mean <- rowMeans(data[,sv_vars], na.rm=T)
+
+
+

3.11 Create Openness Index (NFC, IPIP, & Schwartz)

+

We create a mean openness trait index by rescaling each scale mean (NFC, mini-IPIP, and Schwartz) to the 0–1 interval and averaging the three rescaled means.

+
# Check consistency among 3 mean variables
+psych::alpha(data[,c("nfc_mean", "pip_mean", "sv_mean")])
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, c("nfc_mean", "pip_mean", "sv_mean")])
+## 
+##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
+##        0.6      0.61    0.53      0.34 1.5 0.017  3.3 0.57     0.37
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.56 0.6 0.63 
+## 
+##  Reliability if an item is dropped:
+##          raw_alpha std.alpha G6(smc) average_r  S/N alpha se var.r med.r
+## nfc_mean      0.54      0.54    0.37      0.37 1.17    0.022    NA  0.37
+## pip_mean      0.61      0.61    0.44      0.44 1.60    0.018    NA  0.44
+## sv_mean       0.34      0.34    0.20      0.20 0.51    0.031    NA  0.20
+## 
+##  Item statistics 
+##             n raw.r std.r r.cor r.drop mean   sd
+## nfc_mean 1578  0.77  0.73  0.52   0.38  4.2 0.85
+## pip_mean 1575  0.69  0.70  0.44   0.33  2.4 0.74
+## sv_mean  1569  0.79  0.81  0.67   0.53  3.2 0.68
+
# put mean indices on 0-1 scale
+data$nfc_mean.01 <- rescale_01(data$nfc_mean, max = 6)
+data$pip_mean.01 <- rescale_01(data$pip_mean, max = 5)
+data$sv_mean.01 <- rescale_01(data$sv_mean, max = 5)
+
+# create mean trait index
+data$trait_index <- rowMeans(data[,c("nfc_mean.01", "pip_mean.01", "sv_mean.01")], na.rm=T)
+
+data$trait_index.s <- as.numeric(scale(data$trait_index))
+
+
+

3.12 Political Identity

+
# recode NAs
+data[,paste("pol_id_", 1:4, sep = "")][data[,paste("pol_id_", 1:4, sep = "")] == -99] <- NA
+
+# high values correspond to strong political identity for items 2 and 4. So recode items 
+# 1 and 3. 
+data$pol_id_1.r <- 6 - data$pol_id_1
+data$pol_id_2.r <- data$pol_id_2
+data$pol_id_3.r <- 6 - data$pol_id_3
+data$pol_id_4.r <- data$pol_id_4
+
+pol_id_vars <- c("pol_id_1.r", "pol_id_2.r", "pol_id_3.r", "pol_id_4.r")
+psych::alpha(data[,pol_id_vars]) #good
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, pol_id_vars])
+## 
+##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean sd median_r
+##        0.8       0.8    0.77      0.51 4.1 0.0077    3  1     0.47
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.79 0.8 0.82 
+## 
+##  Reliability if an item is dropped:
+##            raw_alpha std.alpha G6(smc) average_r S/N alpha se   var.r med.r
+## pol_id_1.r      0.77      0.78    0.73      0.54 3.5   0.0095 0.02510  0.46
+## pol_id_2.r      0.73      0.73    0.64      0.47 2.7   0.0112 0.00028  0.46
+## pol_id_3.r      0.79      0.79    0.74      0.56 3.8   0.0086 0.01861  0.49
+## pol_id_4.r      0.71      0.71    0.63      0.46 2.5   0.0116 0.00052  0.46
+## 
+##  Item statistics 
+##               n raw.r std.r r.cor r.drop mean  sd
+## pol_id_1.r 1572  0.76  0.76  0.63   0.57  2.7 1.2
+## pol_id_2.r 1573  0.82  0.83  0.78   0.67  3.3 1.3
+## pol_id_3.r 1564  0.75  0.74  0.59   0.53  3.0 1.3
+## pol_id_4.r 1567  0.84  0.84  0.80   0.69  3.1 1.2
+## 
+## Non missing response frequency for each item
+##               1    2    3    4    5 miss
+## pol_id_1.r 0.21 0.24 0.28 0.17 0.10 0.13
+## pol_id_2.r 0.12 0.15 0.25 0.31 0.18 0.13
+## pol_id_3.r 0.17 0.22 0.24 0.23 0.15 0.14
+## pol_id_4.r 0.13 0.17 0.26 0.29 0.14 0.14
+
# create mean score
+data$pol_id <- rowMeans(data[,pol_id_vars], na.rm=T)
+
+data$pol_id.s <- as.numeric(scale(data$pol_id))
+
+
+

3.13 Partisan Identity

+
# 4 = being party member is extremely important to me
+data$huddy_import <- ifelse(data$pid7 < 4, data$dem_import,
+                            ifelse(data$pid7 > 4, data$rep_import, NA))
+
+# 4 = party describes me extremely well
+data$huddy_describe <- ifelse(data$pid7 < 4, data$dem_describe, 
+                              ifelse(data$pid7 > 4, data$rep_describe, NA))
+
+# 5 = use "we" all the time to talk about party
+data$huddy_we <- ifelse(data$pid7 < 4, data$dem_we, 
+                        ifelse(data$pid7 > 4, data$rep_we, NA))
+
+# 4 = think about myself as party member a great deal
+data$huddy_think <- ifelse(data$pid7 < 4, data$dem_think, 
+                           ifelse(data$pid7 > 4, data$rep_think, NA))
+
+huddy_vars <- c("huddy_import", "huddy_describe", "huddy_we", "huddy_think")
+
+# distribution of responses
+apply(data[,huddy_vars], 2, function(x) table(x, useNA = "always"))
+
## $huddy_import
+## x
+##  -99    1    2    3    4 <NA> 
+##    2   90  411  505  308  500 
+## 
+## $huddy_describe
+## x
+##  -99    1    2    3    4 <NA> 
+##    7   43  363  629  274  500 
+## 
+## $huddy_we
+## x
+##  -99    1    2    3    4    5 <NA> 
+##    2  211  256  341  320  186  500 
+## 
+## $huddy_think
+## x
+##  -99    1    2    3    4 <NA> 
+##    3   44  253  556  460  500
+
# recode NAs
+data[,huddy_vars][data[,huddy_vars] < 0] <- NA
+
+# create standardized mean scale
+data$huddy_import.01 <- rescale_01(data$huddy_import, max = 4)
+data$huddy_describe.01 <- rescale_01(data$huddy_describe, max = 4)
+data$huddy_we.01 <- rescale_01(data$huddy_we, max = 5)
+data$huddy_think.01 <- rescale_01(data$huddy_think, max = 4)
+
+# verify
+apply(data[,paste(huddy_vars, ".01", sep = "")], 2, range, na.rm=T)
+
##      huddy_import.01 huddy_describe.01 huddy_we.01 huddy_think.01
+## [1,]               0                 0           0              0
+## [2,]               1                 1           1              1
+
psych::alpha(data[,c("huddy_import.01", "huddy_describe.01", 
+                     "huddy_we.01", "huddy_think.01")])
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, c("huddy_import.01", "huddy_describe.01", 
+##     "huddy_we.01", "huddy_think.01")])
+## 
+##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
+##       0.88      0.89    0.87      0.66 7.9 0.0046  0.6 0.25     0.65
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.87 0.88 0.89 
+## 
+##  Reliability if an item is dropped:
+##                   raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r
+## huddy_import.01        0.82      0.83    0.78      0.62 5.0   0.0072 0.0056
+## huddy_describe.01      0.84      0.84    0.78      0.64 5.3   0.0067 0.0048
+## huddy_we.01            0.90      0.90    0.86      0.74 8.7   0.0042 0.0028
+## huddy_think.01         0.84      0.85    0.81      0.65 5.7   0.0066 0.0170
+##                   med.r
+## huddy_import.01    0.60
+## huddy_describe.01  0.60
+## huddy_we.01        0.72
+## huddy_think.01     0.60
+## 
+##  Item statistics 
+##                      n raw.r std.r r.cor r.drop mean   sd
+## huddy_import.01   1314  0.90  0.90  0.88   0.81 0.59 0.29
+## huddy_describe.01 1309  0.88  0.89  0.86   0.79 0.62 0.26
+## huddy_we.01       1314  0.81  0.80  0.68   0.64 0.50 0.32
+## huddy_think.01    1313  0.87  0.87  0.81   0.77 0.70 0.27
+## 
+## Non missing response frequency for each item
+##                      0 0.25 0.333333333333333  0.5 0.666666666666667 0.75    1
+## huddy_import.01   0.07 0.00              0.31 0.00              0.38 0.00 0.23
+## huddy_describe.01 0.03 0.00              0.28 0.00              0.48 0.00 0.21
+## huddy_we.01       0.16 0.19              0.00 0.26              0.00 0.24 0.14
+## huddy_think.01    0.03 0.00              0.19 0.00              0.42 0.00 0.35
+##                   miss
+## huddy_import.01   0.28
+## huddy_describe.01 0.28
+## huddy_we.01       0.28
+## huddy_think.01    0.28
+
data$huddy_id <- rowMeans(data[,c("huddy_import.01", "huddy_describe.01", 
+                                    "huddy_we.01", "huddy_think.01")], na.rm=T)
+
+data$huddy_id.s <- as.numeric(scale(data$huddy_id))
+
+
+

3.14 Numeracy

+
rasch_vars <- c("rasch_1", "rasch_2", "rasch_3", "rasch_4", 
+                     "rasch_5", "rasch_6", "rasch_7")
+
+#apply(data[,rasch_vars], 2, function(x) table(x, useNA = "always"))
+apply(data[,rasch_vars], 2, class)
+
##     rasch_1     rasch_2     rasch_3     rasch_4     rasch_5     rasch_6 
+## "character" "character" "character" "character" "character" "character" 
+##     rasch_7 
+## "character"
+
# recode character vectors as numeric vectors
+data$rasch_1.r <- as.numeric(as.character(data$rasch_1))
+
## Warning: NAs introduced by coercion
+
data$rasch_2.r <- as.numeric(as.character(data$rasch_2))
+
## Warning: NAs introduced by coercion
+
data$rasch_3.r <- as.numeric(as.character(data$rasch_3))
+data$rasch_4.r <- as.numeric(as.character(data$rasch_4))
+data$rasch_5.r <- as.numeric(as.character(data$rasch_5))
+
## Warning: NAs introduced by coercion
+
data$rasch_6.r <- as.numeric(as.character(data$rasch_6))
+data$rasch_7.r <- as.numeric(as.character(data$rasch_7))
+
## Warning: NAs introduced by coercion
+
rasch_vars.r <- paste(rasch_vars, ".r", sep = "")
+
+# recode NAs
+data[,rasch_vars.r][data[,rasch_vars.r] == -99] <- NA
+
+# create indicators for correct responses
+data$rasch_1_corr <- ifelse(data$rasch_1.r == 10, 1, 0)
+data$rasch_2_corr <- ifelse(data$rasch_2.r == .1, 1, 0)
+data$rasch_6_corr <- ifelse(data$rasch_6.r == 20, 1, 0)
+data$rasch_5_corr <- ifelse(data$rasch_5.r == 100, 1, 0)
+data$rasch_3_corr <- ifelse(data$rasch_3.r == 4, 1, 0)
+data$rasch_4_corr <- ifelse(data$rasch_4.r == 29, 1, 0)
+data$rasch_7_corr <- ifelse(data$rasch_7.r == 500, 1, 0)
+
+rasch_corr_vars <- c("rasch_1_corr", "rasch_2_corr", "rasch_3_corr", "rasch_4_corr", 
+                "rasch_5_corr", "rasch_6_corr", "rasch_7_corr")
+
+apply(data[,rasch_corr_vars], 2, function(x) 
+  round(prop.table(table(x)),2))
+
##   rasch_1_corr rasch_2_corr rasch_3_corr rasch_4_corr rasch_5_corr rasch_6_corr
+## 0         0.42          0.8         0.79         0.91         0.33         0.29
+## 1         0.58          0.2         0.21         0.09         0.67         0.71
+##   rasch_7_corr
+## 0         0.52
+## 1         0.48
+
psych::alpha(data[,rasch_corr_vars])
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, rasch_corr_vars])
+## 
+##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
+##       0.68      0.69    0.66      0.24 2.2 0.011 0.42 0.26     0.23
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.65 0.68 0.7 
+## 
+##  Reliability if an item is dropped:
+##              raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
+## rasch_1_corr      0.64      0.65    0.62      0.24 1.9    0.013 0.0049  0.22
+## rasch_2_corr      0.62      0.63    0.60      0.22 1.7    0.013 0.0043  0.22
+## rasch_3_corr      0.64      0.65    0.62      0.24 1.9    0.013 0.0041  0.22
+## rasch_4_corr      0.65      0.65    0.61      0.24 1.9    0.013 0.0036  0.23
+## rasch_5_corr      0.67      0.68    0.65      0.26 2.1    0.012 0.0032  0.26
+## rasch_6_corr      0.65      0.66    0.63      0.25 2.0    0.013 0.0055  0.26
+## rasch_7_corr      0.61      0.63    0.59      0.22 1.7    0.014 0.0045  0.21
+## 
+##  Item statistics 
+##                 n raw.r std.r r.cor r.drop  mean   sd
+## rasch_1_corr 1651  0.62  0.58  0.47   0.39 0.581 0.49
+## rasch_2_corr 1650  0.62  0.65  0.56   0.46 0.203 0.40
+## rasch_3_corr 1596  0.57  0.59  0.49   0.38 0.211 0.41
+## rasch_4_corr 1579  0.52  0.59  0.49   0.39 0.087 0.28
+## rasch_5_corr 1632  0.52  0.50  0.34   0.28 0.673 0.47
+## rasch_6_corr 1633  0.58  0.56  0.43   0.36 0.708 0.45
+## rasch_7_corr 1580  0.68  0.65  0.57   0.48 0.485 0.50
+## 
+## Non missing response frequency for each item
+##                 0    1 miss
+## rasch_1_corr 0.42 0.58 0.09
+## rasch_2_corr 0.80 0.20 0.09
+## rasch_3_corr 0.79 0.21 0.12
+## rasch_4_corr 0.91 0.09 0.13
+## rasch_5_corr 0.33 0.67 0.10
+## rasch_6_corr 0.29 0.71 0.10
+## rasch_7_corr 0.52 0.48 0.13
+
# create mean numeracy score
+data$numeracy <- rowMeans(data[,rasch_corr_vars], na.rm=T)
+
+data$numeracy.s <- as.numeric(scale(data$numeracy))
+
+
+

3.15 Political Knowledge

+
apply(data[,c("pk_speaker", "pk_senterm", "pk_cj", "pk_merkel", "pk_putin")], 2, 
+      function(x) table(x, useNA = "always"))
+
##      pk_speaker pk_senterm pk_cj pk_merkel pk_putin
+## -99           5          4     9         5        3
+## 1           166        280    35        93       11
+## 2            30        351    60       948     1556
+## 3            27        756   817        33       17
+## 4          1210         94    52       101       14
+## 888         272        225   734       527      106
+## <NA>        106        106   109       109      109
+
data$pk_speaker_corr <- ifelse(data$pk_speaker == 4, 1, 
+                               ifelse(data$pk_speaker == -99, NA, 0))
+
+data$pk_senterm_corr <- ifelse(data$pk_senterm == 3, 1, 
+                               ifelse(data$pk_senterm == -99, NA, 0))
+
+data$pk_cj_corr <- ifelse(data$pk_cj == 3, 1, 
+                               ifelse(data$pk_cj == -99, NA, 0))
+
+data$pk_merkel_corr <- ifelse(data$pk_merkel == 2, 1, 
+                               ifelse(data$pk_merkel == -99, NA, 0))
+
+data$pk_putin_corr <- ifelse(data$pk_putin == 2, 1, 
+                               ifelse(data$pk_putin == -99, NA, 0))
+
+pk_vars <- c("pk_speaker_corr", "pk_senterm_corr", "pk_cj_corr", 
+             "pk_merkel_corr", "pk_putin_corr")
+
+apply(data[,pk_vars], 2, function(x) prop.table(table(x)))
+
##   pk_speaker_corr pk_senterm_corr pk_cj_corr pk_merkel_corr pk_putin_corr
+## 0       0.2903226       0.5568581  0.5188457      0.4430082    0.08685446
+## 1       0.7096774       0.4431419  0.4811543      0.5569918    0.91314554
+
data$pk_mean <- rowMeans(data[,pk_vars]) # note: no na.rm, so pk_mean is NA if any knowledge item is missing
+
+data$pk_mean.s <- as.numeric(scale(data$pk_mean))
+
+cor(data$pk_mean, data$pk_mean.s, use = "complete.obs")
+
## [1] 1
+
psych::alpha(data[,pk_vars])
+
## 
+## Reliability analysis   
+## Call: psych::alpha(x = data[, pk_vars])
+## 
+##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
+##       0.73      0.72     0.7      0.34 2.6 0.0095 0.62 0.31     0.34
+## 
+##  lower alpha upper     95% confidence boundaries
+## 0.71 0.73 0.75 
+## 
+##  Reliability if an item is dropped:
+##                 raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
+## pk_speaker_corr      0.68      0.67    0.63      0.34 2.0    0.011 0.0225  0.32
+## pk_senterm_corr      0.71      0.70    0.66      0.37 2.4    0.010 0.0163  0.35
+## pk_cj_corr           0.63      0.64    0.58      0.30 1.8    0.013 0.0075  0.30
+## pk_merkel_corr       0.62      0.63    0.57      0.30 1.7    0.014 0.0075  0.30
+## pk_putin_corr        0.73      0.73    0.69      0.40 2.7    0.010 0.0113  0.39
+## 
+##  Item statistics 
+##                    n raw.r std.r r.cor r.drop mean   sd
+## pk_speaker_corr 1705  0.69  0.70  0.58   0.48 0.71 0.45
+## pk_senterm_corr 1706  0.67  0.64  0.48   0.42 0.44 0.50
+## pk_cj_corr      1698  0.78  0.76  0.70   0.60 0.48 0.50
+## pk_merkel_corr  1702  0.79  0.77  0.72   0.61 0.56 0.50
+## pk_putin_corr   1704  0.49  0.58  0.40   0.34 0.91 0.28
+## 
+## Non missing response frequency for each item
+##                    0    1 miss
+## pk_speaker_corr 0.29 0.71 0.06
+## pk_senterm_corr 0.56 0.44 0.06
+## pk_cj_corr      0.52 0.48 0.06
+## pk_merkel_corr  0.44 0.56 0.06
+## pk_putin_corr   0.09 0.91 0.06
+
+
+

3.16 Demographics

+
# Gender
+table(data$gender, useNA = "always")
+
## 
+##  -99    1    2 <NA> 
+##    2  725  821  268
+
data$female <- ifelse(data$gender == 2, 1, 0)
+data$female[data$gender == -99] <- NA # the -99 code is in gender; female is never -99
+table(data$female, useNA = "always")
+
## 
+##    0    1 <NA> 
+##  727  821  268
+
table(data$gender, data$female)
+
##      
+##         0   1
+##   -99   2   0
+##   1   725   0
+##   2     0 821
+
# Race
+
+# note: respondents were instructed to check all that apply: 1 = White, 2 = Black, 
+# 3 = Hispanic, 4 = Asian, 5 = Native American, 6 = Middle Eastern, 
+# 7 = Mixed race, 8 = Other 
+
+# recode NAs
+data$race_1[data$race_1 == -99] <- NA
+data$race_2[data$race_2 == -99] <- NA
+data$race_3[data$race_3 == -99] <- NA
+data$race_4[data$race_4 == -99] <- NA
+data$race_5[data$race_5 == -99] <- NA
+data$race_6[data$race_6 == -99] <- NA
+data$race_7[data$race_7 == -99] <- NA
+data$race_8[data$race_8 == -99] <- NA # race_8 ("Other") is used in race_other below
+
+data$race_white <- ifelse(data$race_1 == 1, 1, 0)
+data$race_black <- ifelse(data$race_2 == 1, 1, 0)
+data$race_other <- ifelse(data$race_4 == 1 | data$race_5 == 1 | data$race_6 == 1 | 
+                       data$race_7 == 1 | data$race_8 == 1, 1, 0)
+
+data$hispanic <- ifelse(data$race_3 == 1, 1, 0)
+
+data$nonhisp_black <- ifelse(data$race_2 == 1 & data$race_3 != 1, 1, 0)
+
+# Education
+# 1 = < HS, 2 = HS, 3 = some college, 4 = 2-yr college, 5 = 4-yr college, 6 = postgrad
+data$education <- data$educatt
+data$education[data$education == -99] <- NA
+
+data$edu_lesshs <- ifelse(data$education == 1, 1, 0)
+data$edu_hs <- ifelse(data$education == 2, 1, 0)
+data$edu_somecollege <- ifelse(data$education == 3 | data$education == 4, 1, 0)
+data$edu_college <- ifelse(data$education == 5, 1, 0)
+data$edu_grad <- ifelse(data$education == 6, 1, 0)
+
+# Age
+data$age <- 2018-data$birthyr
+
+data$age.s <- as.numeric(scale(data$age))
+
+# Income
+# 1-13 scale, where 1 = < 10k, 13 = > 150k
+data$income <- data$faminc
+
+data$income[data$income == -99] <- NA
+
+data$income.s <- as.numeric(scale(data$income))
+
+
+
+

4 Analysis: Ideological Asymmetries in PMR

+
+

4.1 Model Nomenclature

+

The following naming syntax is used when running models and calculating quantities of interest below.

+
  • mx = model x (where m1 = asymmetry model and m2 = moderation model)
  • ex = experiment x, where:
      • e1 = experiment 1
      • e2ss = experiment 2, sample size outcome
      • e2cc = experiment 2, causal claim outcome
  • iss/ideo/pid = operationalization of R’s L/R dimension, where:
      • iss = issue; ideo = ideology; pid = party ID
  • lib/con = data are subset to those individuals who received liberal- (lib) or conservative-leaning (con) evidence
  • Example: m1_e1_iss_lib
      • m1 = model 1 (asymmetry model)
      • e1 = experiment 1
      • iss = L/R dimension defined by R’s issue position
      • lib = R received liberal-leaning evidence in experiment 1
+

The first set of models (“Asymmetry Models”) explores the extent to which there is an asymmetry in how liberals and conservatives engage in politically motivated reasoning. In total there are 18 models: 3 outcomes (interpretation DV, sample size DV, causal claim DV) × 3 operationalizations of the L/R dimension (issue stance, self-reported ideology, self-reported party ID) × 2 evidence conditions (liberal-leaning, conservative-leaning). That is, we run a separate model for each of the 3 experimental outcomes (Experiment 1, interpretation outcome; Experiment 2, sample size outcome; Experiment 2, causal claim outcome), for each operationalization of the respondent’s L/R dimension, and for each randomly assigned evidence direction.

+

Order of Asymmetry Models:

+
  • Models for Experiment 1
      • L/R = issue position, liberal evidence
      • L/R = issue position, conservative evidence
      • L/R = ideology, liberal evidence
      • L/R = ideology, conservative evidence
      • L/R = party ID, liberal evidence
      • L/R = party ID, conservative evidence
  • Models for Experiment 2 (Sample Size outcome)
      • L/R = issue position, liberal evidence
      • L/R = issue position, conservative evidence
      • L/R = ideology, liberal evidence
      • L/R = ideology, conservative evidence
      • L/R = party ID, liberal evidence
      • L/R = party ID, conservative evidence
  • Models for Experiment 2 (Causal Claim outcome)
      • L/R = issue position, liberal evidence
      • L/R = issue position, conservative evidence
      • L/R = ideology, liberal evidence
      • L/R = ideology, conservative evidence
      • L/R = party ID, liberal evidence
      • L/R = party ID, conservative evidence
+
+
+

4.2 Create common set of controls for all models

+
+
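
The chunk that defines these control vectors is not echoed in this document. As a point of reference, the sketch below lists control sets consistent with the covariates that appear in the pooled lm_robust models in Section 7; the exact vectors in the unechoed chunk may differ.

+
# Sketch (assumption): control sets referenced by quick_lm()/quick_glm() below,
+# mirroring the covariates used in the Section 7 pooled models
+m2_e1_controls <- c("age.s", "female", "edu_hs", "edu_somecollege", "edu_college",
+                    "edu_grad", "hispanic", "nonhisp_black", "income.s", "exp1_issue")
+m2_e2_controls <- c("age.s", "female", "edu_hs", "edu_somecollege", "edu_college",
+                    "edu_grad", "hispanic", "nonhisp_black", "income.s", "exp2_issue")
+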
+

4.3 Create Variable for Whether R Has a Liberal or Conservative View on the Issue They Received Evidence About

+

Higher values indicate that the respondent holds a more conservative position on the issue about which they received evidence.

+
# Experiment 1 ---------------------------------------------------------------------------
+
+# continuous
+data$exp1_issue_lr <- ifelse(data$exp1_issue == "gun", 7 - data$gun_lib,
+                      ifelse(data$exp1_issue == "aa", 7 - data$aa_lib,
+                      ifelse(data$exp1_issue == "wage", 7 - data$wage_lib,
+                      ifelse(data$exp1_issue == "abort", 7 - data$abort_lib, 
+                      ifelse(data$exp1_issue == "imm", 7 - data$imm_lib, NA)))))
+
+table(data$exp1_issue_lr, useNA = "always")
+
## 
+##    1    2    3    4    5    6 <NA> 
+##  522  222  231  199  188  353  101
+
# binary
+data$exp1_issue_lr_binary <- ifelse(data$exp1_issue_lr <= 3, 0, 
+                                    ifelse(data$exp1_issue_lr >= 4, 1, NA))
+
+table(data$exp1_issue_lr, data$exp1_issue_lr_binary, useNA = "always")
+
##       
+##          0   1 <NA>
+##   1    522   0    0
+##   2    222   0    0
+##   3    231   0    0
+##   4      0 199    0
+##   5      0 188    0
+##   6      0 353    0
+##   <NA>   0   0  101
+
# Experiment 2 ---------------------------------------------------------------------------
+
+# continuous
+data$exp2_issue_lr <- ifelse(data$exp2_issue == "gun", 7 - data$gun_lib, 
+                      ifelse(data$exp2_issue == "aa", 7 - data$aa_lib, 
+                      ifelse(data$exp2_issue == "wage", 7 - data$wage_lib, 
+                      ifelse(data$exp2_issue == "abort", 7 - data$abort_lib, 
+                      ifelse(data$exp2_issue == "imm", 7 - data$imm_lib, NA)))))
+
+# binary
+data$exp2_issue_lr_binary <- ifelse(data$exp2_issue_lr <= 3, 0, 
+                                    ifelse(data$exp2_issue_lr >= 4, 1, NA))
+
+table(data$exp2_issue_lr, data$exp2_issue_lr_binary, useNA = "always")
+
##       
+##          0   1 <NA>
+##   1    534   0    0
+##   2    196   0    0
+##   3    221   0    0
+##   4      0 219    0
+##   5      0 173    0
+##   6      0 373    0
+##   <NA>   0   0  100
+
+
+

4.4 Binary Models (For Main Analysis)

+
+
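
The 18 binary asymmetry models are estimated in a chunk that is not echoed here. Below is a hedged sketch of what one of them (m1_e1_iss_lib) could look like given the nomenclature above; exp1_evidence ("lib"/"con") is a hypothetical name for the randomized evidence-direction indicator, and the unechoed specification may differ.

+
# Sketch (assumption): one of the 18 asymmetry models described above;
+# "exp1_evidence" is a hypothetical name for the evidence-direction assignment
+m1_e1_iss_lib <- quick_glm(outcome = "exp1_correct",
+                           new = "exp1_issue_lr_binary",
+                           keepers = m2_e1_controls,
+                           data = data[data$exp1_evidence == "lib", ])
+summary(m1_e1_iss_lib)
+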
+

4.5 Binary Post-Estimation

+
+
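
Post-estimation uses the post() and postSim() helpers sourced in the setup, with nsims simulation draws; that chunk is not echoed. As a generic illustration of the simulation approach (not the replication’s own helpers), one can draw coefficients from their estimated sampling distribution and summarize average predicted probabilities:

+
# Generic sketch of simulation-based post-estimation for a fitted logit `fit`
+# (requires the MASS package, which ships with R); this illustrates the
+# technique only and is not the post()/postSim() implementation
+sim_predicted_prob <- function(fit, nsims = 10000){
+  betas <- MASS::mvrnorm(nsims, mu = coef(fit), Sigma = vcov(fit))
+  X <- model.matrix(fit)              # design matrix for complete cases
+  p <- plogis(X %*% t(betas))         # n x nsims predicted probabilities
+  draws <- colMeans(p)                # average predicted probability per draw
+  c(mean = mean(draws), quantile(draws, c(.025, .975)))
+}
+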
+

4.6 Binary Figures

+

We create separate figures for each experimental outcome (Experiment 1; Experiment 2 Sample Size outcome; and Experiment 2 Causal Claim outcome). For each of these experimental outcomes, we create separate figures for each operationalization of a respondent’s L/R dimension (issue position, ideology, and party ID). This results in 9 figures. For the sake of simplicity, the 3 figures for each experimental outcome are horizontally arranged in a single PDF file.
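
The plotting chunk is not echoed; only the device-close messages appear below. Its layout follows the same base-R pattern as the Section 5.3 figure code. A minimal skeleton, with a hypothetical file name:

+
# Skeleton (assumption): layout of one figure file; the actual plotting
+# commands and file names live in the unechoed chunk
+pdf("lucid_figures/example_binary_figure.pdf", height = 4, width = 8)
+par(mfrow = c(1, 3))  # three panels: issue position, ideology, party ID
+# ... one coefficient plot per operationalization of the L/R dimension ...
+dev.off()
+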

+
## quartz_off_screen 
+##                 2
+
## quartz_off_screen 
+##                 2
+
## quartz_off_screen 
+##                 2
+
## quartz_off_screen 
+##                 2
+
+
+

4.7 Continuous Models (For Appendix)

+
+
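
The continuous-specification chunk is likewise not echoed. A sketch of one such model, substituting the 6-point exp1_issue_lr measure for its binary counterpart (exp1_evidence again being a hypothetical assignment indicator):

+
# Sketch (assumption): continuous analogue of the binary asymmetry model above
+m1_e1_iss_lib_cont <- quick_glm(outcome = "exp1_correct",
+                                new = "exp1_issue_lr",
+                                keepers = m2_e1_controls,
+                                data = data[data$exp1_evidence == "lib", ])
+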
+

4.8 Continuous Post-Estimation

+
+
+

4.9 Continuous Figures

+

We create separate figures for each experimental outcome (Experiment 1; Experiment 2, Sample Size outcome; and Experiment 2, Causal Claim outcome). For each of these experimental outcomes, we create separate figures for each operationalization of a respondent’s L/R dimension (issue position, ideology, and party ID). This results in 9 figures. For simplicity, the 3 figures for each experimental outcome are horizontally arranged in a single PDF file.

+
## quartz_off_screen 
+##                 2
+
## quartz_off_screen 
+##                 2
+
## quartz_off_screen 
+##                 2
+
## quartz_off_screen 
+##                 2
+
+
+
+

5 Analysis: The Moderating Effect of Epistemic Needs

+
+

5.1 Need for Closure

+

These models use NFC as a measure of openness. In the next section we run identical models using the Trait Index instead.

+
# Experiment 1----------------------------------------------------------------------------
+
+# Issue Position
+m2a_e1_iss <- quick_glm(outcome = "exp1_correct", 
+                        new = "exp1_congenial_issue_binary*pol_id.s + 
+                        exp1_congenial_issue_binary*nfc_mean.s + 
+                        exp1_congenial_issue_binary*numeracy.s + 
+                        exp1_congenial_issue_binary*pk_mean.s", 
+                        keepers = m2_e1_controls, 
+                        data = data)
+
+# Ideology
+m2a_e1_ideo <- quick_glm(outcome = "exp1_correct", 
+                         new = "exp1_congenial_ideo_binary*pol_id.s + 
+                         exp1_congenial_ideo_binary*nfc_mean.s + 
+                         exp1_congenial_ideo_binary*numeracy.s + 
+                         exp1_congenial_ideo_binary*pk_mean.s", 
+                         keepers = m2_e1_controls, 
+                         data = data)
+
+# Party ID
+m2a_e1_pid <- quick_glm(outcome = "exp1_correct", 
+                        new = "exp1_congenial_pid_binary*huddy_id.s + 
+                        exp1_congenial_pid_binary*nfc_mean.s +
+                        exp1_congenial_pid_binary*numeracy.s + 
+                        exp1_congenial_pid_binary*pk_mean.s", 
+                        keepers = m2_e1_controls, 
+                        data = data)
+
+# Experiment 2 (Sample Size)--------------------------------------------------------------
+
+# Issue Positions
+m2a_e2ss_iss <- quick_lm(outcome = "exp2_goodSample", 
+                         new = "exp2_congenial_issue_binary*pol_id.s + 
+                         exp2_congenial_issue_binary*nfc_mean.s + 
+                         exp2_congenial_issue_binary*pk_mean.s",                          
+                         keepers = m2_e2_controls, 
+                         data = data)
+
+# Ideology
+m2a_e2ss_ideo <- quick_lm(outcome = "exp2_goodSample", 
+                          new = "exp2_congenial_ideo_binary*pol_id.s + 
+                          exp2_congenial_ideo_binary*nfc_mean.s + 
+                          exp2_congenial_ideo_binary*pk_mean.s",                            
+                          keepers = m2_e2_controls, 
+                          data = data)
+
+# Party ID
+m2a_e2ss_pid <- quick_lm(outcome = "exp2_goodSample", 
+                         new = "exp2_congenial_pid_binary*huddy_id.s + 
+                         exp2_congenial_pid_binary*nfc_mean.s + 
+                         exp2_congenial_pid_binary*pk_mean.s",    
+                         keepers = m2_e2_controls, 
+                         data = data)
+
+# Experiment 2 (Causal Claim)-------------------------------------------------------------
+
+# Issue Positions
+m2a_e2cc_iss <- quick_lm(outcome = "exp2_goodCausal", 
+                         new = "exp2_congenial_issue_binary*pol_id.s + 
+                         exp2_congenial_issue_binary*nfc_mean.s + 
+                         exp2_congenial_issue_binary*pk_mean.s",                
+                         keepers = m2_e2_controls, 
+                         data = data)
+
+# Ideology
+m2a_e2cc_ideo <- quick_lm(outcome = "exp2_goodCausal", 
+                          new = "exp2_congenial_ideo_binary*pol_id.s + 
+                          exp2_congenial_ideo_binary*nfc_mean.s + 
+                          exp2_congenial_ideo_binary*pk_mean.s",    
+                          keepers = m2_e2_controls, 
+                          data = data)
+
+# Party ID
+m2a_e2cc_pid <- quick_lm(outcome = "exp2_goodCausal", 
+                         new = "exp2_congenial_pid_binary*huddy_id.s + 
+                         exp2_congenial_pid_binary*nfc_mean.s + 
+                         exp2_congenial_pid_binary*pk_mean.s",    
+                         keepers = m2_e2_controls, 
+                         data = data)
+
+
+

5.2 Trait Index

+

In these models we substitute the Openness Trait Index for NFC.

+
# Experiment 1----------------------------------------------------------------------------
+
+# Issue Position
+m2b_e1_iss <- quick_glm(outcome = "exp1_correct", 
+                        new = "exp1_congenial_issue_binary*pol_id.s + 
+                        exp1_congenial_issue_binary*trait_index.s + 
+                        exp1_congenial_issue_binary*numeracy.s + 
+                        exp1_congenial_issue_binary*pk_mean.s", 
+                        keepers = m2_e1_controls, 
+                        data = data)
+
+# Ideology
+m2b_e1_ideo <- quick_glm(outcome = "exp1_correct", 
+                         new = "exp1_congenial_ideo_binary*pol_id.s + 
+                         exp1_congenial_ideo_binary*trait_index.s + 
+                         exp1_congenial_ideo_binary*numeracy.s + 
+                         exp1_congenial_ideo_binary*pk_mean.s", 
+                         keepers = m2_e1_controls, 
+                         data = data)
+
+# Party ID
+m2b_e1_pid <- quick_glm(outcome = "exp1_correct", 
+                        new = "exp1_congenial_pid_binary*huddy_id.s + 
+                        exp1_congenial_pid_binary*trait_index.s +
+                        exp1_congenial_pid_binary*numeracy.s + 
+                        exp1_congenial_pid_binary*pk_mean.s", 
+                        keepers = m2_e1_controls, 
+                        data = data)
+
+# Experiment 2 (Sample Size) -------------------------------------------------------------
+
+# Issue Positions
+m2b_e2ss_iss <- quick_lm(outcome = "exp2_goodSample", 
+                         new = "exp2_congenial_issue_binary*pol_id.s + 
+                         exp2_congenial_issue_binary*trait_index.s + 
+                         exp2_congenial_issue_binary*pk_mean.s",                          
+                         keepers = m2_e2_controls, 
+                         data = data)
+
+# Ideology
+m2b_e2ss_ideo <- quick_lm(outcome = "exp2_goodSample", 
+                          new = "exp2_congenial_ideo_binary*pol_id.s + 
+                          exp2_congenial_ideo_binary*trait_index.s + 
+                          exp2_congenial_ideo_binary*pk_mean.s",                            
+                          keepers = m2_e2_controls, 
+                          data = data)
+
+# Party ID
+m2b_e2ss_pid <- quick_lm(outcome = "exp2_goodSample", 
+                         new = "exp2_congenial_pid_binary*huddy_id.s + 
+                         exp2_congenial_pid_binary*trait_index.s + 
+                         exp2_congenial_pid_binary*pk_mean.s",    
+                         keepers = m2_e2_controls, 
+                         data = data)
+
+# Experiment 2 (Causal Claim)-------------------------------------------------------------
+
+# Issue Positions
+m2b_e2cc_iss <- quick_lm(outcome = "exp2_goodCausal", 
+                         new = "exp2_congenial_issue_binary*pol_id.s + 
+                         exp2_congenial_issue_binary*trait_index.s + 
+                         exp2_congenial_issue_binary*pk_mean.s",                
+                         keepers = m2_e2_controls, 
+                         data = data)
+
+# Ideology
+m2b_e2cc_ideo <- quick_lm(outcome = "exp2_goodCausal", 
+                          new = "exp2_congenial_ideo_binary*pol_id.s + 
+                          exp2_congenial_ideo_binary*trait_index.s + 
+                          exp2_congenial_ideo_binary*pk_mean.s",    
+                          keepers = m2_e2_controls, 
+                          data = data)
+
+# Party ID
+m2b_e2cc_pid <- quick_lm(outcome = "exp2_goodCausal", 
+                         new = "exp2_congenial_pid_binary*huddy_id.s + 
+                         exp2_congenial_pid_binary*trait_index.s + 
+                         exp2_congenial_pid_binary*pk_mean.s",    
+                         keepers = m2_e2_controls, 
+                         data = data)
+
+
+

5.3 Figures

+
# labels for the 9 models whose coefficients are stacked in the matrices below
+rownames <- c("m2a_e1_iss", "m2a_e1_ideo", "m2a_e1_pid", 
+              "m2a_e2ss_iss", "m2a_e2ss_ideo", "m2a_e2ss_pid", 
+              "m2a_e2cc_iss", "m2a_e2cc_ideo", "m2a_e2cc_pid") 
+
+nfc_bin <- rbind(
+  as.data.frame(summary(m2a_e1_iss)$coef)[21,1:2],
+  as.data.frame(summary(m2a_e1_ideo)$coef)[21,1:2],
+  as.data.frame(summary(m2a_e1_pid)$coef)[21,1:2],
+  
+  as.data.frame(summary(m2a_e2ss_iss)$coef)[20,1:2],
+  as.data.frame(summary(m2a_e2ss_ideo)$coef)[20,1:2],
+  as.data.frame(summary(m2a_e2ss_pid)$coef)[20,1:2],
+  
+  as.data.frame(summary(m2a_e2cc_iss)$coef)[20,1:2],
+  as.data.frame(summary(m2a_e2cc_ideo)$coef)[20,1:2],
+  as.data.frame(summary(m2a_e2cc_pid)$coef)[20,1:2]
+)
+
+ti_bin <- rbind(
+  as.data.frame(summary(m2b_e1_iss)$coef)[21,1:2],
+  as.data.frame(summary(m2b_e1_ideo)$coef)[21,1:2],
+  as.data.frame(summary(m2b_e1_pid)$coef)[21,1:2],
+  
+  as.data.frame(summary(m2b_e2ss_iss)$coef)[20,1:2],
+  as.data.frame(summary(m2b_e2ss_ideo)$coef)[20,1:2],
+  as.data.frame(summary(m2b_e2ss_pid)$coef)[20,1:2],
+  
+  as.data.frame(summary(m2b_e2cc_iss)$coef)[20,1:2],
+  as.data.frame(summary(m2b_e2cc_ideo)$coef)[20,1:2],
+  as.data.frame(summary(m2b_e2cc_pid)$coef)[20,1:2]
+)
+
+id_bin <- rbind(
+  as.data.frame(summary(m2a_e1_iss)$coef)[20,1:2],
+  as.data.frame(summary(m2a_e1_ideo)$coef)[20,1:2],
+  as.data.frame(summary(m2a_e1_pid)$coef)[20,1:2],
+  
+  as.data.frame(summary(m2a_e2ss_iss)$coef)[19,1:2],
+  as.data.frame(summary(m2a_e2ss_ideo)$coef)[19,1:2],
+  as.data.frame(summary(m2a_e2ss_pid)$coef)[19,1:2],
+  
+  as.data.frame(summary(m2a_e2cc_iss)$coef)[19,1:2],
+  as.data.frame(summary(m2a_e2cc_ideo)$coef)[19,1:2],
+  as.data.frame(summary(m2a_e2cc_pid)$coef)[19,1:2]
+)
+
+pk_bin <- rbind(
+  as.data.frame(summary(m2a_e1_iss)$coef)[23,1:2],
+  as.data.frame(summary(m2a_e1_ideo)$coef)[23,1:2],
+  as.data.frame(summary(m2a_e1_pid)$coef)[23,1:2],
+  
+  as.data.frame(summary(m2a_e2ss_iss)$coef)[21,1:2],
+  as.data.frame(summary(m2a_e2ss_ideo)$coef)[21,1:2],
+  as.data.frame(summary(m2a_e2ss_pid)$coef)[21,1:2],
+  
+  as.data.frame(summary(m2a_e2cc_iss)$coef)[21,1:2],
+  as.data.frame(summary(m2a_e2cc_ideo)$coef)[21,1:2],
+  as.data.frame(summary(m2a_e2cc_pid)$coef)[21,1:2]
+)
+
+num_bin <- rbind(
+  as.data.frame(summary(m2a_e1_iss)$coef)[22,1:2],
+  as.data.frame(summary(m2a_e1_ideo)$coef)[22,1:2],
+  as.data.frame(summary(m2a_e1_pid)$coef)[22,1:2]
+)
+
+
+# NFC Plot--------------------------------------------------------------------------------
+
+pdf("lucid_figures/nfc_openness_b_2way.pdf", height = 4, width = 8)
+
+par(mfrow = c(2,3), pch = 16, mar = c(0,2,1,0), oma = c(.5,5,1,0))
+
+# E1
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Evidence Interpretation")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = nfc_bin[1:3, 1] + 1.96*nfc_bin[1:3,2], 
+         y1 = nfc_bin[1:3, 1] - 1.96*nfc_bin[1:3,2], 
+         lwd = 3, col = "gray40")
+points(1:3, nfc_bin[1:3,1], cex = 2, pch = c(15, 16, 17))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Need for Closure", side = 2, padj = -5) # common y axis label
+
+# E2ss
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Sample Size")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = nfc_bin[4:6, 1] + 1.96*nfc_bin[4:6,2], 
+         y1 = nfc_bin[4:6, 1] - 1.96*nfc_bin[4:6,2], 
+         lwd = 3, col = "gray40")
+points(1:3, nfc_bin[4:6,1], cex = 2, pch = c(15, 16, 17))
+
+# E2cc
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Causality")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = nfc_bin[7:9, 1] + 1.96*nfc_bin[7:9,2], 
+         y1 = nfc_bin[7:9, 1] - 1.96*nfc_bin[7:9,2], 
+         lwd = 3, col = "gray40")
+points(1:3, nfc_bin[7:9,1], cex = 2, pch = c(15, 16, 17))
+
+
+# Trait Index Plot-----------------------------------------------------------------------
+
+# E1
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = ti_bin[1:3, 1] + 1.96*ti_bin[1:3,2], 
+         y1 = ti_bin[1:3, 1] - 1.96*ti_bin[1:3,2], 
+         lwd = 3, col = "gray40")
+points(1:3, ti_bin[1:3,1], cex = 2, pch = c(15, 16, 17))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Openness Index", side = 2, padj = -5) # common y axis label
+
+
+# E2ss
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = ti_bin[4:6, 1] + 1.96*ti_bin[4:6,2], 
+         y1 = ti_bin[4:6, 1] - 1.96*ti_bin[4:6,2], 
+         lwd = 3, col = "gray40")
+points(1:3, ti_bin[4:6,1], cex = 2, pch = c(15, 16, 17))
+
+
+legend("bottom", legend = c("Issue Position", "Ideology" , "Party ID"),
+       pch = c(15, 16, 17),
+       cex =1.5, ncol = 3, xpd = NA)
+
+# E2cc
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = ti_bin[7:9, 1] + 1.96*ti_bin[7:9,2], 
+         y1 = ti_bin[7:9, 1] - 1.96*ti_bin[7:9,2], 
+         lwd = 3, col = "gray40")
+points(1:3, ti_bin[7:9,1], cex = 2, pch = c(15, 16, 17))
+
+dev.off()
+
## quartz_off_screen 
+##                 2
+
# Identity Plot---------------------------------------------------------------------------
+
+pdf("lucid_figures/identity_b_2way.pdf", height = 4, width = 8)
+
+par(mfrow = c(1,3), pch = 16, mar = c(0,2,1,0), oma = c(.5,5,1,0))
+
+# E1
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Evidence Interpretation")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = id_bin[1:3, 1] + 1.96*id_bin[1:3,2], 
+         y1 = id_bin[1:3, 1] - 1.96*id_bin[1:3,2], 
+         lwd = 3, col = "gray40")
+points(1:3, id_bin[1:3,1], cex = 2, pch = c(15, 16, 17))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Identity", side = 2, padj = -5) # common y axis label
+
+
+# E2ss
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Sample Size")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = id_bin[4:6, 1] + 1.96*id_bin[4:6,2], 
+         y1 = id_bin[4:6, 1] - 1.96*id_bin[4:6,2], 
+         lwd = 3, col = "gray40")
+points(1:3, id_bin[4:6,1], cex = 2, pch = c(15, 16, 17))
+
+legend("bottom", legend = c("Issue Position", "Ideology" , "Party ID"),
+       pch = c(15, 16, 17),
+       cex =1.5, ncol = 3, xpd = NA)
+
+
+# E2cc
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Causality")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = id_bin[7:9, 1] + 1.96*id_bin[7:9,2], 
+         y1 = id_bin[7:9, 1] - 1.96*id_bin[7:9,2], 
+         lwd = 3, col = "gray40")
+points(1:3, id_bin[7:9,1], cex = 2, pch = c(15, 16, 17))
+
+dev.off()
+
## quartz_off_screen 
+##                 2
+
# Knowledge Plot--------------------------------------------------------------------------
+
+pdf("lucid_figures/pk_b_2way.pdf", height = 4, width = 8)
+
+par(mfrow = c(1,3), pch = 16, mar = c(0,2,1,0), oma = c(.5,5,1,0))
+
+# E1
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Evidence Interpretation")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = pk_bin[1:3, 1] + 1.96*pk_bin[1:3,2], 
+         y1 = pk_bin[1:3, 1] - 1.96*pk_bin[1:3,2], 
+         lwd = 3, col = "gray40")
+points(1:3, pk_bin[1:3,1], cex = 2, pch = c(15, 16, 17))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Political Knowledge", side = 2, padj = -5) # common y axis label
+
+
+# E2ss
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Sample Size")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = pk_bin[4:6, 1] + 1.96*pk_bin[4:6,2], 
+         y1 = pk_bin[4:6, 1] - 1.96*pk_bin[4:6,2], 
+         lwd = 3, col = "gray40")
+points(1:3, pk_bin[4:6,1], cex = 2, pch = c(15, 16, 17))
+
+legend("bottom", legend = c("Issue Position", "Ideology" , "Party ID"),
+       pch = c(15, 16, 17),
+       cex =1.5, ncol = 3, xpd = NA)
+
+# E2cc
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Causality")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = pk_bin[7:9, 1] + 1.96*pk_bin[7:9,2], 
+         y1 = pk_bin[7:9, 1] - 1.96*pk_bin[7:9,2], 
+         lwd = 3, col = "gray40")
+points(1:3, pk_bin[7:9,1], cex = 2, pch = c(15, 16, 17))
+
+dev.off()
+
## quartz_off_screen 
+##                 2
+
# Numeracy Plot-----------------------------------------------------------------------
+
+pdf("lucid_figures/numeracy_b_2way.pdf", height = 4, width = 8)
+
+par(mfrow = c(1,1), pch = 16, mar = c(0,2,1,0), oma = c(.5,5,1,0))
+
+# E1
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Evidence Interpretation")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = num_bin[1:3, 1] + 1.96*num_bin[1:3,2], 
+         y1 = num_bin[1:3, 1] - 1.96*num_bin[1:3,2], 
+         lwd = 3, col = "gray40")
+points(1:3, num_bin[1:3,1], cex = 2, pch = c(15, 16, 17))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Numeracy", side = 2, padj = -5) # common y axis label
+
+legend("bottom", legend = c("Issue Position", "Ideology" , "Party ID"),
+       pch = c(15, 16, 17),
+       cex =1.5, ncol = 3, xpd = NA)
+
+dev.off()
+
## quartz_off_screen 
+##                 2
+
# Combined PK and Numeracy Plot----------------------------------------------------------
+
+pdf("lucid_figures/pk_numeracy_b_2way.pdf", height = 4, width = 8)
+
+
+par(mfrow = c(1,3), pch = 16, mar = c(0,2,1,0), oma = c(2,5,1,0))
+
+# E1
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.4,.7), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Evidence Interpretation")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = pk_bin[1:3, 1] + 1.96*pk_bin[1:3,2], 
+         y1 = pk_bin[1:3, 1] - 1.96*pk_bin[1:3,2], 
+         lwd = 3, col = "gray40")
+points(1:3, pk_bin[1:3,1], cex = 2, pch = c(15, 16, 17))
+
+# add numeracy points and segments
+num_adj <- .25
+alpha_level <- 150
+segments(x0 = 1:3 + num_adj, x1 = 1:3 + num_adj, 
+         y0 = num_bin[1:3, 1] + 1.96*num_bin[1:3,2], 
+         y1 = num_bin[1:3, 1] - 1.96*num_bin[1:3,2], 
+         lwd = 3, col = makeTransparent("gray40", alpha_level))
+points(1:3 + num_adj, num_bin[1:3, 1], cex = 2, pch = c(15, 16, 17), 
+       col = makeTransparent("black", alpha_level))
+
+axis(2, at = seq(-.4, .7, .20), labels = seq(-.4, .7, .20)) # common y axis
+mtext(text = "Expertise", side = 2, padj = -5) # common y axis label
+
+legend(.8, -.25, legend = c("Political Knowledge", "Numeracy"),
+       pch = c(15), col = c("black", makeTransparent("black",
+                                                     alpha_level)),
+       cex = 1.5, ncol = 1, xpd = NA, pt.cex = 2, bty = "n")
+
+# E2ss
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.4,.7), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Sample Size")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = pk_bin[4:6, 1] + 1.96*pk_bin[4:6,2], 
+         y1 = pk_bin[4:6, 1] - 1.96*pk_bin[4:6,2], 
+         lwd = 3, col = "gray40")
+points(1:3, pk_bin[4:6,1], cex = 2, pch = c(15, 16, 17))
+
+#legend("bottom", legend = c("Political Knowledge", "Numeracy"),
+#       pch = c(16), col = c("black", makeTransparent("black",
+#                                                     alpha_level)),
+#       cex = 1.5, ncol = 2, xpd = NA, pt.cex = 2, lwd = 2)
+
+legend(.5, -.3, legend = c("Issue Position", "Ideology" , "Party ID"),
+       pch = c(15, 16, 17),
+       cex =1.5, ncol = 3, xpd = NA)
+
+# E2cc
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.4,.7), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Causality")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = pk_bin[7:9, 1] + 1.96*pk_bin[7:9,2], 
+         y1 = pk_bin[7:9, 1] - 1.96*pk_bin[7:9,2], 
+         lwd = 3, col = "gray40")
+points(1:3, pk_bin[7:9,1], cex = 2, pch = c(15, 16, 17))
+
+
+dev.off()
+
## quartz_off_screen 
+##                 2
+
+
+
+

6 Ideological Differences in NFC and Openness Trait Index

+

+
## [1] 0.1490705
+
## [1] 0.2586452
+
## [1] 0.06499643
+
## [1] 0.1431215
+
+
+

7 R&R Pooled 3-way Interactions

+
# Pool Data -----------------------------------------------------------------------------
+
+# Rescale Outcomes
+
+data$exp2_goodSample_01 <- rescale_01(data$exp2_goodSample_uns, max = 7)
+#table(data$exp2_goodSample, data$exp2_goodSample_01)
+
+data$exp2_goodCausal_01 <- rescale_01(data$exp2_goodCausal_uns, max = 7)
+#table(data$exp2_goodCausal, data$exp2_goodCausal_01)
+
+# 1. List the variables needed for the models
+
+# create respondent id var
+data$id <- 1:nrow(data)
+
+keep_vars <- c("id", "exp1_correct", "exp2_goodSample_01", "exp2_goodCausal_01", 
+               "exp1_congenial_issue_binary", "exp1_congenial_ideo_binary", 
+               "exp1_congenial_pid_binary", "exp1_congenial_issue_cont.s",
+               "exp1_congenial_ideo_cont.s", "exp1_congenial_pid_cont.s", 
+               "pol_id.s", "huddy_id.s", "nfc_mean.s", "numeracy.s", 
+               "pk_mean.s", "age.s", "female", "edu_hs", "edu_somecollege",
+               "edu_college", "edu_grad", "hispanic", "nonhisp_black", 
+               "income.s", "exp1_issue", "exp2_issue")
+
+# 2. create stacked dataframe (one copy of the data per outcome)
+new <- rbind(data, data, data)
+new$out_num <- c(rep( "e1", nrow(data)), 
+                 rep ("e2ss", nrow(data)), 
+                 rep ("e2cc", nrow(data)))
+
+# 3. create new outcome variable
+new$outcome <- NA
+new$outcome[new$out_num == "e1"] <- new$exp1_correct[new$out_num == "e1"]
+new$outcome[new$out_num == "e2ss"] <- new$exp2_goodSample_01[new$out_num == "e2ss"]
+new$outcome[new$out_num == "e2cc"] <- new$exp2_goodCausal_01[new$out_num == "e2cc"]
+
+# 4. create new issue variable
+new$issue <- NA
+new$issue[new$out_num == "e1"] <- new$exp1_issue[new$out_num == "e1"]
+new$issue[new$out_num == "e2ss"] <- new$exp2_issue[new$out_num == "e2ss"]
+new$issue[new$out_num == "e2cc"] <- new$exp2_issue[new$out_num == "e2cc"]
+
+
+# 5. create new congeniality measure
+
+new$congenial_issue_binary <- ifelse(new$out_num == "e1", new$exp1_congenial_issue_binary, 
+                                    new$exp2_congenial_issue_binary)
+new$congenial_ideo_binary <- ifelse(new$out_num == "e1", new$exp1_congenial_ideo_binary, 
+                                    new$exp2_congenial_ideo_binary) 
+new$congenial_pid_binary <- ifelse(new$out_num == "e1", new$exp1_congenial_pid_binary, 
+                                    new$exp2_congenial_pid_binary)
+
+new$congenial_issue_cont.s <- ifelse(new$out_num == "e1", new$exp1_congenial_issue_cont.s, 
+                                    new$exp2_congenial_issue_cont.s)
+new$congenial_ideo_cont.s <- ifelse(new$out_num == "e1", new$exp1_congenial_ideo_cont.s, 
+                                    new$exp2_congenial_ideo_cont.s) 
+new$congenial_pid_cont.s <- ifelse(new$out_num == "e1", new$exp1_congenial_pid_cont.s, 
+                                    new$exp2_congenial_pid_cont.s)
+
+
+# 1A: Pool 3 Outcomes, Binary-------------------------------------------------------------
+
+# Issue Position
+pooled_a_iss_robust <- 
+  lm_robust(outcome ~ congenial_issue_binary*nfc_mean.s +
+     congenial_issue_binary*pol_id.s*pk_mean.s + 
+     age.s + female + edu_hs + edu_somecollege + edu_college +
+     edu_grad + hispanic + nonhisp_black + income.s + issue, 
+     data = new, clusters = id)
+
+# Ideology
+pooled_a_ideo_robust <- 
+  lm_robust(outcome ~ congenial_ideo_binary*nfc_mean.s +
+     congenial_ideo_binary*pol_id.s*pk_mean.s + 
+     age.s + female + edu_hs + edu_somecollege + edu_college +
+     edu_grad + hispanic + nonhisp_black + income.s + issue, 
+   data = new, clusters = id)
+
+# Party ID
+pooled_a_pid_robust <- 
+  lm_robust(outcome ~ congenial_pid_binary*nfc_mean.s +
+              congenial_pid_binary*huddy_id.s*pk_mean.s + 
+              age.s + female + edu_hs + edu_somecollege + edu_college +
+              edu_grad + hispanic + nonhisp_black + income.s + issue,
+   data = new, clusters = id)
+
+# 1B: Pool 3 Outcomes, Continuous---------------------------------------------------------
+
+# Issue Position
+pooled_b_iss_robust <- 
+  lm_robust(outcome ~ congenial_issue_cont.s*nfc_mean.s +
+     congenial_issue_cont.s*pol_id.s*pk_mean.s + 
+     age.s + female + edu_hs + edu_somecollege + edu_college +
+     edu_grad + hispanic + nonhisp_black + income.s + issue, 
+     data = new, clusters = id)
+
+# Ideology
+pooled_b_ideo_robust <- 
+  lm_robust(outcome ~ congenial_ideo_cont.s*nfc_mean.s +
+     congenial_ideo_cont.s*pol_id.s*pk_mean.s + 
+     age.s + female + edu_hs + edu_somecollege + edu_college +
+     edu_grad + hispanic + nonhisp_black + income.s + issue, 
+   data = new, clusters = id)
+
+# Party ID
+pooled_b_pid_robust <- 
+  lm_robust(outcome ~ congenial_pid_cont.s*nfc_mean.s +
+              congenial_pid_cont.s*huddy_id.s*pk_mean.s + 
+              age.s + female + edu_hs + edu_somecollege + edu_college +
+              edu_grad + hispanic + nonhisp_black + income.s + issue,
+   data = new, clusters = id)
+
+# 1C: Pool 2 Outcomes, Binary------------------------------------------------------------
+
+# Issue Position
+pooled_c_iss_robust <- 
+  lm_robust(outcome ~ congenial_issue_binary*nfc_mean.s +
+     congenial_issue_binary*pol_id.s*pk_mean.s + 
+     age.s + female + edu_hs + edu_somecollege + edu_college +
+     edu_grad + hispanic + nonhisp_black + income.s + issue, 
+     data = new[new$out_num != "e1",], clusters = id)
+
+# Ideology
+pooled_c_ideo_robust <- 
+  lm_robust(outcome ~ congenial_ideo_binary*nfc_mean.s +
+     congenial_ideo_binary*pol_id.s*pk_mean.s + 
+     age.s + female + edu_hs + edu_somecollege + edu_college +
+     edu_grad + hispanic + nonhisp_black + income.s + issue, 
+   data = new[new$out_num != "e1",], clusters = id)
+
+# Party ID
+pooled_c_pid_robust <- 
+  lm_robust(outcome ~ congenial_pid_binary*nfc_mean.s +
+              congenial_pid_binary*huddy_id.s*pk_mean.s + 
+              age.s + female + edu_hs + edu_somecollege + edu_college +
+              edu_grad + hispanic + nonhisp_black + income.s + issue,
+   data = new[new$out_num != "e1",], clusters = id)
+
+# 1D: Pool 2 Outcomes, Continuous--------------------------------------------------------
+
+# Issue Position
+pooled_d_iss_robust <- 
+  lm_robust(outcome ~ congenial_issue_cont.s*nfc_mean.s +
+     congenial_issue_cont.s*pol_id.s*pk_mean.s + 
+     age.s + female + edu_hs + edu_somecollege + edu_college +
+     edu_grad + hispanic + nonhisp_black + income.s + issue, 
+     data = new[new$out_num != "e1",], clusters = id)
+
+# Ideology
+pooled_d_ideo_robust <- 
+  lm_robust(outcome ~ congenial_ideo_cont.s*nfc_mean.s +
+     congenial_ideo_cont.s*pol_id.s*pk_mean.s + 
+     age.s + female + edu_hs + edu_somecollege + edu_college +
+     edu_grad + hispanic + nonhisp_black + income.s + issue, 
+   data = new[new$out_num != "e1",], clusters = id)
+
+# Party ID
+pooled_d_pid_robust <- 
+  lm_robust(outcome ~ congenial_pid_cont.s*nfc_mean.s +
+              congenial_pid_cont.s*huddy_id.s*pk_mean.s + 
+              age.s + female + edu_hs + edu_somecollege + edu_college +
+              edu_grad + hispanic + nonhisp_black + income.s + issue,
+   data = new[new$out_num != "e1",], clusters = id)
+
+summary(pooled_d_iss_robust)
+
## 
+## Call:
+## lm_robust(formula = outcome ~ congenial_issue_cont.s * nfc_mean.s + 
+##     congenial_issue_cont.s * pol_id.s * pk_mean.s + age.s + female + 
+##     edu_hs + edu_somecollege + edu_college + edu_grad + hispanic + 
+##     nonhisp_black + income.s + issue, data = new[new$out_num != 
+##     "e1", ], clusters = id)
+## 
+## Standard error type:  CR2 
+## 
+## Coefficients:
+##                                             Estimate Std. Error   t value
+## (Intercept)                                5.390e-01   0.038909 13.853749
+## congenial_issue_cont.s                     4.242e-02   0.005662  7.492128
+## nfc_mean.s                                 1.150e-02   0.005536  2.077016
+## pol_id.s                                   1.205e-02   0.005440  2.216006
+## pk_mean.s                                 -6.893e-03   0.005642 -1.221719
+## age.s                                     -2.066e-03   0.005411 -0.381857
+## female                                    -3.897e-02   0.010410 -3.743837
+## edu_hs                                     2.082e-02   0.038608  0.539288
+## edu_somecollege                            1.343e-02   0.038663  0.347292
+## edu_college                               -1.637e-02   0.039605 -0.413303
+## edu_grad                                  -5.977e-03   0.042260 -0.141428
+## hispanic                                   3.714e-02   0.016214  2.290416
+## nonhisp_black                              5.584e-02   0.017000  3.284586
+## income.s                                   5.420e-03   0.005434  0.997285
+## issueabort                                -6.607e-03   0.015353 -0.430323
+## issuegun                                  -6.192e-03   0.015195 -0.407514
+## issueimm                                   1.142e-04   0.014839  0.007699
+## issuewage                                 -3.228e-03   0.015222 -0.212035
+## congenial_issue_cont.s:nfc_mean.s         -4.060e-03   0.005600 -0.724957
+## congenial_issue_cont.s:pol_id.s            7.702e-05   0.005711  0.013485
+## congenial_issue_cont.s:pk_mean.s           1.207e-02   0.005708  2.114640
+## pol_id.s:pk_mean.s                        -7.940e-03   0.004924 -1.612377
+## congenial_issue_cont.s:pol_id.s:pk_mean.s  1.282e-02   0.005371  2.386848
+##                                            Pr(>|t|)   CI Lower  CI Upper
+## (Intercept)                               1.149e-16  0.4603392  0.617729
+## congenial_issue_cont.s                    2.033e-13  0.0313018  0.053533
+## nfc_mean.s                                3.843e-02  0.0006156  0.022380
+## pol_id.s                                  2.714e-02  0.0013670  0.022742
+## pk_mean.s                                 2.223e-01 -0.0179740  0.004188
+## age.s                                     7.027e-01 -0.0126916  0.008559
+## female                                    1.915e-04 -0.0594030 -0.018546
+## edu_hs                                    5.928e-01 -0.0573057  0.098948
+## edu_somecollege                           7.304e-01 -0.0649524  0.091807
+## edu_college                               6.816e-01 -0.0963857  0.063648
+## edu_grad                                  8.881e-01 -0.0907827  0.078829
+## hispanic                                  2.303e-02  0.0051660  0.069106
+## nonhisp_black                             1.173e-03  0.0223504  0.089324
+## income.s                                  3.191e-01 -0.0052564  0.016096
+## issueabort                                6.671e-01 -0.0367640  0.023551
+## issuegun                                  6.838e-01 -0.0360349  0.023650
+## issueimm                                  9.939e-01 -0.0290288  0.029257
+## issuewage                                 8.322e-01 -0.0331260  0.026671
+## congenial_issue_cont.s:nfc_mean.s         4.691e-01 -0.0150845  0.006965
+## congenial_issue_cont.s:pol_id.s           9.892e-01 -0.0111589  0.011313
+## congenial_issue_cont.s:pk_mean.s          3.503e-02  0.0008519  0.023290
+## pol_id.s:pk_mean.s                        1.082e-01 -0.0176402  0.001760
+## congenial_issue_cont.s:pol_id.s:pk_mean.s 1.801e-02  0.0022230  0.023418
+##                                                DF
+## (Intercept)                                 39.09
+## congenial_issue_cont.s                     705.55
+## nfc_mean.s                                 407.54
+## pol_id.s                                   502.33
+## pk_mean.s                                  582.27
+## age.s                                      658.67
+## female                                    1005.10
+## edu_hs                                      38.48
+## edu_somecollege                             36.44
+## edu_college                                 40.44
+## edu_grad                                    51.89
+## hispanic                                   201.68
+## nonhisp_black                              241.47
+## income.s                                   518.44
+## issueabort                                 553.84
+## issuegun                                   596.81
+## issueimm                                   593.42
+## issuewage                                  569.81
+## congenial_issue_cont.s:nfc_mean.s          273.19
+## congenial_issue_cont.s:pol_id.s            325.40
+## congenial_issue_cont.s:pk_mean.s           437.01
+## pol_id.s:pk_mean.s                         242.37
+## congenial_issue_cont.s:pol_id.s:pk_mean.s  183.36
+## 
+## Multiple R-squared:  0.04738 ,   Adjusted R-squared:  0.04007 
+## F-statistic: 6.427 on 22 and 1445 DF,  p-value: < 2.2e-16
+
summary(pooled_d_ideo_robust)
+
## 
+## Call:
+## lm_robust(formula = outcome ~ congenial_ideo_cont.s * nfc_mean.s + 
+##     congenial_ideo_cont.s * pol_id.s * pk_mean.s + age.s + female + 
+##     edu_hs + edu_somecollege + edu_college + edu_grad + hispanic + 
+##     nonhisp_black + income.s + issue, data = new[new$out_num != 
+##     "e1", ], clusters = id)
+## 
+## Standard error type:  CR2 
+## 
+## Coefficients:
+##                                            Estimate Std. Error  t value
+## (Intercept)                               0.5434384   0.039801 13.65384
+## congenial_ideo_cont.s                     0.0260594   0.005943  4.38457
+## nfc_mean.s                                0.0106721   0.005502  1.93972
+## pol_id.s                                  0.0108347   0.005504  1.96855
+## pk_mean.s                                -0.0061713   0.005692 -1.08415
+## age.s                                    -0.0005173   0.005503 -0.09401
+## female                                   -0.0362450   0.010608 -3.41665
+## edu_hs                                    0.0143740   0.039365  0.36515
+## edu_somecollege                           0.0094565   0.039571  0.23898
+## edu_college                              -0.0198356   0.040569 -0.48893
+## edu_grad                                 -0.0176657   0.043090 -0.40997
+## hispanic                                  0.0475030   0.016398  2.89693
+## nonhisp_black                             0.0518889   0.017136  3.02804
+## income.s                                  0.0047472   0.005480  0.86635
+## issueabort                               -0.0065268   0.015607 -0.41820
+## issuegun                                 -0.0083017   0.015365 -0.54029
+## issueimm                                 -0.0002510   0.015105 -0.01662
+## issuewage                                -0.0055326   0.015421 -0.35876
+## congenial_ideo_cont.s:nfc_mean.s          0.0047641   0.005801  0.82131
+## congenial_ideo_cont.s:pol_id.s            0.0080144   0.005750  1.39382
+## congenial_ideo_cont.s:pk_mean.s           0.0176323   0.005908  2.98441
+## pol_id.s:pk_mean.s                       -0.0087261   0.004955 -1.76101
+## congenial_ideo_cont.s:pol_id.s:pk_mean.s  0.0011699   0.005460  0.21426
+##                                           Pr(>|t|)   CI Lower  CI Upper      DF
+## (Intercept)                              4.690e-16  4.628e-01  0.624072   37.16
+## congenial_ideo_cont.s                    1.442e-05  1.438e-02  0.037739  459.64
+## nfc_mean.s                               5.311e-02 -1.439e-04  0.021488  402.33
+## pol_id.s                                 4.955e-02  2.122e-05  0.021648  502.71
+## pk_mean.s                                2.787e-01 -1.735e-02  0.005008  586.98
+## age.s                                    9.251e-01 -1.132e-02  0.010288  654.50
+## female                                   6.593e-04 -5.706e-02 -0.015428 1005.61
+## edu_hs                                   7.171e-01 -6.541e-02  0.094156   36.72
+## edu_somecollege                          8.125e-01 -7.088e-02  0.089791   34.98
+## edu_college                              6.277e-01 -1.019e-01  0.062250   38.61
+## edu_grad                                 6.836e-01 -1.042e-01  0.068900   49.60
+## hispanic                                 4.182e-03  1.517e-02  0.079835  202.89
+## nonhisp_black                            2.733e-03  1.813e-02  0.085647  237.91
+## income.s                                 3.867e-01 -6.018e-03  0.015512  521.27
+## issueabort                               6.760e-01 -3.718e-02  0.024129  554.65
+## issuegun                                 5.892e-01 -3.848e-02  0.021875  595.88
+## issueimm                                 9.867e-01 -2.992e-02  0.029414  592.38
+## issuewage                                7.199e-01 -3.582e-02  0.024757  569.34
+## congenial_ideo_cont.s:nfc_mean.s         4.128e-01 -6.699e-03  0.016227  147.92
+## congenial_ideo_cont.s:pol_id.s           1.650e-01 -3.328e-03  0.019357  187.84
+## congenial_ideo_cont.s:pk_mean.s          3.090e-03  6.003e-03  0.029262  282.39
+## pol_id.s:pk_mean.s                       7.945e-02 -1.848e-02  0.001033  252.26
+## congenial_ideo_cont.s:pol_id.s:pk_mean.s 8.308e-01 -9.658e-03  0.011998  103.86
+## 
+## Multiple R-squared:  0.03383 ,   Adjusted R-squared:  0.02642 
+## F-statistic: 4.373 on 22 and 1444 DF,  p-value: 7.763e-11
+
summary(pooled_d_pid_robust)
+
## 
+## Call:
+## lm_robust(formula = outcome ~ congenial_pid_cont.s * nfc_mean.s + 
+##     congenial_pid_cont.s * huddy_id.s * pk_mean.s + age.s + female + 
+##     edu_hs + edu_somecollege + edu_college + edu_grad + hispanic + 
+##     nonhisp_black + income.s + issue, data = new[new$out_num != 
+##     "e1", ], clusters = id)
+## 
+## Standard error type:  CR2 
+## 
+## Coefficients:
+##                                             Estimate Std. Error  t value
+## (Intercept)                                0.5208571   0.046757 11.13962
+## congenial_pid_cont.s                       0.0236879   0.005554  4.26466
+## nfc_mean.s                                 0.0130715   0.006091  2.14588
+## huddy_id.s                                 0.0285792   0.005876  4.86337
+## pk_mean.s                                 -0.0064709   0.006604 -0.97980
+## age.s                                     -0.0007553   0.006062 -0.12459
+## female                                    -0.0428045   0.011694 -3.66046
+## edu_hs                                     0.0279551   0.047047  0.59419
+## edu_somecollege                            0.0367982   0.047187  0.77983
+## edu_college                                0.0017206   0.048360  0.03558
+## edu_grad                                   0.0094507   0.050567  0.18689
+## hispanic                                   0.0506643   0.018097  2.79955
+## nonhisp_black                              0.0423532   0.018422  2.29905
+## income.s                                   0.0007516   0.006055  0.12413
+## issueabort                                 0.0060268   0.017288  0.34860
+## issuegun                                  -0.0002843   0.017032 -0.01669
+## issueimm                                   0.0141011   0.017138  0.82279
+## issuewage                                  0.0013364   0.016771  0.07968
+## congenial_pid_cont.s:nfc_mean.s            0.0013574   0.005518  0.24598
+## congenial_pid_cont.s:huddy_id.s           -0.0033797   0.005994 -0.56383
+## congenial_pid_cont.s:pk_mean.s             0.0129580   0.005702  2.27239
+## huddy_id.s:pk_mean.s                      -0.0190227   0.005871 -3.24026
+## congenial_pid_cont.s:huddy_id.s:pk_mean.s  0.0071117   0.005833  1.21926
+##                                            Pr(>|t|)  CI Lower  CI Upper     DF
+## (Intercept)                               1.803e-11  0.424810  0.616904  26.36
+## congenial_pid_cont.s                      2.254e-05  0.012784  0.034592 760.39
+## nfc_mean.s                                3.262e-02  0.001088  0.025055 328.37
+## huddy_id.s                                1.581e-06  0.017032  0.040127 466.17
+## pk_mean.s                                 3.277e-01 -0.019448  0.006506 483.77
+## age.s                                     9.009e-01 -0.012663  0.011152 544.04
+## female                                    2.674e-04 -0.065757 -0.019852 846.51
+## edu_hs                                    5.574e-01 -0.068646  0.124556  26.60
+## edu_somecollege                           4.427e-01 -0.060313  0.133909  25.38
+## edu_college                               9.719e-01 -0.097431  0.100872  27.44
+## edu_grad                                  8.529e-01 -0.093310  0.112211  34.04
+## hispanic                                  5.720e-03  0.014936  0.086393 167.25
+## nonhisp_black                             2.248e-02  0.006038  0.078668 210.97
+## income.s                                  9.013e-01 -0.011149  0.012652 446.18
+## issueabort                                7.275e-01 -0.027944  0.039997 476.63
+## issuegun                                  9.867e-01 -0.033748  0.033180 495.67
+## issueimm                                  4.110e-01 -0.019574  0.047776 481.55
+## issuewage                                 9.365e-01 -0.031620  0.034292 471.74
+## congenial_pid_cont.s:nfc_mean.s           8.059e-01 -0.009512  0.012227 243.13
+## congenial_pid_cont.s:huddy_id.s           5.731e-01 -0.015159  0.008400 454.28
+## congenial_pid_cont.s:pk_mean.s            2.358e-02  0.001748  0.024168 409.38
+## huddy_id.s:pk_mean.s                      1.349e-03 -0.030583 -0.007463 261.86
+## congenial_pid_cont.s:huddy_id.s:pk_mean.s 2.240e-01 -0.004381  0.018605 228.19
+## 
+## Multiple R-squared:  0.04162 ,   Adjusted R-squared:  0.03281 
+## F-statistic: 5.122 on 22 and 1207 DF,  p-value: 1.942e-13
+
+# To Plot: extract the three-way interaction term (row 23 of each tidy output)
+tidy(pooled_a_iss_robust)[23, c("term", "estimate", "std.error")]
+
##                                         term   estimate  std.error
+## 23 congenial_issue_binary:pol_id.s:pk_mean.s 0.01430036 0.01122905
+
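+# Sketch: the same estimate can also be pulled by term name rather than the
+# hard-coded row 23, which is more robust if the specification ever changes
+# (dplyr is loaded in the Setup section).
+tidy(pooled_a_iss_robust) %>%
+  filter(term == "congenial_issue_binary:pol_id.s:pk_mean.s") %>%
+  select(term, estimate, std.error)
+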
tidy(pooled_a_ideo_robust)[23, c("term", "estimate", "std.error")]
+
##                                        term   estimate  std.error
+## 23 congenial_ideo_binary:pol_id.s:pk_mean.s 0.03283857 0.01467805
+
tidy(pooled_a_pid_robust)[23, c("term", "estimate", "std.error")]
+
##                                         term  estimate std.error
+## 23 congenial_pid_binary:huddy_id.s:pk_mean.s 0.0106842 0.0124378
+
tidy(pooled_b_iss_robust)[23,c("term", "estimate", "std.error")]
+
##                                         term    estimate   std.error
+## 23 congenial_issue_cont.s:pol_id.s:pk_mean.s 0.008536276 0.005771084
+
tidy(pooled_b_ideo_robust)[23,c("term", "estimate", "std.error")]
+
##                                        term   estimate  std.error
+## 23 congenial_ideo_cont.s:pol_id.s:pk_mean.s 0.01086174 0.00561971
+
tidy(pooled_b_pid_robust)[23,c("term", "estimate", "std.error")]
+
##                                         term   estimate   std.error
+## 23 congenial_pid_cont.s:huddy_id.s:pk_mean.s 0.00163644 0.006055017
+
tidy(pooled_c_iss_robust)[23,c("term", "estimate", "std.error")]
+
##                                         term   estimate   std.error
+## 23 congenial_issue_binary:pol_id.s:pk_mean.s 0.02771031 0.009991809
+
tidy(pooled_c_ideo_robust)[23,c("term", "estimate", "std.error")]
+
##                                        term    estimate  std.error
+## 23 congenial_ideo_binary:pol_id.s:pk_mean.s 0.007209673 0.01362074
+
tidy(pooled_c_pid_robust)[23,c("term", "estimate", "std.error")]
+
##                                         term   estimate std.error
+## 23 congenial_pid_binary:huddy_id.s:pk_mean.s 0.01818385 0.0116276
+
tidy(pooled_d_iss_robust)[23,c("term", "estimate", "std.error")]
+
##                                         term   estimate   std.error
+## 23 congenial_issue_cont.s:pol_id.s:pk_mean.s 0.01282045 0.005371287
+
tidy(pooled_d_ideo_robust)[23,c("term", "estimate", "std.error")]
+
##                                        term    estimate   std.error
+## 23 congenial_ideo_cont.s:pol_id.s:pk_mean.s 0.001169943 0.005460296
+
tidy(pooled_d_pid_robust)[23,c("term", "estimate", "std.error")]
+
##                                         term    estimate   std.error
+## 23 congenial_pid_cont.s:huddy_id.s:pk_mean.s 0.007111718 0.005832831
+
r1_pooled_bin <- rbind(
+  tidy(pooled_a_iss_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_a_ideo_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_a_pid_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_b_iss_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_b_ideo_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_b_pid_robust)[23,c("term", "estimate", "std.error")], 
+  tidy(pooled_c_iss_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_c_ideo_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_c_pid_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_d_iss_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_d_ideo_robust)[23,c("term", "estimate", "std.error")],
+  tidy(pooled_d_pid_robust)[23,c("term", "estimate", "std.error")])
+
+r1_pooled_bin$model <- c(rep("a",3), rep("b",3), rep("c",3), rep("d",3))
+r1_pooled_bin$measure <- rep(c("iss", "ideo", "pid"), 4)
+
+# a = 3 pooled outcomes (Experiments 1 & 2), binary congeniality
+# b = 3 pooled outcomes (Experiments 1 & 2), continuous congeniality
+# c = 2 pooled outcomes (Experiment 2 only), binary congeniality
+# d = 2 pooled outcomes (Experiment 2 only), continuous congeniality
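+
+# Optional sketch: 95% confidence bounds could be precomputed once, from
+# each model's own standard errors, simplifying the segments() calls below.
+r1_pooled_bin$ci_lo <- r1_pooled_bin$estimate - 1.96 * r1_pooled_bin$std.error
+r1_pooled_bin$ci_hi <- r1_pooled_bin$estimate + 1.96 * r1_pooled_bin$std.error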
+
+pdf("lucid_figures/rr_3way_pooled.pdf", height = 4, width = 8)
+
+
+par(mfrow = c(2,1), pch = 16, mar = c(1,2,1,0), oma = c(.5,5,1,0))
+
+# 3 Outcomes Pooled ----------------------------------------------------------------------
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.1,.1), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "3 Pooled Outcomes (Experiments 1 & 2)")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+
+# binary congeniality
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = r1_pooled_bin[r1_pooled_bin$model == "a", 2] + 
+           1.96*r1_pooled_bin[r1_pooled_bin$model == "a", 3], 
+         y1 = r1_pooled_bin[r1_pooled_bin$model == "a", 2] -
+           1.96*r1_pooled_bin[r1_pooled_bin$model == "a", 3], 
+         lwd = 3, col = "gray40")
+points(1:3, r1_pooled_bin[r1_pooled_bin$model == "a", 2], cex = 1, pch = c(15, 16, 17))
+
+# continuous congeniality
+segments(x0 = 1.2:3.2, x1 = 1.2:3.2, 
+         y0 = r1_pooled_bin[r1_pooled_bin$model == "b", 2] + 
+           1.96*r1_pooled_bin[r1_pooled_bin$model == "b", 3], 
+         y1 = r1_pooled_bin[r1_pooled_bin$model == "b", 2] -
+           1.96*r1_pooled_bin[r1_pooled_bin$model == "b", 3], 
+         lwd = 3, col = "gray40")
+points(1.2:3.2, r1_pooled_bin[r1_pooled_bin$model == "b", 2], 
+       cex = 1, pch = c(0, 1, 2))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Cong. X PK X ID", side = 2, padj = -5) # common y axis label
+
+# 2 Outcomes Pooled ----------------------------------------------------------------------
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.1,.1), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "2 Pooled Outcomes (Experiment 2 Only)")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+
+# binary congeniality
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = r1_pooled_bin[r1_pooled_bin$model == "c", 2] + 
+           1.96*r1_pooled_bin[r1_pooled_bin$model == "c", 3], 
+         y1 = r1_pooled_bin[r1_pooled_bin$model == "c", 2] -
+           1.96*r1_pooled_bin[r1_pooled_bin$model == "c", 3], 
+         lwd = 3, col = "gray40")
+points(1:3, r1_pooled_bin[r1_pooled_bin$model == "c", 2], cex = 1, pch = c(15, 16, 17))
+
+# continuous congeniality
+segments(x0 = 1.2:3.2, x1 = 1.2:3.2, 
+         y0 = r1_pooled_bin[r1_pooled_bin$model == "d", 2] + 
+           1.96*r1_pooled_bin[r1_pooled_bin$model == "d", 3], 
+         y1 = r1_pooled_bin[r1_pooled_bin$model == "d", 2] -
+           1.96*r1_pooled_bin[r1_pooled_bin$model == "d", 3], 
+         lwd = 3, col = "gray40")
+points(1.2:3.2, r1_pooled_bin[r1_pooled_bin$model == "d", 2], 
+       cex = 1, pch = c(0, 1, 2))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Cong. X PK X ID", side = 2, padj = -5) # common y axis label
+
+legend(x = .6, y = -.09, legend = c("Issue Position", "Ideology" , "Party ID"),
+       pch = c(15, 16, 17),
+       cex =.7, ncol = 3, xpd = NA)
+
+legend(x = 2.5, y = -.08, legend = c("Binary Congeniality Measure", 
+                                     "Continuous Congeniality Measure"),
+       pch = c(16, 1),
+       cex =.7, ncol = 1, xpd = NA)
+
+dev.off()
+
## quartz_off_screen 
+##                 2
+
+
+

8 R&R NFC and Trait Index Moderation Analysis with No Controls

+
+

8.1 Need for Closure

+
# Experiment 1----------------------------------------------------------------------------
+
+# Issue Position
+r1_nfc_e1_iss <- glm(exp1_correct ~ exp1_congenial_issue_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e1_iss)
+
## 
+## Call:
+## glm(formula = exp1_correct ~ exp1_congenial_issue_binary * nfc_mean.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -0.5994  -0.3798  -0.3563   0.4945   0.6521  
+## 
+## Coefficients:
+##                                         Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                             0.364789   0.017512  20.831  < 2e-16
+## exp1_congenial_issue_binary             0.157813   0.024960   6.323 3.36e-10
+## nfc_mean.s                             -0.007957   0.017522  -0.454   0.6498
+## exp1_congenial_issue_binary:nfc_mean.s  0.044112   0.024943   1.768   0.0772
+##                                           
+## (Intercept)                            ***
+## exp1_congenial_issue_binary            ***
+## nfc_mean.s                                
+## exp1_congenial_issue_binary:nfc_mean.s .  
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.2404164)
+## 
+##     Null deviance: 380.87  on 1543  degrees of freedom
+## Residual deviance: 370.24  on 1540  degrees of freedom
+##   (272 observations deleted due to missingness)
+## AIC: 2186.9
+## 
+## Number of Fisher Scoring iterations: 2
+
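+Note: the glm() calls in this section specify no family argument, so R uses
+the default family = gaussian(), and each fit is ordinary least squares,
+identical to lm() with the same formula (for the binary Experiment 1 outcome
+this is a linear probability model). A quick equivalence check, as a sketch:
+
+lpm_check <- lm(exp1_correct ~ exp1_congenial_issue_binary*nfc_mean.s,
+                data = data)
+all.equal(coef(lpm_check), coef(r1_nfc_e1_iss))  # should be TRUE
+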
# Ideology
+r1_nfc_e1_ideo <- glm(exp1_correct ~ exp1_congenial_ideo_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e1_ideo)
+
## 
+## Call:
+## glm(formula = exp1_correct ~ exp1_congenial_ideo_binary * nfc_mean.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -0.5177  -0.4023  -0.3559   0.4853   0.6683  
+## 
+## Coefficients:
+##                                       Estimate Std. Error t value Pr(>|t|)    
+## (Intercept)                            0.37006    0.02184  16.942  < 2e-16 ***
+## exp1_congenial_ideo_binary             0.14526    0.03068   4.735  2.5e-06 ***
+## nfc_mean.s                             0.01710    0.02228   0.767    0.443    
+## exp1_congenial_ideo_binary:nfc_mean.s -0.01598    0.03039  -0.526    0.599    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.2423579)
+## 
+##     Null deviance: 254.23  on 1029  degrees of freedom
+## Residual deviance: 248.66  on 1026  degrees of freedom
+##   (786 observations deleted due to missingness)
+## AIC: 1469.1
+## 
+## Number of Fisher Scoring iterations: 2
+
# Party ID
+r1_nfc_e1_pid <- glm(exp1_correct ~ exp1_congenial_pid_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e1_pid)
+
## 
+## Call:
+## glm(formula = exp1_correct ~ exp1_congenial_pid_binary * nfc_mean.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -0.5400  -0.3866  -0.3754   0.5063   0.6282  
+## 
+## Coefficients:
+##                                      Estimate Std. Error t value Pr(>|t|)    
+## (Intercept)                           0.37832    0.01942  19.483  < 2e-16 ***
+## exp1_congenial_pid_binary             0.12691    0.02748   4.618 4.26e-06 ***
+## nfc_mean.s                           -0.00306    0.01993  -0.153    0.878    
+## exp1_congenial_pid_binary:nfc_mean.s  0.01939    0.02745   0.706    0.480    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.2431863)
+## 
+##     Null deviance: 317.83  on 1288  degrees of freedom
+## Residual deviance: 312.49  on 1285  degrees of freedom
+##   (527 observations deleted due to missingness)
+## AIC: 1841.5
+## 
+## Number of Fisher Scoring iterations: 2
+
# Experiment 2 (Sample Size)--------------------------------------------------------------
+
+# Issue Position
+r1_nfc_e2ss_iss <- glm(exp2_goodSample ~ exp2_congenial_issue_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e2ss_iss)
+
## 
+## Call:
+## glm(formula = exp2_goodSample ~ exp2_congenial_issue_binary * 
+##     nfc_mean.s, data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -1.8263  -0.4773   0.2089   0.4210   2.5233  
+## 
+## Coefficients:
+##                                        Estimate Std. Error t value Pr(>|t|)    
+## (Intercept)                            -0.10610    0.03609  -2.940  0.00334 ** 
+## exp2_congenial_issue_binary             0.20848    0.05059   4.121 3.97e-05 ***
+## nfc_mean.s                              0.02627    0.03704   0.709  0.47832    
+## exp2_congenial_issue_binary:nfc_mean.s -0.04151    0.05072  -0.818  0.41324    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.9899808)
+## 
+##     Null deviance: 1546.0  on 1547  degrees of freedom
+## Residual deviance: 1528.5  on 1544  degrees of freedom
+##   (268 observations deleted due to missingness)
+## AIC: 4383.4
+## 
+## Number of Fisher Scoring iterations: 2
+
# Ideology
+r1_nfc_e2ss_ideo <- glm(exp2_goodSample ~ exp2_congenial_ideo_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e2ss_ideo)
+
## 
+## Call:
+## glm(formula = exp2_goodSample ~ exp2_congenial_ideo_binary * 
+##     nfc_mean.s, data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -1.9169  -0.8104   0.1877   0.4634   2.5487  
+## 
+## Coefficients:
+##                                       Estimate Std. Error t value Pr(>|t|)    
+## (Intercept)                           -0.13578    0.04484  -3.028 0.002522 ** 
+## exp2_congenial_ideo_binary             0.24390    0.06323   3.857 0.000122 ***
+## nfc_mean.s                            -0.05379    0.04418  -1.218 0.223681    
+## exp2_congenial_ideo_binary:nfc_mean.s  0.10774    0.06264   1.720 0.085724 .  
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 1.032988)
+## 
+##     Null deviance: 1082.4  on 1033  degrees of freedom
+## Residual deviance: 1064.0  on 1030  degrees of freedom
+##   (782 observations deleted due to missingness)
+## AIC: 2973.9
+## 
+## Number of Fisher Scoring iterations: 2
+
# Party ID
+r1_nfc_e2ss_pid <- glm(exp2_goodSample ~ exp2_congenial_pid_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e2ss_pid)
+
## 
+## Call:
+## glm(formula = exp2_goodSample ~ exp2_congenial_pid_binary * nfc_mean.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -1.8819  -0.4952   0.2052   0.4065   2.4551  
+## 
+## Coefficients:
+##                                      Estimate Std. Error t value Pr(>|t|)    
+## (Intercept)                          -0.08749    0.03988  -2.194 0.028431 *  
+## exp2_congenial_pid_binary             0.18760    0.05647   3.322 0.000919 ***
+## nfc_mean.s                           -0.01753    0.03959  -0.443 0.657985    
+## exp2_congenial_pid_binary:nfc_mean.s  0.05800    0.05648   1.027 0.304644    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 1.030652)
+## 
+##     Null deviance: 1341.0  on 1292  degrees of freedom
+## Residual deviance: 1328.5  on 1289  degrees of freedom
+##   (523 observations deleted due to missingness)
+## AIC: 3714.4
+## 
+## Number of Fisher Scoring iterations: 2
+
# Experiment 2 (Causal Claim)-------------------------------------------------------------
+
+# Issue Position
+r1_nfc_e2cc_iss <- glm(exp2_goodCausal ~ exp2_congenial_issue_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e2cc_iss)
+
## 
+## Call:
+## glm(formula = exp2_goodCausal ~ exp2_congenial_issue_binary * 
+##     nfc_mean.s, data = data)
+## 
+## Deviance Residuals: 
+##      Min        1Q    Median        3Q       Max  
+## -2.62211  -0.62642   0.04762   0.67319   1.75900  
+## 
+## Coefficients:
+##                                         Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                            -0.192442   0.035607  -5.405 7.52e-08
+## exp2_congenial_issue_binary             0.377436   0.049954   7.556 7.11e-14
+## nfc_mean.s                              0.065048   0.036524   1.781   0.0751
+## exp2_congenial_issue_binary:nfc_mean.s  0.009611   0.050037   0.192   0.8477
+##                                           
+## (Intercept)                            ***
+## exp2_congenial_issue_binary            ***
+## nfc_mean.s                             .  
+## exp2_congenial_issue_binary:nfc_mean.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.9622066)
+## 
+##     Null deviance: 1542.9  on 1542  degrees of freedom
+## Residual deviance: 1480.8  on 1539  degrees of freedom
+##   (273 observations deleted due to missingness)
+## AIC: 4325.4
+## 
+## Number of Fisher Scoring iterations: 2
+
# Ideology
+r1_nfc_e2cc_ideo <- glm(exp2_goodCausal ~ exp2_congenial_ideo_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e2cc_ideo)
+
## 
+## Call:
+## glm(formula = exp2_goodCausal ~ exp2_congenial_ideo_binary * 
+##     nfc_mean.s, data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -2.5612  -0.6374   0.2372   0.8263   1.6772  
+## 
+## Coefficients:
+##                                        Estimate Std. Error t value Pr(>|t|)    
+## (Intercept)                           -0.162776   0.044814  -3.632 0.000295 ***
+## exp2_congenial_ideo_binary             0.336964   0.063291   5.324 1.25e-07 ***
+## nfc_mean.s                             0.049283   0.044020   1.120 0.263163    
+## exp2_congenial_ideo_binary:nfc_mean.s -0.007309   0.062645  -0.117 0.907146    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 1.031803)
+## 
+##     Null deviance: 1091.5  on 1030  degrees of freedom
+## Residual deviance: 1059.7  on 1027  degrees of freedom
+##   (785 observations deleted due to missingness)
+## AIC: 2964.1
+## 
+## Number of Fisher Scoring iterations: 2
+
# Party ID
+r1_nfc_e2cc_pid <- glm(exp2_goodCausal ~ exp2_congenial_pid_binary*nfc_mean.s,
+                        data = data)
+summary(r1_nfc_e2cc_pid)
+
## 
+## Call:
+## glm(formula = exp2_goodCausal ~ exp2_congenial_pid_binary * nfc_mean.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -2.6103  -0.5588   0.1826   0.7877   1.6754  
+## 
+## Coefficients:
+##                                      Estimate Std. Error t value Pr(>|t|)    
+## (Intercept)                          -0.12097    0.03927  -3.080  0.00211 ** 
+## exp2_congenial_pid_binary             0.26933    0.05569   4.836 1.48e-06 ***
+## nfc_mean.s                            0.06138    0.03888   1.579  0.11468    
+## exp2_congenial_pid_binary:nfc_mean.s  0.02944    0.05565   0.529  0.59696    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.9991476)
+## 
+##     Null deviance: 1314.8  on 1288  degrees of freedom
+## Residual deviance: 1283.9  on 1285  degrees of freedom
+##   (527 observations deleted due to missingness)
+## AIC: 3662.9
+## 
+## Number of Fisher Scoring iterations: 2
+
+
+

8.2 Trait Index

+
# Experiment 1----------------------------------------------------------------------------
+
+# Issue Position
+r1_ti_e1_iss <- glm(exp1_correct ~ exp1_congenial_issue_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e1_iss)
+
## 
+## Call:
+## glm(formula = exp1_correct ~ exp1_congenial_issue_binary * trait_index.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -0.5771  -0.3951  -0.3480   0.4872   0.6948  
+## 
+## Coefficients:
+##                                           Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                                0.36404    0.01754  20.754  < 2e-16
+## exp1_congenial_issue_binary                0.15760    0.02500   6.305 3.76e-10
+## trait_index.s                             -0.01766    0.01784  -0.989    0.323
+## exp1_congenial_issue_binary:trait_index.s  0.03674    0.02500   1.470    0.142
+##                                              
+## (Intercept)                               ***
+## exp1_congenial_issue_binary               ***
+## trait_index.s                                
+## exp1_congenial_issue_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.2407573)
+## 
+##     Null deviance: 380.87  on 1543  degrees of freedom
+## Residual deviance: 370.77  on 1540  degrees of freedom
+##   (272 observations deleted due to missingness)
+## AIC: 2189.1
+## 
+## Number of Fisher Scoring iterations: 2
+
# Ideology
+r1_ti_e1_ideo <- glm(exp1_correct ~ exp1_congenial_ideo_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e1_ideo)
+
## 
+## Call:
+## glm(formula = exp1_correct ~ exp1_congenial_ideo_binary * trait_index.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -0.5529  -0.3749  -0.3682   0.4917   0.6355  
+## 
+## Coefficients:
+##                                          Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                              0.370045   0.021847  16.938  < 2e-16
+## exp1_congenial_ideo_binary               0.145412   0.030687   4.739 2.46e-06
+## trait_index.s                            0.002193   0.022436   0.098    0.922
+## exp1_congenial_ideo_binary:trait_index.s 0.010692   0.030225   0.354    0.724
+##                                             
+## (Intercept)                              ***
+## exp1_congenial_ideo_binary               ***
+## trait_index.s                               
+## exp1_congenial_ideo_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.2423998)
+## 
+##     Null deviance: 254.23  on 1029  degrees of freedom
+## Residual deviance: 248.70  on 1026  degrees of freedom
+##   (786 observations deleted due to missingness)
+## AIC: 1469.3
+## 
+## Number of Fisher Scoring iterations: 2
+
# Party ID
+r1_ti_e1_pid <- glm(exp1_correct ~ exp1_congenial_pid_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e1_pid)
+
## 
+## Call:
+## glm(formula = exp1_correct ~ exp1_congenial_pid_binary * trait_index.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -0.5290  -0.3916  -0.3733   0.5005   0.6408  
+## 
+## Coefficients:
+##                                          Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                              0.378299   0.019421  19.479  < 2e-16
+## exp1_congenial_pid_binary                0.126305   0.027476   4.597 4.71e-06
+## trait_index.s                           -0.005722   0.020007  -0.286    0.775
+## exp1_congenial_pid_binary:trait_index.s  0.014115   0.027396   0.515    0.606
+##                                            
+## (Intercept)                             ***
+## exp1_congenial_pid_binary               ***
+## trait_index.s                              
+## exp1_congenial_pid_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.2432789)
+## 
+##     Null deviance: 317.83  on 1288  degrees of freedom
+## Residual deviance: 312.61  on 1285  degrees of freedom
+##   (527 observations deleted due to missingness)
+## AIC: 1842
+## 
+## Number of Fisher Scoring iterations: 2
+
# Experiment 2 (Sample Size)--------------------------------------------------------------
+
+# Issue Position
+r1_ti_e2ss_iss <- glm(exp2_goodSample ~ exp2_congenial_issue_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e2ss_iss)
+
## 
+## Call:
+## glm(formula = exp2_goodSample ~ exp2_congenial_issue_binary * 
+##     trait_index.s, data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -1.8064  -0.4605   0.2119   0.4193   2.5213  
+## 
+## Coefficients:
+##                                           Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                               -0.10556    0.03608  -2.926  0.00349
+## exp2_congenial_issue_binary                0.20818    0.05057   4.117 4.05e-05
+## trait_index.s                              0.04717    0.03631   1.299  0.19409
+## exp2_congenial_issue_binary:trait_index.s -0.04570    0.05055  -0.904  0.36608
+##                                              
+## (Intercept)                               ** 
+## exp2_congenial_issue_binary               ***
+## trait_index.s                                
+## exp2_congenial_issue_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.9893448)
+## 
+##     Null deviance: 1546.0  on 1547  degrees of freedom
+## Residual deviance: 1527.5  on 1544  degrees of freedom
+##   (268 observations deleted due to missingness)
+## AIC: 4382.4
+## 
+## Number of Fisher Scoring iterations: 2
+
# Ideology
+r1_ti_e2ss_ideo <- glm(exp2_goodSample ~ exp2_congenial_ideo_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e2ss_ideo)
+
## 
+## Call:
+## glm(formula = exp2_goodSample ~ exp2_congenial_ideo_binary * 
+##     trait_index.s, data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -1.9117  -0.8572   0.1936   0.4531   2.4858  
+## 
+## Coefficients:
+##                                          Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                              -0.13415    0.04489  -2.989 0.002869
+## exp2_congenial_ideo_binary                0.24476    0.06329   3.867 0.000117
+## trait_index.s                            -0.01740    0.04348  -0.400 0.689140
+## exp2_congenial_ideo_binary:trait_index.s  0.06381    0.06203   1.029 0.303880
+##                                             
+## (Intercept)                              ** 
+## exp2_congenial_ideo_binary               ***
+## trait_index.s                               
+## exp2_congenial_ideo_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 1.034689)
+## 
+##     Null deviance: 1082.4  on 1033  degrees of freedom
+## Residual deviance: 1065.7  on 1030  degrees of freedom
+##   (782 observations deleted due to missingness)
+## AIC: 2975.6
+## 
+## Number of Fisher Scoring iterations: 2
+
# Party ID
+r1_ti_e2ss_pid <- glm(exp2_goodSample ~ exp2_congenial_pid_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e2ss_pid)
+
## 
+## Call:
+## glm(formula = exp2_goodSample ~ exp2_congenial_pid_binary * trait_index.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -1.9354  -0.5179   0.2042   0.4023   2.4229  
+## 
+## Coefficients:
+##                                          Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                             -0.087454   0.039890  -2.192 0.028531
+## exp2_congenial_pid_binary                0.188662   0.056472   3.341 0.000859
+## trait_index.s                            0.002514   0.039936   0.063 0.949821
+## exp2_congenial_pid_binary:trait_index.s  0.059189   0.056204   1.053 0.292485
+##                                            
+## (Intercept)                             *  
+## exp2_congenial_pid_binary               ***
+## trait_index.s                              
+## exp2_congenial_pid_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 1.029668)
+## 
+##     Null deviance: 1341.0  on 1292  degrees of freedom
+## Residual deviance: 1327.2  on 1289  degrees of freedom
+##   (523 observations deleted due to missingness)
+## AIC: 3713.2
+## 
+## Number of Fisher Scoring iterations: 2
+
# Experiment 2 (Causal Claim)-------------------------------------------------------------
+
+# Issue Position
+r1_ti_e2cc_iss <- glm(exp2_goodCausal ~ exp2_congenial_issue_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e2cc_iss)
+
## 
+## Call:
+## glm(formula = exp2_goodCausal ~ exp2_congenial_issue_binary * 
+##     trait_index.s, data = data)
+## 
+## Deviance Residuals: 
+##      Min        1Q    Median        3Q       Max  
+## -2.53814  -0.65342  -0.01552   0.58802   1.58359  
+## 
+## Coefficients:
+##                                           Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                               -0.19176    0.03569  -5.373 8.95e-08
+## exp2_congenial_issue_binary                0.37556    0.05007   7.501 1.07e-13
+## trait_index.s                              0.01388    0.03590   0.387    0.699
+## exp2_congenial_issue_binary:trait_index.s -0.03616    0.04996  -0.724    0.469
+##                                              
+## (Intercept)                               ***
+## exp2_congenial_issue_binary               ***
+## trait_index.s                                
+## exp2_congenial_issue_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 0.9668168)
+## 
+##     Null deviance: 1542.9  on 1542  degrees of freedom
+## Residual deviance: 1487.9  on 1539  degrees of freedom
+##   (273 observations deleted due to missingness)
+## AIC: 4332.8
+## 
+## Number of Fisher Scoring iterations: 2
+
# Ideology
+r1_ti_e2cc_ideo <- glm(exp2_goodCausal ~ exp2_congenial_ideo_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e2cc_ideo)
+
## 
+## Call:
+## glm(formula = exp2_goodCausal ~ exp2_congenial_ideo_binary * 
+##     trait_index.s, data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -2.5552  -0.6211   0.2620   0.8714   1.5768  
+## 
+## Coefficients:
+##                                          Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                              -0.16446    0.04484  -3.667 0.000258
+## exp2_congenial_ideo_binary                0.33887    0.06333   5.351 1.08e-07
+## trait_index.s                             0.02093    0.04336   0.483 0.629337
+## exp2_congenial_ideo_binary:trait_index.s -0.06106    0.06194  -0.986 0.324509
+##                                             
+## (Intercept)                              ***
+## exp2_congenial_ideo_binary               ***
+## trait_index.s                               
+## exp2_congenial_ideo_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 1.032891)
+## 
+##     Null deviance: 1091.5  on 1030  degrees of freedom
+## Residual deviance: 1060.8  on 1027  degrees of freedom
+##   (785 observations deleted due to missingness)
+## AIC: 2965.2
+## 
+## Number of Fisher Scoring iterations: 2
+
# Party ID
+r1_ti_e2cc_pid <- glm(exp2_goodCausal ~ exp2_congenial_pid_binary*trait_index.s,
+                        data = data)
+summary(r1_ti_e2cc_pid)
+
## 
+## Call:
+## glm(formula = exp2_goodCausal ~ exp2_congenial_pid_binary * trait_index.s, 
+##     data = data)
+## 
+## Deviance Residuals: 
+##     Min       1Q   Median       3Q      Max  
+## -2.4745  -0.6310   0.2412   0.8539   1.4842  
+## 
+## Coefficients:
+##                                          Estimate Std. Error t value Pr(>|t|)
+## (Intercept)                             -0.121883   0.039407  -3.093  0.00202
+## exp2_congenial_pid_binary                0.268855   0.055879   4.811 1.68e-06
+## trait_index.s                            0.005371   0.039391   0.136  0.89156
+## exp2_congenial_pid_binary:trait_index.s -0.002798   0.055507  -0.050  0.95980
+##                                            
+## (Intercept)                             ** 
+## exp2_congenial_pid_binary               ***
+## trait_index.s                              
+## exp2_congenial_pid_binary:trait_index.s    
+## ---
+## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
+## 
+## (Dispersion parameter for gaussian family taken to be 1.005112)
+## 
+##     Null deviance: 1314.8  on 1288  degrees of freedom
+## Residual deviance: 1291.6  on 1285  degrees of freedom
+##   (527 observations deleted due to missingness)
+## AIC: 3670.6
+## 
+## Number of Fisher Scoring iterations: 2
+
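+# Optional sketch: the nine fits in this subsection (and the nine in 8.1)
+# share one template, so a small helper could generate them. fit_mod is a
+# hypothetical helper, not part of the original script.
+fit_mod <- function(outcome, cong, moderator, data){
+  glm(as.formula(paste0(outcome, " ~ ", cong, "*", moderator)), data = data)
+}
+# e.g., reproduces r1_ti_e2cc_pid above:
+fit_mod("exp2_goodCausal", "exp2_congenial_pid_binary", "trait_index.s", data)
+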
+
+

8.3 Figures

+
+# Row labels for the moderation estimates collected below (named row_labels
+# rather than rownames to avoid masking the base R function of that name)
+row_labels <- c("r1_nfc_e1_iss", "r1_nfc_e1_ideo", "r1_nfc_e1_pid", 
+                "r1_nfc_e2ss_iss", "r1_nfc_e2ss_ideo", "r1_nfc_e2ss_pid", 
+                "r1_nfc_e2cc_iss", "r1_nfc_e2cc_ideo", "r1_nfc_e2cc_pid") 
+
+r1_nfc_bin <- rbind(
+  as.data.frame(summary(r1_nfc_e1_iss)$coef)[4,1:2],
+  as.data.frame(summary(r1_nfc_e1_ideo)$coef)[4,1:2],
+  as.data.frame(summary(r1_nfc_e1_pid)$coef)[4,1:2],
+  
+  as.data.frame(summary(r1_nfc_e2ss_iss)$coef)[4,1:2],
+  as.data.frame(summary(r1_nfc_e2ss_ideo)$coef)[4,1:2],
+  as.data.frame(summary(r1_nfc_e2ss_pid)$coef)[4,1:2],
+  
+  as.data.frame(summary(r1_nfc_e2cc_iss)$coef)[4,1:2],
+  as.data.frame(summary(r1_nfc_e2cc_ideo)$coef)[4,1:2],
+  as.data.frame(summary(r1_nfc_e2cc_pid)$coef)[4,1:2]
+)
+
+r1_ti_bin <- rbind(
+  as.data.frame(summary(r1_ti_e1_iss)$coef)[4,1:2],
+  as.data.frame(summary(r1_ti_e1_ideo)$coef)[4,1:2],
+  as.data.frame(summary(r1_ti_e1_pid)$coef)[4,1:2],
+  
+  as.data.frame(summary(r1_ti_e2ss_iss)$coef)[4,1:2],
+  as.data.frame(summary(r1_ti_e2ss_ideo)$coef)[4,1:2],
+  as.data.frame(summary(r1_ti_e2ss_pid)$coef)[4,1:2],
+  
+  as.data.frame(summary(r1_ti_e2cc_iss)$coef)[4,1:2],
+  as.data.frame(summary(r1_ti_e2cc_ideo)$coef)[4,1:2],
+  as.data.frame(summary(r1_ti_e2cc_pid)$coef)[4,1:2]
+)
+
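+# Optional sketch: attach the row labels defined above so the interaction
+# estimates are easier to inspect by name (the plotting code below does not
+# depend on these labels).
+rownames(r1_nfc_bin) <- row_labels
+rownames(r1_ti_bin)  <- sub("nfc", "ti", row_labels)
+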
+
+# NFC Plot--------------------------------------------------------------------------------
+
+pdf("lucid_figures/r1_nfc_b.pdf", height = 4, width = 8)
+
+par(mfrow = c(2,3), pch = 16, mar = c(0,2,1,0), oma = c(.5,5,1,0))
+
+# E1
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Evidence Interpretation")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = r1_nfc_bin[1:3, 1] + 1.96*r1_nfc_bin[1:3,2], 
+         y1 = r1_nfc_bin[1:3, 1] - 1.96*r1_nfc_bin[1:3,2], 
+         lwd = 3, col = "gray40")
+points(1:3, r1_nfc_bin[1:3,1], cex = 2, pch = c(15, 16, 17))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Need for Closure", side = 2, padj = -5) # common y axis label
+
+# E2ss
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Sample Size")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = r1_nfc_bin[4:6, 1] + 1.96*r1_nfc_bin[4:6,2], 
+         y1 = r1_nfc_bin[4:6, 1] - 1.96*r1_nfc_bin[4:6,2], 
+         lwd = 3, col = "gray40")
+points(1:3, r1_nfc_bin[4:6,1], cex = 2, pch = c(15, 16, 17))
+
+# E2cc
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "Causality")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = r1_nfc_bin[7:9, 1] + 1.96*r1_nfc_bin[7:9,2], 
+         y1 = r1_nfc_bin[7:9, 1] - 1.96*r1_nfc_bin[7:9,2], 
+         lwd = 3, col = "gray40")
+points(1:3, r1_nfc_bin[7:9,1], cex = 2, pch = c(15, 16, 17))
+
+
+# Trait Index Plot-----------------------------------------------------------------------
+
+#par(mfrow = c(2,3), pch = 16, mar = c(0,2,1,0), oma = c(.5,5,1,0))
+
+# E1
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = r1_ti_bin[1:3, 1] + 1.96*r1_ti_bin[1:3,2], 
+         y1 = r1_ti_bin[1:3, 1] - 1.96*r1_ti_bin[1:3,2], 
+         lwd = 3, col = "gray40")
+points(1:3, r1_ti_bin[1:3,1], cex = 2, pch = c(15, 16, 17))
+
+axis(2, at = seq(-.4, .4, .10)) # common y axis
+mtext(text = "Openness Index", side = 2, padj = -5) # common y axis label
+
+
+# E2ss
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = r1_ti_bin[4:6, 1] + 1.96*r1_ti_bin[4:6,2], 
+         y1 = r1_ti_bin[4:6, 1] - 1.96*r1_ti_bin[4:6,2], 
+         lwd = 3, col = "gray40")
+points(1:3, r1_ti_bin[4:6,1], cex = 2, pch = c(15, 16, 17))
+
+
+legend("bottom", legend = c("Issue Position", "Ideology" , "Party ID"),
+       pch = c(15, 16, 17),
+       cex =1.5, ncol = 3, xpd = NA)
+
+# E2cc
+plot(1,1, col = "white", bty = "n", xlim = c(.5,3.5), ylim = c(-.5,.5), 
+     xaxt = "n", yaxt = "n",
+     xlab = "", ylab = "", 
+     main = "")
+segments(x0 = .5, x1 = 3.5, 
+         y0 = 0, y1 = 0, 
+         col = "gray", lty = 2, lwd = 1)
+segments(x0 = 1:3, x1 = 1:3, 
+         y0 = r1_ti_bin[7:9, 1] + 1.96*r1_ti_bin[7:9,2], 
+         y1 = r1_ti_bin[7:9, 1] - 1.96*r1_ti_bin[7:9,2], 
+         lwd = 3, col = "gray40")
+points(1:3, r1_ti_bin[7:9,1], cex = 2, pch = c(15, 16, 17))
+
+dev.off()
+
## quartz_off_screen 
+##                 2
+
+# Close the log file opened at the top of the script
+sink()
+
+