# fullPath = '/Users/elisa/Dropbox/recognition_heuristic/RH_confidence/dataCodesToShare'
# Load all "preprocessed" data
load(file = paste(fullPath, '/data/modelresults.Rda', sep=""))
load(file = paste(fullPath, '/data/modelresults_long.Rda', sep=""))
load(file = paste(fullPath, '/data/congruencyIndex.Rda', sep=""))
load(file = paste(fullPath, '/data/resultsRH_perSubj.Rda', sep=""))
load(file = paste(fullPath, '/data/resultsRH_perTrial.Rda', sep=""))
# This does the actual analyses 
# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

# fullPath = '/Users/elisa/Dropbox/recognition_heuristic/RH_confidence/dataCodesToShare'


library(lsr)        # compute effect sizes (cohensD) in ttests
library(R.matlab)
library(plyr)       # to summarize data and required before dplyr
library(dplyr)      # for renaming columns 
library(reshape)    # melt and cast functions
source(paste(fullPath,"/functions/as.data.frame.list.R", sep=""))
library(BayesFactor)

# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

1 Criterion knowledge

# Melt for ggplot2
criterionKnowledge_toPlot <- resultsRH.perSubj[,c("rankingRhoRecogOnly_pop", "rankingRhoRecogOnly_dist",
                                                                          "rankingRho_pop",              "rankingRho_dist")]
criterionKnowledge_toPlot <- melt(criterionKnowledge_toPlot)
## Using  as id variables
# Just some renaming for convenience 
# assign condition ID
criterionKnowledge_toPlot$condition[criterionKnowledge_toPlot$variable == "rankingRhoRecogOnly_pop"]    <- "pop"
criterionKnowledge_toPlot$condition[criterionKnowledge_toPlot$variable == "rankingRho_pop"]                 <- "pop"
criterionKnowledge_toPlot$condition[criterionKnowledge_toPlot$variable == "rankingRhoRecogOnly_dist"] <- "dist"
criterionKnowledge_toPlot$condition[criterionKnowledge_toPlot$variable == "rankingRho_dist"]                <- "dist"
criterionKnowledge_toPlot$condition <- as.factor(criterionKnowledge_toPlot$condition)
# assign "Recognized only" ID
criterionKnowledge_toPlot$Ronly[criterionKnowledge_toPlot$variable == "rankingRhoRecogOnly_pop"]        <- "Ronly"
criterionKnowledge_toPlot$Ronly[criterionKnowledge_toPlot$variable == "rankingRho_pop"]                     <- "allcities"
criterionKnowledge_toPlot$Ronly[criterionKnowledge_toPlot$variable == "rankingRhoRecogOnly_dist"]     <- "Ronly"
criterionKnowledge_toPlot$Ronly[criterionKnowledge_toPlot$variable == "rankingRho_dist"]                    <- "allcities"
criterionKnowledge_toPlot$Ronly <- as.factor(criterionKnowledge_toPlot$Ronly)

library(ggplot2)
ggplot(criterionKnowledge_toPlot, aes(x = condition, y = value, color = condition)) +
        geom_jitter() +
        facet_grid(. ~ Ronly) +
        ylab("criterionKnowledge_toPlot")

rhoCritKnow.mean <- aggregate(value ~ condition + Ronly, data = criterionKnowledge_toPlot, mean)
rhoCritKnow.mean
##   condition     Ronly     value
## 1      dist allcities 0.2009737
## 2       pop allcities 0.5074677
## 3      dist     Ronly 0.3965372
## 4       pop     Ronly 0.3839860
rhoCritKnow.sd   <- aggregate(value ~ condition + Ronly, data = criterionKnowledge_toPlot, sd)
rhoCritKnow.sd
##   condition     Ronly     value
## 1      dist allcities 0.3145244
## 2       pop allcities 0.1694908
## 3      dist     Ronly 0.4118157
## 4       pop     Ronly 0.3464002

1.1 Stats on criterion knowledge

1.1.1 Permutation test for the recognized-only cities

library(exactRankTests)
perm.test(resultsRH.perSubj$rankingRhoRecogOnly_pop, resultsRH.perSubj$rankingRhoRecogOnly_dist, paired=TRUE, alternative="two.sided")
## 
##  Asymptotic 1-sample Permutation Test
## 
## data:  resultsRH.perSubj$rankingRhoRecogOnly_pop and resultsRH.perSubj$rankingRhoRecogOnly_dist
## T = 19.819, p-value = 0.8034
## alternative hypothesis: true mu is not equal to 0

1.1.2 BF

rhoCritKnow.bf = ttestBF(x = resultsRH.perSubj$rankingRhoRecogOnly_pop,
                         y = resultsRH.perSubj$rankingRhoRecogOnly_dist, paired=TRUE)
rhoCritKnow.bf
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 0.1146135 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS

1.1.3 Permutation test for all cities

perm.test(resultsRH.perSubj$rankingRho_pop, resultsRH.perSubj$rankingRho_dist, paired=TRUE, alternative="two.sided")
## 
##  Asymptotic 1-sample Permutation Test
## 
## data:  resultsRH.perSubj$rankingRho_pop and resultsRH.perSubj$rankingRho_dist
## T = 33.4, p-value = 4.398e-11
## alternative hypothesis: true mu is not equal to 0

1.1.4 BF

rhoCritKnow.bf = ttestBF(x = resultsRH.perSubj$rankingRho_pop,
             y = resultsRH.perSubj$rankingRho_dist, paired=TRUE)
rhoCritKnow.bf
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 116860849049 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS
# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

2 Recognition Rate and Applicability of the RH

recognized.percent.mean <- mean(resultsRH.perSubj$totalRecognized/15*100)
recognized.percent.mean
## [1] 52.92929
recognized.percent.sd   <- sd(resultsRH.perSubj$totalRecognized/15*100)
recognized.percent.sd
## [1] 10.59749

2.1 Proportion of RU trials in the Comparative Judgment Task

resultsRH.perTrial.all$subject <- as.factor(resultsRH.perTrial.all$subject)
trialProportion <- resultsRH.perTrial.all[resultsRH.perTrial.all$condition==1,] %>% #only one condition is necessary. It's the same for both
    group_by(subject) %>% 
    summarise(
      UUtrials  = length(which(trialType==0)),
      RUtrials  = length(which(trialType==1)),
      RRtrials  = length(which(trialType==2))
      )
trialProportion <- as.data.frame(trialProportion)

# Check output: should all be 70 (sum all columns except subject)
all(rowSums(trialProportion[,!names(trialProportion) %in% c("subject")]) == 70)
## [1] TRUE
# Check output: should all be 100 (sum all columns except subject)
trialProportion.percentage <- trialProportion[,!names(trialProportion) %in% c("subject")]/70*100
all(round(rowSums(trialProportion.percentage)) == 100)
## [1] TRUE
mean(trialProportion.percentage$RUtrials)
## [1] 51.13997
sd(trialProportion.percentage$RUtrials)
## [1] 6.404977

2.1.1 Plot all subjects' trial proportions

trialProportion.sorted <- trialProportion[order(trialProportion$UUtrials),]
trialProportion.sorted$subj.sorted <- c(1:dim(trialProportion.sorted)[1])
trialProportion.sorted <- reshape2:::melt.data.frame(trialProportion.sorted,  id.vars=c("subj.sorted"),    #explicitly call melt from reshape2, otherwise columns might not get properly named
                                                        measure.vars = c("UUtrials", "RUtrials", "RRtrials"),
                                                        variable.name = "trialType",
                                                        value.name = "trialCount")
ggplot(trialProportion.sorted, aes(x = subj.sorted, y = trialCount, fill=trialType)) +
    geom_bar(stat='identity')

# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

3 (Performance in the) Comparative Judgment Task

3.1 Population vs distance

# Group by subject
proportionCorrect.perSubj <- resultsRH.perTrial.all %>% 
    group_by(subject, condition) %>% 
    summarise(
      correct  = mean(correct, na.rm = TRUE)
      )

proportionCorrect.perSubj <- as.data.frame(proportionCorrect.perSubj)
proportionCorrect.perSubj$condition[proportionCorrect.perSubj$condition == 1] <- "pop"
proportionCorrect.perSubj$condition[proportionCorrect.perSubj$condition == 2] <- "dist"

proportionCorrect.perSubj.wide <- reshape(proportionCorrect.perSubj, timevar=c("condition"), 
                  idvar=c("subject"), dir="wide")

3.1.1 t-test

t.test(proportionCorrect.perSubj.wide$correct.pop, proportionCorrect.perSubj.wide$correct.dist, paired = TRUE)
## 
##  Paired t-test
## 
## data:  proportionCorrect.perSubj.wide$correct.pop and proportionCorrect.perSubj.wide$correct.dist
## t = 6.0944, df = 98, p-value = 2.169e-08
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  0.05060272 0.09946943
## sample estimates:
## mean of the differences 
##              0.07503608
cohensD(proportionCorrect.perSubj.wide$correct.pop, proportionCorrect.perSubj.wide$correct.dist, method="paired")
## [1] 0.6125102

3.1.2 BF

proportionCorrect.bf = ttestBF(x = proportionCorrect.perSubj.wide$correct.pop,
                               y = proportionCorrect.perSubj.wide$correct.dist, paired=TRUE)
proportionCorrect.bf
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 528342.4 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS

3.2 Compare population vs. distance

3.2.1 alphas

t.test(modelResults$alpha.1, modelResults$alpha.2, paired=TRUE, var.equal=TRUE)  #Because of renaming, 1 is population and 2 is distance
## 
##  Paired t-test
## 
## data:  modelResults$alpha.1 and modelResults$alpha.2
## t = 21.801, df = 98, p-value < 2.2e-16
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  0.2903534 0.3485072
## sample estimates:
## mean of the differences 
##               0.3194303
cohensD(modelResults$alpha.1, modelResults$alpha.2, method = "paired") 
## [1] 2.191062
ttestBF(x = modelResults$alpha.1, y = modelResults$alpha.2, paired=TRUE)
## t is large; approximation invoked.
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 1.018594e+36 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS

3.2.2 r parameters

t.test(modelResults$r_param.1, modelResults$r_param.2, paired=TRUE, var.equal=TRUE)
## 
##  Paired t-test
## 
## data:  modelResults$r_param.1 and modelResults$r_param.2
## t = 9.8546, df = 98, p-value = 2.503e-16
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  0.2796317 0.4206505
## sample estimates:
## mean of the differences 
##               0.3501411
cohensD(modelResults$r_param.1, modelResults$r_param.2, method = "paired") 
## [1] 0.9904274
ttestBF(x = modelResults$r_param.1, y = modelResults$r_param.2, paired=TRUE)
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 2.468208e+13 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS

3.2.3 b parameters

t.test(modelResults$b_param.1, modelResults$b_param.2, paired=TRUE, var.equal=TRUE)  #Because of renaming, 1 is population and 2 is distance
## 
##  Paired t-test
## 
## data:  modelResults$b_param.1 and modelResults$b_param.2
## t = -2.3179, df = 98, p-value = 0.02254
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.046820512 -0.003628337
## sample estimates:
## mean of the differences 
##             -0.02522442
cohensD(modelResults$b_param.1, modelResults$b_param.2, method = "paired") 
## [1] 0.2329553
# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

4 Formal Modeling: Adaptive Use of the RH

This section is not included here

# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

5 Metacognitive Sensitivity

We take a GLM approach, as described by Siedlecka et al. (2016, Front Psychol).

library(lme4)
library(lmerTest)

# Prepare the dataset
# Split RU trials into RU+ and RU-
resultsRH.perTrial.all$trialType <- rowSums(cbind(resultsRH.perTrial.all$trialType,0.5 *resultsRH.perTrial.all$RU.Rchosen), na.rm=TRUE)
# Check output
unique(resultsRH.perTrial.all$trialType)
## [1] 1.5 0.5 2.0 0.0
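# In the resulting coding, 0 = UU, 0.5 = RU- (unrecognized city chosen), 1.5 = RU+ (recognized
# city chosen) and 2 = RR; this implies that RU.Rchosen is coded +1/-1 for RU trials (NA otherwise)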
# Set factors
resultsRH.perTrial.all$trialType <- as.factor(resultsRH.perTrial.all$trialType)
resultsRH.perTrial.all$condition <- as.factor(resultsRH.perTrial.all$condition)

# Add the sorted cityPair
# But be careful with this, because city pair will be (partially) confounded with condition
twoCities <- resultsRH.perTrial.all[,c("city1", "city2")]
twoCities <- as.data.frame(t(apply(twoCities, 1, sort))) #Sort the two cities alphabetically (regardless of the order in which they appeared on screen)
resultsRH.perTrial.all$cityPair <- as.factor(paste(twoCities$V1,twoCities$V2,sep=""))


# Get only RU+ trials 
myDataForglm.RU                   <- subset(resultsRH.perTrial.all, trialType %in% c("1.5")) 
myDataForglm.RU                   <- as.data.frame(myDataForglm.RU)
myDataForglm.RU$trialType         <- factor(myDataForglm.RU$trialType)         
myDataForglm.RU$choiceConf.scaled <- scale(myDataForglm.RU$choiceConf)

5.1 Do the models on RU+ trials only

glmres.RU.conf.full = glmer(correct ~ choiceConf.scaled * condition + (1 + choiceConf.scaled|subject)  + (1|cityPair), data = myDataForglm.RU, family = binomial)
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control
## $checkConv, : Model failed to converge with max|grad| = 0.0011756 (tol =
## 0.001, component 1)
glmres.RU.conf.sum  = glmer(correct ~ choiceConf.scaled + condition + (1 + choiceConf.scaled|subject)  + (1|cityPair), data = myDataForglm.RU, family = binomial)
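# In both models, (1 + choiceConf.scaled | subject) lets the confidence slope vary across
# subjects and (1 | cityPair) adds a random intercept for each (sorted) city pair; the full
# model additionally includes the confidence-by-condition interaction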

# The full model fails to converge but the subsequent tests show that the obtained result is actually reasonable
source(system.file("utils", "allFit.R", package="lme4"))
## Loading required package: optimx
## Warning in library(package, lib.loc = lib.loc, character.only = TRUE,
## logical.return = TRUE, : there is no package called 'optimx'
## Loading required package: dfoptim
## Warning in library(package, lib.loc = lib.loc, character.only = TRUE,
## logical.return = TRUE, : there is no package called 'dfoptim'
fm1.all <- allFit(glmres.RU.conf.full)
## bobyqa : [OK]
## Nelder_Mead :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control
## $checkConv, : Model failed to converge with max|grad| = 0.00587067 (tol =
## 0.001, component 1)
## [OK]
## nlminbw : [OK]
## optimx.L-BFGS-B : [OK]
## nloptwrap.NLOPT_LN_NELDERMEAD :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control
## $checkConv, : unable to evaluate scaled gradient
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control
## $checkConv, : Model failed to converge: degenerate Hessian with 1 negative
## eigenvalues
## [OK]
## nloptwrap.NLOPT_LN_BOBYQA :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control
## $checkConv, : unable to evaluate scaled gradient

## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control
## $checkConv, : Model failed to converge: degenerate Hessian with 1 negative
## eigenvalues
## [OK]
ss <- summary(fm1.all)
ss$fixef
##                               (Intercept) choiceConf.scaled condition2
## bobyqa                           2.220025        0.09826015  -1.981727
## Nelder_Mead                      2.220102        0.09819507  -1.981851
## nlminbw                          2.220017        0.09825738  -1.981732
## nloptwrap.NLOPT_LN_NELDERMEAD    2.218147        0.09688806  -1.980510
## nloptwrap.NLOPT_LN_BOBYQA        2.218147        0.09688806  -1.980510
##                               choiceConf.scaled:condition2
## bobyqa                                          0.03323535
## Nelder_Mead                                     0.03337921
## nlminbw                                         0.03324165
## nloptwrap.NLOPT_LN_NELDERMEAD                   0.03508398
## nloptwrap.NLOPT_LN_BOBYQA                       0.03508398

5.1.1 χ^2

interactionModelComp <- anova(glmres.RU.conf.full,glmres.RU.conf.sum) 
interactionModelComp
## Data: myDataForglm.RU
## Models:
## glmres.RU.conf.sum: correct ~ choiceConf.scaled + condition + (1 + choiceConf.scaled | 
## glmres.RU.conf.sum:     subject) + (1 | cityPair)
## glmres.RU.conf.full: correct ~ choiceConf.scaled * condition + (1 + choiceConf.scaled | 
## glmres.RU.conf.full:     subject) + (1 | cityPair)
##                     Df    AIC    BIC  logLik deviance  Chisq Chi Df
## glmres.RU.conf.sum   7 4446.7 4492.8 -2216.4   4432.7              
## glmres.RU.conf.full  8 4448.6 4501.3 -2216.3   4432.6 0.1682      1
##                     Pr(>Chisq)
## glmres.RU.conf.sum            
## glmres.RU.conf.full     0.6817

5.1.2 BF10

interactionModelComp.BF10 = 1/exp((interactionModelComp[3][[1]][2] - interactionModelComp[3][[1]][1])/2) #BF10
interactionModelComp.BF10
## [1] 0.01486311
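interactionModelComp[3] is the BIC column of the anova() table, so this is the BIC approximation to the Bayes factor, BF10 ≈ exp((BIC_sum - BIC_full)/2). The same computation written out more explicitly (a sketch):

bic.sum  <- interactionModelComp$BIC[1]   # additive model (row 1 of the anova table)
bic.full <- interactionModelComp$BIC[2]   # model with the interaction (row 2)
exp((bic.sum - bic.full)/2)               # reproduces interactionModelComp.BF10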
# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

6 Individual confidence ratings

meanConfPerSubj <- ddply(resultsRH.perTrial.all, c("trialType", "condition", "subject"), summarise,
                          meanConf = mean(choiceConf)
                        )
meanConfPerSubj.RUplus      <- subset(meanConfPerSubj, trialType %in% c(1.5))
meanConfPerSubj.RUplus.pop  <- subset(meanConfPerSubj.RUplus, condition %in% c(1)) 
meanConfPerSubj.RUplus.dist <- subset(meanConfPerSubj.RUplus, condition %in% c(2)) 

6.1 α and mean confidence in RU+

forBF.alphaVsConf <- data.frame(alpha.pop            = modelResults$alpha.1,
                                alpha.dist           = modelResults$alpha.2,
                                meanConf.RUplus.pop  = meanConfPerSubj.RUplus.pop$meanConf,
                                meanConf.RUplus.dist = meanConfPerSubj.RUplus.dist$meanConf,
                                ID                   = modelResults$ID)
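Building forBF.alphaVsConf pairs rows by position, so the pop and dist subsets must list subjects in the same order (the match with modelResults$ID is assumed and not checked here); a minimal sanity check (sketch):

stopifnot(all(as.character(meanConfPerSubj.RUplus.pop$subject) ==
              as.character(meanConfPerSubj.RUplus.dist$subject)))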

# ### population
cor.test(forBF.alphaVsConf$alpha.pop, forBF.alphaVsConf$meanConf.RUplus.pop)
## 
##  Pearson's product-moment correlation
## 
## data:  forBF.alphaVsConf$alpha.pop and forBF.alphaVsConf$meanConf.RUplus.pop
## t = -0.43921, df = 97, p-value = 0.6615
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.2398527  0.1542178
## sample estimates:
##         cor 
## -0.04455035
lmBF(meanConf.RUplus.pop ~ alpha.pop + ID, data = forBF.alphaVsConf)
## Bayes factor analysis
## --------------
## [1] alpha.pop + ID : 0.2085871 ±0.01%
## 
## Against denominator:
##   Intercept only 
## ---
## Bayes factor type: BFlinearModel, JZS
# ### distance
cor.test(forBF.alphaVsConf$alpha.dist, forBF.alphaVsConf$meanConf.RUplus.dist)
## 
##  Pearson's product-moment correlation
## 
## data:  forBF.alphaVsConf$alpha.dist and forBF.alphaVsConf$meanConf.RUplus.dist
## t = -0.8309, df = 97, p-value = 0.4081
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.2768834  0.1152579
## sample estimates:
##         cor 
## -0.08406671
lmBF(meanConf.RUplus.dist ~ alpha.dist + ID, data = forBF.alphaVsConf)
## Bayes factor analysis
## --------------
## [1] alpha.dist + ID : 0.228469 ±0.01%
## 
## Against denominator:
##   Intercept only 
## ---
## Bayes factor type: BFlinearModel, JZS

6.2 Correlation between Δconfidence and Δα (Δ = pop − dist)

RU.Delta <- data.frame(meanConf_RH = meanConfPerSubj.RUplus.pop$meanConf - meanConfPerSubj.RUplus.dist$meanConf,
                       alpha       = modelResults$alpha.1                - modelResults$alpha.2,
                       ID          = modelResults$ID)

cor.test(RU.Delta$alpha, RU.Delta$meanConf_RH)
## 
##  Pearson's product-moment correlation
## 
## data:  RU.Delta$alpha and RU.Delta$meanConf_RH
## t = -1.1791, df = 97, p-value = 0.2412
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.30903261  0.08042637
## sample estimates:
##        cor 
## -0.1188728
# plot(RU.Delta$alpha, RU.Delta$meanConf_RH)
lmBF(meanConf_RH ~ alpha + ID, data = RU.Delta)
## Bayes factor analysis
## --------------
## [1] alpha + ID : 0.1202651 ±0.01%
## 
## Against denominator:
##   Intercept only 
## ---
## Bayes factor type: BFlinearModel, JZS
# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

7 Mean confidence ratings

7.1 ANOVA with all trial types: RU+, RU- (4x2)

4x2 ANOVA on mean confidence with all trial types (RR, RU+, RU-, UU) * condition

#  Get means
myDataForglm.RUp.RUm.RR.UU                   <- subset(resultsRH.perTrial.all, trialType %in% c("0", "0.5", "1.5", "2")) 
myDataForglm.RUp.RUm.RR.UU                   <- as.data.frame(myDataForglm.RUp.RUm.RR.UU)
myDataForglm.RUp.RUm.RR.UU$trialType         <- factor(myDataForglm.RUp.RUm.RR.UU$trialType)        
myDataForglm.RUp.RUm.RR.UU$choiceConf.scaled <- scale(myDataForglm.RUp.RUm.RR.UU$choiceConf)
myDataForglm.RUp.RUm.RR.UU.mean              <- aggregate(choiceConf ~ subject + condition + trialType, myDataForglm.RUp.RUm.RR.UU, mean)


#Now look for subjects that are missing UU, RU-, or RU+ trials in either condition and remove them
yesUU.pop   <- unique(myDataForglm.RUp.RUm.RR.UU.mean$subject[myDataForglm.RUp.RUm.RR.UU.mean$trialType == 0   & myDataForglm.RUp.RUm.RR.UU.mean$condition == 1])
yesUU.dist  <- unique(myDataForglm.RUp.RUm.RR.UU.mean$subject[myDataForglm.RUp.RUm.RR.UU.mean$trialType == 0   & myDataForglm.RUp.RUm.RR.UU.mean$condition == 2])
yesRUm.pop  <- unique(myDataForglm.RUp.RUm.RR.UU.mean$subject[myDataForglm.RUp.RUm.RR.UU.mean$trialType == 0.5 & myDataForglm.RUp.RUm.RR.UU.mean$condition == 1])
yesRUm.dist <- unique(myDataForglm.RUp.RUm.RR.UU.mean$subject[myDataForglm.RUp.RUm.RR.UU.mean$trialType == 0.5 & myDataForglm.RUp.RUm.RR.UU.mean$condition == 2])
yesRUp.pop  <- unique(myDataForglm.RUp.RUm.RR.UU.mean$subject[myDataForglm.RUp.RUm.RR.UU.mean$trialType == 1.5 & myDataForglm.RUp.RUm.RR.UU.mean$condition == 1])
yesRUp.dist <- unique(myDataForglm.RUp.RUm.RR.UU.mean$subject[myDataForglm.RUp.RUm.RR.UU.mean$trialType == 1.5 & myDataForglm.RUp.RUm.RR.UU.mean$condition == 2])
yesRR       <- unique(myDataForglm.RUp.RUm.RR.UU.mean$subject[myDataForglm.RUp.RUm.RR.UU.mean$trialType == 2])
noUU.pop    <- as.numeric(setdiff(yesRR,yesUU.pop))
noUU.dist   <- as.numeric(setdiff(yesRR,yesUU.dist))
noRUm.pop   <- as.numeric(setdiff(yesRR,yesRUm.pop))
noRUm.dist  <- as.numeric(setdiff(yesRR,yesRUm.dist))
noRUp.pop   <- as.numeric(setdiff(yesRR,yesRUp.pop))
noRUp.dist  <- as.numeric(setdiff(yesRR,yesRUp.dist))
missings <- unique(c(noUU.pop, noUU.dist, noRUm.pop, noRUm.dist, noRUp.pop, noRUp.dist))
myDataForglm.RUp.RUm.RR.UU.mean$subject[myDataForglm.RUp.RUm.RR.UU.mean$subject %in% missings]  <- NA
myDataForglm.RUp.RUm.RR.UU.mean <- myDataForglm.RUp.RUm.RR.UU.mean[complete.cases(myDataForglm.RUp.RUm.RR.UU.mean),]
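# Quick sanity check (a sketch): after the removal, every remaining subject should
# contribute all 8 condition x trial-type cells
cellCount <- table(myDataForglm.RUp.RUm.RR.UU.mean$subject)
all(cellCount[cellCount > 0] == 8)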

factornames <- c("subject", "condition", "trialType")
myDataForglm.RUp.RUm.RR.UU.mean[factornames] <- lapply(myDataForglm.RUp.RUm.RR.UU.mean[factornames], as.factor) 

fit.meanConfs.RUp.RUm.RR.UU <- aov(
            choiceConf ~ (condition * trialType) + 
            Error(subject/(condition * trialType)),
            data = myDataForglm.RUp.RUm.RR.UU.mean)
summary(fit.meanConfs.RUp.RUm.RR.UU)
## 
## Error: subject
##           Df Sum Sq Mean Sq F value Pr(>F)
## Residuals 72 133867    1859               
## 
## Error: subject:condition
##           Df Sum Sq Mean Sq F value Pr(>F)
## condition  1    451   451.3   1.933  0.169
## Residuals 72  16807   233.4               
## 
## Error: subject:trialType
##            Df Sum Sq Mean Sq F value Pr(>F)    
## trialType   3  73967   24656   140.2 <2e-16 ***
## Residuals 216  37982     176                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: subject:condition:trialType
##                      Df Sum Sq Mean Sq F value   Pr(>F)    
## condition:trialType   3   2018   672.6   8.521 2.25e-05 ***
## Residuals           216  17050    78.9                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

7.1.1 ANOVA effect sizes

library(DescTools)
EtaSq(fit.meanConfs.RUp.RUm.RR.UU, type=1, anova=TRUE)
##                          eta.sq eta.sq.part  eta.sq.gen         SS df
## condition           0.001599424  0.02614759 0.002188932   451.2662  1
## trialType           0.262161654  0.66071952 0.264476067 73967.0777  3
## condition:trialType 0.007151512  0.10581880 0.009713575  2017.7491  3
##                             MS      SSE dfE          F            p
## condition             451.2662 16807.16  72   1.933174 1.686947e-01
## trialType           24655.6926 37982.21 216 140.213798 1.914508e-50
## condition:trialType   672.5830 17050.22 216   8.520592 2.253529e-05

7.2 ANOVA with all trial types: RU regardless of +/- (3x2)

3x2 ANOVA on mean confidence with all trial types (RR, RU, UU) * condition

#  Get means
myDataForglm.RU.RR.UU                   <- subset(resultsRH.perTrial.all, trialType %in% c("0", "1.5", "2")) 
myDataForglm.RU.RR.UU                   <- as.data.frame(myDataForglm.RU.RR.UU)
myDataForglm.RU.RR.UU$trialType         <- factor(myDataForglm.RU.RR.UU$trialType)        
myDataForglm.RU.RR.UU$choiceConf.scaled <- scale(myDataForglm.RU.RR.UU$choiceConf)

7.3 ANOVA with RR, RU+ and UU trials

myDataForglm.RU.RR.UU.mean <- aggregate(choiceConf ~ subject + condition + trialType, myDataForglm.RU.RR.UU, mean)

#controlcount <- aggregate(choiceConf ~ subject + condition + trialType, myDataForglm.RU.RR.UU, length)
#Subject 45 did not have any UU trials in one of the conditions, so I remove this subject

myDataForglm.RU.RR.UU.mean$choiceConf[myDataForglm.RU.RR.UU.mean$subject == 45] <- NA 
myDataForglm.RU.RR.UU.mean <- myDataForglm.RU.RR.UU.mean[complete.cases(myDataForglm.RU.RR.UU.mean),]
factornames <- c("subject", "condition", "trialType")
myDataForglm.RU.RR.UU.mean[factornames] <- lapply(myDataForglm.RU.RR.UU.mean[factornames], as.factor) 

fit.meanConfs.RU.RR.UU <- aov(
            choiceConf ~ (condition * trialType) + 
            Error(subject/(condition * trialType)),
            data = myDataForglm.RU.RR.UU.mean)
summary(fit.meanConfs.RU.RR.UU)
## 
## Error: subject
##           Df Sum Sq Mean Sq F value Pr(>F)
## Residuals 97 133570    1377               
## 
## Error: subject:condition
##           Df Sum Sq Mean Sq F value  Pr(>F)   
## condition  1   1829  1828.9   9.215 0.00308 **
## Residuals 97  19251   198.5                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: subject:trialType
##            Df Sum Sq Mean Sq F value Pr(>F)    
## trialType   2  97634   48817   226.5 <2e-16 ***
## Residuals 194  41812     216                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: subject:condition:trialType
##                      Df Sum Sq Mean Sq F value   Pr(>F)    
## condition:trialType   2   2868  1434.1   17.64 9.18e-08 ***
## Residuals           194  15776    81.3                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

7.3.1 ANOVA effect sizes

library(DescTools)
EtaSq(fit.meanConfs.RU.RR.UU, type=1, anova=TRUE)
##                          eta.sq eta.sq.part  eta.sq.gen        SS df
## condition           0.005847865  0.08675896 0.008617045  1828.867  1
## trialType           0.312189610  0.70015676 0.316949880 97634.457  2
## condition:trialType 0.009171135  0.15383664 0.013448151  2868.189  2
##                            MS      SSE dfE          F            p
## condition            1828.867 19250.99  97   9.215113 3.081862e-03
## trialType           48817.229 41812.11 194 226.502374 1.814443e-51
## condition:trialType  1434.094 15776.19 194  17.635074 9.184577e-08

2x2 ANOVA on mean confidence with trial type (RU+ vs RR) * condition

#  Get means
# myDataForglm.RU.RR                   <- subset(resultsRH.perTrial.all, trialType %in% c("1.5", "2")) 
# myDataForglm.RU.RR                   <- as.data.frame(myDataForglm.RU.RR)
# myDataForglm.RU.RR$trialType         <- factor(myDataForglm.RU.RR$trialType)        
# myDataForglm.RU.RR$choiceConf.scaled <- scale(myDataForglm.RU.RR$choiceConf)

# #' ## Anova
# myDataForglm.RU.RR.mean <- aggregate(choiceConf ~ subject + condition + trialType, myDataForglm.RU.RR, mean)
# factornames <- c("subject", "condition", "trialType")
# myDataForglm.RU.RR.mean[factornames] <- lapply(myDataForglm.RU.RR.mean[factornames], as.factor) 

# fit.meanConfs <- aov(
#             choiceConf ~ (condition * trialType) + 
#             Error(subject/(condition * trialType)),
#             data = myDataForglm.RU.RR.mean)
# summary(fit.meanConfs)

# #' ### ANOVA effect sizes
# library(DescTools)
# EtaSq(fit.meanConfs, type=1, anova=TRUE)

# # bayesian anova
# # anovaBF(choiceConf ~ condition * trialType + subject, data = myDataForglm.RU.RR.mean,  whichRandom="subject")

# #' ### BF for interaction
# choiceConf.sum.bf  = lmBF(choiceConf ~ condition + trialType + subject,                       data=myDataForglm.RU.RR.mean,  whichRandom="subject")
# choiceConf.full.bf = lmBF(choiceConf ~ condition + trialType + condition:trialType + subject, data=myDataForglm.RU.RR.mean,  whichRandom="subject")

# # Compare the two models
# bf.condXtrialType = choiceConf.full.bf / choiceConf.sum.bf
# bf.condXtrialType


# #' ## Follow-up t-tests
# # Get each condition (probably could have been done with aggregate)
# RU.pop.means  <- myDataForglm.RU.RR.mean[myDataForglm.RU.RR.mean$trialType=="1.5" & myDataForglm.RU.RR.mean$condition=="1",]
# RR.pop.means  <- myDataForglm.RU.RR.mean[myDataForglm.RU.RR.mean$trialType=="2"   & myDataForglm.RU.RR.mean$condition=="1",]
# RU.dist.means <- myDataForglm.RU.RR.mean[myDataForglm.RU.RR.mean$trialType=="1.5" & myDataForglm.RU.RR.mean$condition=="2",]
# RR.dist.means <- myDataForglm.RU.RR.mean[myDataForglm.RU.RR.mean$trialType=="2"   & myDataForglm.RU.RR.mean$condition=="2",]




RR.pop.means     <- myDataForglm.RUp.RUm.RR.UU.mean[myDataForglm.RUp.RUm.RR.UU.mean$trialType=="2"   & myDataForglm.RUp.RUm.RR.UU.mean$condition=="1",]
RR.dist.means    <- myDataForglm.RUp.RUm.RR.UU.mean[myDataForglm.RUp.RUm.RR.UU.mean$trialType=="2"   & myDataForglm.RUp.RUm.RR.UU.mean$condition=="2",]
RUplu.pop.means  <- myDataForglm.RUp.RUm.RR.UU.mean[myDataForglm.RUp.RUm.RR.UU.mean$trialType=="1.5" & myDataForglm.RUp.RUm.RR.UU.mean$condition=="1",]
RUplu.dist.means <- myDataForglm.RUp.RUm.RR.UU.mean[myDataForglm.RUp.RUm.RR.UU.mean$trialType=="1.5" & myDataForglm.RUp.RUm.RR.UU.mean$condition=="2",]
RUmin.pop.means  <- myDataForglm.RUp.RUm.RR.UU.mean[myDataForglm.RUp.RUm.RR.UU.mean$trialType=="0.5" & myDataForglm.RUp.RUm.RR.UU.mean$condition=="1",]
RUmin.dist.means <- myDataForglm.RUp.RUm.RR.UU.mean[myDataForglm.RUp.RUm.RR.UU.mean$trialType=="0.5" & myDataForglm.RUp.RUm.RR.UU.mean$condition=="2",]
UU.pop.means     <- myDataForglm.RUp.RUm.RR.UU.mean[myDataForglm.RUp.RUm.RR.UU.mean$trialType=="0"   & myDataForglm.RUp.RUm.RR.UU.mean$condition=="1",]
UU.dist.means    <- myDataForglm.RUp.RUm.RR.UU.mean[myDataForglm.RUp.RUm.RR.UU.mean$trialType=="0"   & myDataForglm.RUp.RUm.RR.UU.mean$condition=="2",]

7.3.2 RR trials

t.test(RR.pop.means$choiceConf, RR.dist.means$choiceConf, paired = TRUE)
## 
##  Paired t-test
## 
## data:  RR.pop.means$choiceConf and RR.dist.means$choiceConf
## t = 0.14804, df = 72, p-value = 0.8827
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -3.234317  3.753221
## sample estimates:
## mean of the differences 
##               0.2594522
cohensD(RR.pop.means$choiceConf, RR.dist.means$choiceConf, method="paired")
## [1] 0.01732647
ttestBF(x = RR.pop.means$choiceConf, y = RR.dist.means$choiceConf, paired=TRUE)
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 0.1300791 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS
mean(RR.pop.means$choiceConf - RR.dist.means$choiceConf)
## [1] 0.2594522
sd(RR.pop.means$choiceConf - RR.dist.means$choiceConf)
## [1] 14.97433

7.3.3 RU+ trials

t.test(RUplu.pop.means$choiceConf, RUplu.dist.means$choiceConf, paired = TRUE)
## 
##  Paired t-test
## 
## data:  RUplu.pop.means$choiceConf and RUplu.dist.means$choiceConf
## t = 4.3846, df = 72, p-value = 3.89e-05
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##   4.191171 11.179312
## sample estimates:
## mean of the differences 
##                7.685241
cohensD(RUplu.pop.means$choiceConf, RUplu.dist.means$choiceConf, method="paired")
## [1] 0.5131836
ttestBF(x = RUplu.pop.means$choiceConf, y = RUplu.dist.means$choiceConf, paired=TRUE)
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 490.1053 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS
mean(RUplu.pop.means$choiceConf - RUplu.dist.means$choiceConf)
## [1] 7.685241
sd(RUplu.pop.means$choiceConf - RUplu.dist.means$choiceConf)
## [1] 14.97562

7.3.4 RU- trials

t.test(RUmin.pop.means$choiceConf, RUmin.dist.means$choiceConf, paired = TRUE)
## 
##  Paired t-test
## 
## data:  RUmin.pop.means$choiceConf and RUmin.dist.means$choiceConf
## t = -1.2424, df = 72, p-value = 0.2181
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -6.429032  1.492301
## sample estimates:
## mean of the differences 
##               -2.468366
cohensD(RUmin.pop.means$choiceConf, RUmin.dist.means$choiceConf, method="paired")
## [1] 0.1454079
ttestBF(x = RUmin.pop.means$choiceConf, y = RUmin.dist.means$choiceConf, paired=TRUE)
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 0.2689251 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS
mean(RUmin.pop.means$choiceConf - RUmin.dist.means$choiceConf)
## [1] -2.468366
sd(RUmin.pop.means$choiceConf - RUmin.dist.means$choiceConf)
## [1] 16.97546

7.3.5 UU trials

t.test(UU.pop.means$choiceConf, UU.dist.means$choiceConf, paired = TRUE)
## 
##  Paired t-test
## 
## data:  UU.pop.means$choiceConf and UU.dist.means$choiceConf
## t = 0.93123, df = 72, p-value = 0.3548
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -1.774927  4.886955
## sample estimates:
## mean of the differences 
##                1.556014
cohensD(UU.pop.means$choiceConf, UU.dist.means$choiceConf, method="paired")
## [1] 0.1089917
ttestBF(x = UU.pop.means$choiceConf, y = UU.dist.means$choiceConf, paired=TRUE)
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 0.195085 ±0%
## 
## Against denominator:
##   Null, mu = 0 
## ---
## Bayes factor type: BFoneSample, JZS
mean(UU.pop.means$choiceConf - UU.dist.means$choiceConf)
## [1] 1.556014
sd(UU.pop.means$choiceConf - UU.dist.means$choiceConf)
## [1] 14.27644

7.3.6 Difference (between trial types) of differences (between conditions)

# t.test((RU.pop.means$choiceConf - RU.dist.means$choiceConf), (RR.pop.means$choiceConf - RR.dist.means$choiceConf), paired = TRUE)
# cohensD((RU.pop.means$choiceConf - RU.dist.means$choiceConf), (RR.pop.means$choiceConf - RR.dist.means$choiceConf), method="paired")
# ttestBF(x = (RU.pop.means$choiceConf - RU.dist.means$choiceConf), y = (RR.pop.means$choiceConf - RR.dist.means$choiceConf), paired=TRUE)
# mean((RU.pop.means$choiceConf - RU.dist.means$choiceConf) - (RR.pop.means$choiceConf - RR.dist.means$choiceConf))
# sd((RU.pop.means$choiceConf - RU.dist.means$choiceConf) - (RR.pop.means$choiceConf - RR.dist.means$choiceConf))



# # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

8 Table 2

8.0.1 Proportion correct

mean(proportionCorrect.perSubj.wide$correct.pop)
## [1] 0.678355
mean(proportionCorrect.perSubj.wide$correct.dist)
## [1] 0.6033189
sd(proportionCorrect.perSubj.wide$correct.pop)
## [1] 0.07628842
sd(proportionCorrect.perSubj.wide$correct.dist)
## [1] 0.09821031

8.0.2 RH accordance rate

RHaccordance <- ddply(resultsRH.perTrial.all, c("condition", "subject"), summarise,
                          RUplus = length(which(trialType == "1.5"))
                        )


RHaccordance <- resultsRH.perTrial.all %>% 
    group_by(subject, condition) %>%
    summarise(
      RUplus  = sum(trialType=="1.5"),
      RUminus = sum(trialType=="0.5"),
      RUrate = sum(trialType=="1.5")/(sum(trialType=="1.5") + sum(trialType=="0.5"))
      )
RHaccordance <- as.data.frame(RHaccordance)

mean(RHaccordance$RUrate[RHaccordance$condition == 1])
## [1] 0.8669836
mean(RHaccordance$RUrate[RHaccordance$condition == 2])
## [1] 0.6405639
sd(RHaccordance$RUrate[RHaccordance$condition == 1])
## [1] 0.1507557
sd(RHaccordance$RUrate[RHaccordance$condition == 2])
## [1] 0.2288616

8.0.3 α

mean(modelResults$alpha.1)
## [1] 0.7700849
mean(modelResults$alpha.2)
## [1] 0.4506546
sd(modelResults$alpha.1)
## [1] 0.07753498
sd(modelResults$alpha.2)
## [1] 0.09539329

8.0.4 RTs

# aggregate for each individual (1st level)
meanRTsindividual <- resultsRH.perTrial.all %>% 
    group_by(subject, condition, trialType) %>%
    summarise(
      RTs  = mean(RT)
      )
meanRTsindividual <- as.data.frame(meanRTsindividual)

# aggregate for each condition (2nd level)
meanRTs <- meanRTsindividual %>% 
    group_by(condition, trialType) %>%
    summarise(
      RTs  = mean(RTs)
      )
meanRTs <- as.data.frame(meanRTs)
meanRTs
##   condition trialType      RTs
## 1         1         0 2420.754
## 2         1       0.5 2770.030
## 3         1       1.5 2222.181
## 4         1         2 2504.552
## 5         2         0 3122.385
## 6         2       0.5 3505.465
## 7         2       1.5 3453.227
## 8         2         2 3410.371
stdRTs <- meanRTsindividual %>% 
    group_by(condition, trialType) %>%
    summarise(
      SDs  = sd(RTs)
      )
stdRTs <- as.data.frame(stdRTs)
stdRTs
##   condition trialType      SDs
## 1         1         0 1645.168
## 2         1       0.5 3425.110
## 3         1       1.5 1940.141
## 4         1         2 2351.507
## 5         2         0 3269.775
## 6         2       0.5 6091.179
## 7         2       1.5 6279.167
## 8         2         2 4434.319

8.0.5 meanConfs

# 1st level
meanCONFsindividual <- resultsRH.perTrial.all %>% 
    group_by(subject, condition, trialType) %>%
    summarise(
      CONF  = mean(choiceConf)
      )
meanCONFsindividual <- as.data.frame(meanCONFsindividual)

# 2nd level
meanCONFs <- meanCONFsindividual %>% 
    group_by(condition, trialType) %>%
    summarise(
      meanCONFs  = mean(CONF)
      )
meanCONFs <- as.data.frame(meanCONFs)
meanCONFs
##   condition trialType meanCONFs
## 1         1         0  25.26797
## 2         1       0.5  32.30792
## 3         1       1.5  53.25745
## 4         1         2  55.15932
## 5         2         0  24.75708
## 6         2       0.5  33.13011
## 7         2       1.5  43.60824
## 8         2         2  54.92648
stdCONFs <- meanCONFsindividual %>% 
    group_by(condition, trialType) %>%
    summarise(
      stdCONFs  = sd(CONF)
      )
stdCONFs <- as.data.frame(stdCONFs)
stdCONFs
##   condition trialType stdCONFs
## 1         1         0 18.08790
## 2         1       0.5 21.45722
## 3         1       1.5 18.58001
## 4         1         2 17.40557
## 5         2         0 18.62190
## 6         2       0.5 19.15121
## 7         2       1.5 20.84626
## 8         2         2 19.95564
# rm(list=ls())
library(ggplot2)
library(dplyr)

resultsRH.perTrial.all$subject <- as.factor(resultsRH.perTrial.all$subject)

# explore main effects of confidence for different trialTypes
if (1 %in% unique(resultsRH.perTrial.all$trialType)){
    resultsRH.perTrial.all$trialType <- rowSums(cbind(resultsRH.perTrial.all$trialType,0.5 *resultsRH.perTrial.all$RU.Rchosen), na.rm=TRUE)  
  }
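# The guard above only re-splits RU trials if trialType still contains the unsplit code (1),
# so re-running this chunk does not shift the codes a second time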

subj_meanConfs = aggregate(choiceConf ~ trialType + condition + subject, resultsRH.perTrial.all, mean, drop=TRUE)
# subj_meanConfs <- subset(subj_meanConfs, trialType %in% c("1.5", "2"))


resultsRH.perTrial.all.tosummarize            <- resultsRH.perTrial.all[,c("choiceConf", "trialType", "condition")]
resultsRH.perTrial.all.tosummarize$trialType  <- as.factor(resultsRH.perTrial.all.tosummarize$trialType)
resultsRH.perTrial.all.tosummarize$condition  <- as.factor(resultsRH.perTrial.all.tosummarize$condition)
resultsRH.perTrial.all.tosummarize$choiceConf <- as.numeric(resultsRH.perTrial.all.tosummarize$choiceConf)

# Get densities by hand for the split violin plots 
# you need library(dplyr) for this
group_confDensity <- subj_meanConfs %>%                                              #resultsRH.perTrial.all.tosummarize %>% 
          group_by(trialType, condition) %>%
          do(data.frame(loc  = density(.$choiceConf)$x,                              #bw="nrd0", adjust = .2
                        dens = density(.$choiceConf)$y))

group_confStats <- resultsRH.perTrial.all.tosummarize %>% 
          group_by(trialType, condition) %>%
            summarize(conf_mean = mean(choiceConf),
                conf_se = sqrt(var(choiceConf)/length(choiceConf)))

temp_0 = NULL
for (trial in unique(group_confDensity$trialType)) {
  for (cond in unique(group_confDensity$condition) ) {
    temp = subset(group_confDensity, trialType == trial & condition == cond)
    temp$conf_mean = subset( group_confStats, condition == cond & trialType == trial)$conf_mean
    temp$conf_se = subset( group_confStats, condition == cond & trialType == trial)$conf_se
    temp_0 = rbind(temp_0,temp)
  } 
}

group_confDensity = temp_0
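# Build the split violins by hand: condition-1 densities are mirrored to the negative side,
# and each trial type's densities are then rescaled and offset so the violins sit at
# x = 0 (UU), 1 (RU-), 2 (RU+) and 3 (RR)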

group_confDensity$dens <- ifelse(group_confDensity$condition == "1", group_confDensity$dens * -1, group_confDensity$dens)

group_confDensity$dens[group_confDensity$trialType=="0"]   <- 10*group_confDensity$dens[group_confDensity$trialType=="0"] 
group_confDensity$dens[group_confDensity$trialType=="0.5"] <- 10*group_confDensity$dens[group_confDensity$trialType=="0.5"] + 1
group_confDensity$dens[group_confDensity$trialType=="1.5"] <- 10*group_confDensity$dens[group_confDensity$trialType=="1.5"] + 2
group_confDensity$dens[group_confDensity$trialType=="2"]   <- 10*group_confDensity$dens[group_confDensity$trialType=="2"]   + 3

group_confDensity <- as.data.frame(group_confDensity)

subj_meanConfs$x[subj_meanConfs$trialType=="0"]     <- 0
subj_meanConfs$x[subj_meanConfs$trialType=="0.5"]   <- 1
subj_meanConfs$x[subj_meanConfs$trialType=="1.5"]   <- 2
subj_meanConfs$x[subj_meanConfs$trialType=="2"]     <- 3


subj_meanConfs$x         <- as.numeric(subj_meanConfs$x)
subj_meanConfs$trialType <- as.factor(subj_meanConfs$trialType)
subj_meanConfs$condition <- as.factor(subj_meanConfs$condition)


save(group_confDensity, file = paste(fullPath,'/data/toPlot_group_confDensity.Rda', sep=""))
save(subj_meanConfs,    file = paste(fullPath,'/data/toPlot_subj_meanConfs.Rda', sep=""))

8.1 Plot split violin plots + scatter

p_splitViolins <- ggplot(group_confDensity, aes(dens, loc, fill = condition, group = interaction(condition, trialType))) + 
  geom_polygon() +
  scale_x_continuous(breaks = 0:3, labels = c('UU trials', 'RU(U) trials', 'RU(R) trials', 'RR trials')) +
  # scale_x_continuous(breaks = 2:3, labels = c('RU+ trials', 'RR trials')) +
  #scale_fill_brewer(palette = "Greys", guide=FALSE) +
  scale_fill_manual(values = c("#909090", "#A0A0A0"), guide=FALSE) +
  ylab('density') +
  theme(axis.title.x = element_blank()) +
  geom_jitter(data = subj_meanConfs,
        aes(x, y = choiceConf, shape = condition, group = interaction(condition, trialType)),
        alpha = 0.5, size = 1, #shape = 21, #colour = "black", 
        position = position_jitterdodge(), #width = 0.25, 
        show.legend = FALSE) + 
  scale_shape_manual(name = "Condition", values=c(19,1), labels = c("Population", "Distance")) +  
  # scale_fill_discrete(name="Condition",
  #                  labels=c("Population", "Distance")) +
  xlab("Trial type") +
  ylab("Mean Individual Confidence")  +
  annotate("segment", x = 1.75, y = 100, xend  = 2.25,  yend = 100) +
  annotate("text",    x = 2,    y = 101, label = "*",   size = 7)  +
  annotate("segment", x = 2,    y = 110, xend  = 3,     yend = 110) +
  annotate("text",    x = 2.5,  y = 111, label = "*",   size = 7)  +
  ggtitle("D.                                                           ")
# fullPath = '/Users/elisa/Dropbox/recognition_heuristic/RH_confidence/dataCodesToShare'


library(ggplot2)
library(BayesFactor)
library(gridExtra)


source(paste(fullPath, "/functions/multiplot.R", sep=""))

load(paste(fullPath,'/data/modelresults.Rda', sep=""))
load(paste(fullPath,'/data/modelresults_long.Rda', sep=""))

REMEMBER: The stats are all done at the modelling level. Here we just plot r and alpha

# Get Δr
modelResults$Delta.r <- modelResults$r_param.1 - modelResults$r_param.2 

modelResults$Delta.r.sorted <- sort(modelResults$Delta.r)
modelResults$sortedOrder    <- c(1:length(modelResults$ID))
modelResults$positiveDelta.r[modelResults$Delta.r.sorted > 0] <- 1
modelResults$positiveDelta.r[modelResults$Delta.r.sorted < 0] <- 0
modelResults$positiveDelta.r <- as.factor(modelResults$positiveDelta.r)

#p1 <- ggplot(data = modelResults, aes(x = sortedOrder, y = Delta.r.sorted, fill = positiveDelta.r)) +
p_deltaR <- ggplot(data = modelResults, aes(x = sortedOrder, y = Delta.r.sorted)) +
        geom_bar(stat="identity") +
        coord_flip() + 
        #scale_fill_manual(values = c("blue", "red")) +
        theme(legend.position="none") +
        labs(x = "Participant index (sorted)", y = expression(Delta*r)) +
        ggtitle("A.                          ")


p_alphaVsR <- ggplot(modelResults.long, aes(x = alpha, y = r_param)) +
                    geom_point(aes(shape=factor(condition)), alpha = 1) +
                    scale_shape_manual(
                        name = "Condition",
                        values = c(19,1),
                        labels = c("Population", "Distance")) + 
                    scale_alpha_manual(values = c(0.4, 1)) +
                    xlim(0, 1) +
                    labs(x = expression(alpha), y = "r parameter") +
                    #theme(legend.position="none") +
                    # coord_fixed() +                                                                   #force square plot
                    # theme(legend.position=c(.85,.85),
                    ggtitle("B.                                                         ")

Maybe we’d like to plot ΔmeanConf vs Δalpha here.

# Plot both with custom arrangement
grid.arrange(p_deltaR, p_alphaVsR, widths=c(0.3, 0.7), ncol=2)

8.1.1 Get and plot mean confidence vs α in each condition

# rm(list = ls())

# fullPath = '/Users/elisa/Dropbox/recognition_heuristic/RH_confidence/dataCodesToShare'


library(ggplot2)
library(BayesFactor)
library(gridExtra)

source(paste(fullPath, "/functions/multiplot.R", sep=""))

load(paste(fullPath,'/data/modelresults.Rda', sep=""))
load(paste(fullPath,'/data/modelresults_long.Rda', sep=""))
load(paste(fullPath,'/data/resultsRH_perTrial.Rda', sep=""))


resultsRH.perTrial.all$trialType <- rowSums(cbind(resultsRH.perTrial.all$trialType,0.5 *resultsRH.perTrial.all$RU.Rchosen), na.rm=TRUE)


meanConfPerSubj <- ddply(resultsRH.perTrial.all, c("trialType", "condition", "subject"), summarise,
                          meanConf = mean(choiceConf)
                        )
meanConfPerSubj.RUplus      <- subset(meanConfPerSubj, trialType %in% c(1.5))
meanConfPerSubj.RUplus.pop  <- subset(meanConfPerSubj.RUplus, condition %in% c(1)) 
meanConfPerSubj.RUplus.dist <- subset(meanConfPerSubj.RUplus, condition %in% c(2)) 

RU.Delta <- data.frame(meanConf_RH = meanConfPerSubj.RUplus.pop$meanConf - meanConfPerSubj.RUplus.dist$meanConf,
                       alpha       = modelResults$alpha.1                - modelResults$alpha.2,
                       ID          = modelResults$ID)

RU <- data.frame(alpha = modelResults.long$alpha,
                 condition = modelResults.long$condition,
                 meanConfRH = c(meanConfPerSubj.RUplus.pop$meanConf, meanConfPerSubj.RUplus.dist$meanConf)
                 )

p_meanConf_Alpha <- ggplot(
          RU, aes(x = alpha, y = meanConfRH)) +
          geom_point(aes(shape=factor(condition)), alpha = 1) + 
#          scale_shape_manual(name = "Condition", values=c(19,1), labels = c("Population", "Distance")) + 
          scale_shape_manual(name = "Condition", values=c(19,1), guide=FALSE) + 
          xlim(0, 1) +
          labs(x = expression(alpha), y = "Mean individual confidence") +
          # theme(legend.position=c(.85,.85),
          #   legend.key = element_rect(fill = NA, colour = NA)) +
          ggtitle("A.                                                      ")

8.1.2 Plot confidence difference vs α difference in each condition

p_meanConfDiff_AlphaDiff <- ggplot(RU.Delta, aes(x = alpha, y = meanConf_RH)) +
          geom_point(shape = 17) +  
          #xlim(0,.75) +
          labs(x = expression(Delta*alpha), y = expression(Delta*" Mean individual confidence")) +
          ggtitle("B.                                                      ") 



load(file = paste(fullPath,'/data/toPlot_group_confDensity.Rda', sep=""))
load(file = paste(fullPath,'/data/toPlot_subj_meanConfs.Rda', sep=""))


source(paste(fullPath,'/functions/summarySE.R', sep=""))

descriptives.MeanConfs <- summarySE(subj_meanConfs, measurevar="choiceConf", groupvars=c("condition", "trialType"))
is.factor(descriptives.MeanConfs$trialType)
## [1] TRUE
descriptives.MeanConfs$trialType <- as.numeric(as.character(descriptives.MeanConfs$trialType))
descriptives.MeanConfs$trialType[descriptives.MeanConfs$trialType == 0]   <- 0
descriptives.MeanConfs$trialType[descriptives.MeanConfs$trialType == 0.5] <- 1
descriptives.MeanConfs$trialType[descriptives.MeanConfs$trialType == 2]   <- 3
descriptives.MeanConfs$trialType[descriptives.MeanConfs$trialType == 1.5] <- 2
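# Map trial-type codes onto the x positions used for the violins (0 = UU, 1 = RU-, 2 = RU+,
# 3 = RR); recoding 2 -> 3 before 1.5 -> 2 keeps the two assignments from colliding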
mean.pop.UU   <- descriptives.MeanConfs$choiceConf[descriptives.MeanConfs$condition == 1 & descriptives.MeanConfs$trialType == 0]
mean.dist.UU  <- descriptives.MeanConfs$choiceConf[descriptives.MeanConfs$condition == 2 & descriptives.MeanConfs$trialType == 0]
mean.pop.RUm  <- descriptives.MeanConfs$choiceConf[descriptives.MeanConfs$condition == 1 & descriptives.MeanConfs$trialType == 1]
mean.dist.RUm <- descriptives.MeanConfs$choiceConf[descriptives.MeanConfs$condition == 2 & descriptives.MeanConfs$trialType == 1]
mean.pop.RUp  <- descriptives.MeanConfs$choiceConf[descriptives.MeanConfs$condition == 1 & descriptives.MeanConfs$trialType == 2]
mean.dist.RUp <- descriptives.MeanConfs$choiceConf[descriptives.MeanConfs$condition == 2 & descriptives.MeanConfs$trialType == 2]
mean.pop.RR   <- descriptives.MeanConfs$choiceConf[descriptives.MeanConfs$condition == 1 & descriptives.MeanConfs$trialType == 3]
mean.dist.RR  <- descriptives.MeanConfs$choiceConf[descriptives.MeanConfs$condition == 2 & descriptives.MeanConfs$trialType == 3]

group_confDensity$condition <- as.factor(group_confDensity$condition)

8.2 Plot split violin plots + scatter

p_splitViolins <- ggplot(group_confDensity, aes(dens, loc, fill = condition, group = interaction(condition, trialType))) + 
  geom_polygon() +
  scale_x_continuous(breaks = 0:3, labels = c('UU trials', 'RU- trials', 'RU+ trials', 'RR trials')) +
  #scale_fill_brewer(palette = "Greys", guide=FALSE) +
  scale_fill_manual(values = c("#708090", "#C0C0C0"), name = "Condition", labels = c("Population", "Distance")) +
  ylab('density') +
  theme(axis.title.x = element_blank()) +
  geom_jitter(data = subj_meanConfs,
        aes(x, y = choiceConf, shape = condition, group = interaction(condition, trialType)),
        alpha = 0.5, size = 1, #shape = 21, #colour = "black", 
        position = position_jitterdodge(), #width = 0.25, 
        show.legend = TRUE) +
  scale_shape_manual(name = "Condition", values=c(19,1), labels = c("Population", "Distance")) +  
  xlab("Trial type") +
  ylab("Mean Individual Confidence")  +
  annotate("segment", x = 1.75, y = 100, xend  = 2.25,  yend = 100) +
  annotate("text",    x = 2,    y = 101, label = "*",   size = 7)  +
  # annotate("segment", x = 2,    y = 110, xend  = 3,     yend = 110) +
  # annotate("text",    x = 2.5,  y = 111, label = "*",   size = 7)  +
  ggtitle("C.                                                                                                 ") +
  annotate("segment", x = -0.4, y = mean.pop.UU,   xend  = -0.05, yend = mean.pop.UU,   size = 1) +
  annotate("segment", x = 0.05, y = mean.dist.UU,  xend  =  0.4,  yend = mean.dist.UU,  size = 1) +
  annotate("segment", x = 0.6,  y = mean.pop.RUm,  xend  = 0.95,  yend = mean.pop.RUm,  size = 1) +
  annotate("segment", x = 1.05, y = mean.dist.RUm, xend  = 1.4,   yend = mean.dist.RUm, size = 1) +
  annotate("segment", x = 1.6,  y = mean.pop.RUp,  xend  = 1.95,  yend = mean.pop.RUp,  size = 1) +
  annotate("segment", x = 2.05, y = mean.dist.RUp, xend  = 2.4,   yend = mean.dist.RUp, size = 1) +
  annotate("segment", x = 2.6,  y = mean.pop.RR,   xend  = 2.95,  yend = mean.pop.RR,   size = 1) +
  annotate("segment", x = 3.05, y = mean.dist.RR,  xend  = 3.4,   yend = mean.dist.RR,  size = 1)

# The right way to do it is something like this, but I just can't get it right. Added means by hand, aware that it's a bad strategy!
  # geom_point(data = descriptives.MeanConfs, aes(colour=condition, y=choiceConf, x=trialType), position=position_jitterdodge(0.5), size = 5) + 
  # geom_errorbar(data = descriptives.MeanConfs, aes(ymax = choiceConf + se, ymin = choiceConf - se, group = interaction(condition, trialType)), width=0.2)



grid.arrange(p_meanConf_Alpha, p_meanConfDiff_AlphaDiff, p_splitViolins, layout_matrix = rbind(c(1,2),c(3,3)))

# Run and knit this script with 
# library(knitr)
# rmarkdown::render('/scripts/Filevichetal_1_AllSubjs.R')