Skip to content

Instantly share code, notes, and snippets.

@cristiano74
Last active June 18, 2019 16:50
Show Gist options
  • Save cristiano74/d778e0347fea9ff00da6a24b7aae87e5 to your computer and use it in GitHub Desktop.
# https://gist.github.com/cristiano74/d778e0347fea9ff00da6a24b7aae87e5
# gists_temp3
#
# One-sample test of an observed success proportion against a fixed benchmark:
# an exact (mid-p) method is used for small samples (passed < 15) and the
# normal approximation for larger ones. Confidence intervals and the minimum
# number of successes needed to beat the benchmark are printed.
test_proportion <- 8 / 9  # observed proportion of successes
total_tested <- 9         # number of delivered (sample size)
passed <- round(test_proportion * total_tested)  # number of successes
benchmark_value <- 0.70   # reference (benchmark) proportion to beat
conf_level <- 0.95        # confidence level used for all intervals
scope <- "UniqueOpenRate" # metric label used in the printed messages
if (passed < 15) {
  #######################################
  # SMALL SAMPLE - exact methods
  #######################################

  # Exact one-sided test of H0: p = benchmark_value vs H1: p > benchmark_value.
  # FIX: the original passed `test_proportion` (the observed rate) as the null
  # value, i.e. it tested the sample against itself; the null hypothesis must
  # be the benchmark.
  pvalue <- binom.test(passed, total_tested, benchmark_value,
                       alternative = "greater")$p.value

  # Wald (normal-approximation) confidence interval for a binomial proportion.
  #   m     - number of successes
  #   n     - sample size
  #   alpha - confidence level expressed as a PERCENTAGE (e.g. 95)
  # Returns c(lower, upper); bounds are NOT clamped to [0, 1].
  wald.binom.ci <- function(m, n, alpha) {
    theta.hat <- m / n
    theta.se <- sqrt(theta.hat * (1 - theta.hat) / n)
    z.val <- qnorm(1 - (1 - alpha / 100) / 2)
    c(theta.hat - theta.se * z.val, theta.hat + theta.se * z.val)
  }

  # Agresti-Coull interval: a Wald interval computed after adding z^2/2
  # pseudo-successes and z^2 pseudo-trials. Same arguments as wald.binom.ci.
  ac.binom.ci <- function(m, n, alpha) {
    adjust <- qnorm(1 - (1 - alpha / 100) / 2)^2
    wald.binom.ci(m + adjust / 2, n + adjust, alpha)
  }

  library(exactci)
  # https://cran.r-project.org/web/packages/exactci/exactci.pdf
  # Mid-p exact one-sided p-value of H0: p = benchmark_value.
  g <- binom.exact(passed, total_tested, p = benchmark_value,
                   alternative = "greater",
                   tsmethod = "central",
                   conf.level = conf_level, plot = FALSE, midp = TRUE)$p.value
  # Confidence that the population proportion exceeds the benchmark.
  prob_pop <- 1 - g

  # Agresti-Coull CI for the observed count; clamp the upper bound at 1
  # (the adjusted interval can exceed it for proportions near 1).
  ci <- ac.binom.ci(passed, total_tested, conf_level * 100)
  upper <- min(ci[2], 1)

  # FIX: `collapse` belongs to paste(), not print(); the original passed it to
  # print(), where it was silently ignored.
  print(paste(c("numero total_tested:", total_tested), collapse = " "))
  print(paste(c("benchmark:", benchmark_value * 100, "%"), collapse = ""))
  print(paste("Intervallo di confidenza del", scope, ":",
              round(ci[1] * 100, 2), "-", round(upper * 100, 2), "%"))
  print(paste(c("Possiamo essere confidenti al", round(prob_pop * 100, 2),
                "% di ottenere un", scope, "superiore al benchmark del",
                benchmark_value * 100, "%"), collapse = " "))
  print(paste(c("Possiamo essere confidenti al", round((1 - prob_pop) * 100, 2),
                "% di ottenere un", scope, "inferiore al benchmark del",
                benchmark_value * 100, "%"), collapse = " "))

  # Find the minimum number of successes i (out of total_tested) whose
  # lower Agresti-Coull bound clears the benchmark, and report it once
  # (check == 1 only on the first qualifying i).
  check <- 0
  for (i in seq_len(total_tested)) {
    if (ac.binom.ci(i, total_tested, conf_level * 100)[1] >= benchmark_value) {
      check <- check + 1
    }
    if (check == 1) {
      print(paste(c("numero di successi per ottenere un Lower(ci) >= benchmark:(base",
                    total_tested, "delivered)", i, "successi"), collapse = " "))
      print(paste(c("percentuale di successi richiesta per validare il campione in confronto al benchmark (nr delivered prestabilito):",
                    round(i / total_tested * 100, 2), "%"), collapse = ""))
      print(paste(c("Nuovo CI (", conf_level * 100, "%)", "sulla base dei", i, "successi",
                    round(ac.binom.ci(i, total_tested, conf_level * 100)[1] * 100, 2),
                    "-",
                    round(ac.binom.ci(i, total_tested, conf_level * 100)[2] * 100, 2),
                    "%",
                    "(benchmark:", benchmark_value * 100, "%)"), collapse = " "))
    }
  }
}
if (passed >= 15) {
  ##################
  # LARGE SAMPLE
  # NORMAL approximation (prop.test / z score test).
  ##################

  # One-sided test of H0: p = benchmark_value.
  # FIX: the original passed `p = test_proportion` (the observed rate), i.e.
  # it tested the sample against itself; the null must be the benchmark.
  res <- prop.test(x = passed, n = total_tested, p = benchmark_value,
                   correct = FALSE)
  # FIX: a bare `res` inside braces never auto-prints; make it explicit.
  print(res)

  # Score z statistic against the benchmark (variance under H0).
  z <- (test_proportion - benchmark_value) /
    sqrt(benchmark_value * (1 - benchmark_value) / total_tested)
  pvalue_norm <- 1 - pnorm(z)

  # Find the minimum number of successes i whose lower prop.test bound
  # clears the benchmark, and report it once (check == 1 only on the first
  # qualifying i).
  check <- 0
  for (i in seq_len(total_tested)) {
    prop_value <- prop.test(x = i, n = total_tested, p = benchmark_value,
                            correct = FALSE)
    if (prop_value[["conf.int"]][1] >= benchmark_value) {
      check <- check + 1
    }
    if (check == 1) {
      print("Normal")
      # FIX: `collapse` belongs to paste(), not print(); the original passed
      # it to print(), where it was silently ignored.
      print(paste(c("numero total_tested:", total_tested), collapse = " "))
      print(paste(c("benchmark:", benchmark_value * 100, "%"), collapse = ""))
      print(paste("Intervallo di confidenza del", scope, ":",
                  round(res[["conf.int"]][1] * 100, 2), "-",
                  round(res[["conf.int"]][2] * 100, 2), "%"))
      print(paste(c("Possiamo essere confidenti al", round(pnorm(z) * 100, 2),
                    "% di ottenere un", scope, "superiore al benchmark del",
                    benchmark_value * 100, "%"), collapse = " "))
      print(paste(c("Possiamo essere confidenti al", round((1 - pnorm(z)) * 100, 2),
                    "% di ottenere un", scope, "inferiore al benchmark del",
                    benchmark_value * 100, "%"), collapse = " "))
      print(paste(c("Validazione: numero di successi minimo per ottenere un Lower(ci) >= benchmark:(base",
                    total_tested, "delivered)", i, "successi"), collapse = " "))
      print(paste(c("percentuale di successi richiesta per validare il campione in confronto al benchmark (nr delivered prestabilito):",
                    round(i / total_tested * 100, 2), "%"), collapse = ""))
      print(paste(c("Nuovo CI (", conf_level * 100, "%)", "sulla base dei", i, "successi",
                    round(prop_value[["conf.int"]][1] * 100),
                    "-",
                    round(prop_value[["conf.int"]][2] * 100),
                    "%",
                    "(benchmark:", benchmark_value * 100, "%)"), collapse = " "))
    }
  }
}
Stuff and things
------------------
## head of mtcars
```{r}
head(mtcars)
```
## head of iris
```{r}
head(iris)
```
test1
test2
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment