Computes the (optionally centered and/or absolute) sample moment of a certain order.
moment(x, order = 1, center = FALSE, absolute = FALSE, na.rm = FALSE)
x: a numeric vector containing the values whose moment is to be computed.
order: order of the moment to be computed; the default is to compute the first moment, i.e., the mean.
center: a logical value indicating whether centered moments are to be computed.
absolute: a logical value indicating whether absolute moments are to be computed.
na.rm: a logical value indicating whether missing values (NA) should be removed before the moment is computed.
Arguments used in the pwr.t.test examples:
n: number of observations per sample.
d: effect size (Cohen's d), i.e. the difference between the means divided by the pooled standard deviation.
sig.level: significance level (Type I error probability).
power: power of the test (1 minus the Type II error probability).
alternative: a character string specifying the alternative hypothesis; must be one of "two.sided" (default), "greater" or "less".
type: type of t test: one-, two- or paired-samples.
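For illustration, Cohen's d can be computed directly from two samples as the difference of the means divided by the pooled standard deviation. The following sketch uses made-up vectors g1 and g2 (not part of the original examples):

g1 <- c(5.1, 4.8, 5.5, 5.0, 5.3)   # hypothetical sample 1
g2 <- c(4.2, 4.6, 4.4, 4.9, 4.1)   # hypothetical sample 2
sp <- sqrt(((length(g1)-1)*var(g1) + (length(g2)-1)*var(g2)) / (length(g1)+length(g2)-2))   # pooled SD
d  <- (mean(g1) - mean(g2)) / sp   # Cohen's d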
When center and absolute are both FALSE, the moment is simply sum(x^order) / length(x).
For the power calculations, exactly one of the parameters (u, v, f2, power and sig.level in pwr.f2.test) must be passed as NULL, and that parameter is determined from the others. Note that sig.level has a non-NULL default, so NULL must be passed explicitly if you want to compute it.
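Both points can be illustrated with a short sketch (assumes the e1071 and pwr packages are attached; the numbers are arbitrary):

x <- rnorm(50)
all.equal(moment(x, order = 2), sum(x^2) / length(x))   # TRUE: uncentered, non-absolute moment
## sig.level has a non-NULL default, so pass NULL explicitly to solve for it
pwr.f2.test(u = 3, v = 40, f2 = 0.15, power = 0.8, sig.level = NULL)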
Author(s): Kurt Hornik and Friedrich Leisch
x <- rnorm(100)
## Compute the mean
moment(x)
## Compute the 2nd centered moment (!= var)
moment(x, order=2, center=TRUE)
## Compute the 3rd absolute centered moment
moment(x, order=3, center=TRUE, absolute=TRUE)
# NOT RUN {
## Exercise 9.1 P. 424 from Cohen (1988)
pwr.f2.test(u=5,v=89,f2=0.1/(1-0.1),sig.level=0.05)
# }
# NOT RUN {
## One sample (power)
## Exercise 2.5 p. 47 from Cohen (1988)
pwr.t.test(d=0.2,n=60,sig.level=0.10,type="one.sample",alternative="two.sided")
## Paired samples (power)
## Exercise p. 50 from Cohen (1988)
d<-8/(16*sqrt(2*(1-0.6)))
pwr.t.test(d=d,n=40,sig.level=0.05,type="paired",alternative="two.sided")
## Two independent samples (power)
## Exercise 2.1 p. 40 from Cohen (1988)
d<-2/2.8
pwr.t.test(d=d,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
## Two independent samples (sample size)
## Exercise 2.10 p. 59
pwr.t.test(d=0.3,power=0.75,sig.level=0.05,type="two.sample",alternative="greater")
# }
##read
data = read.delim("data.csv",header = T,dec=".", sep = ";")
#Check NA
apply(data,2,function(x){sum(is.na(x))})
#Plot
plot(data,main = "Data", xlab = "data",ylab = "value")
plot(density(data),main = "Data",xlab = "value",ylab = "density",lwd = 2)
lines(seq(min(data),max(data),0.01),dnorm(seq(min(data),max(data),0.01),mean = mean(data),sd=sd(data)),col="blue",lwd = 2,lty = "dotted")
boxplot(data,main = "Data",ylab = "values",notch = T)
ggqqplot(data,main = "Data")   # from the ggpubr package
gridExtra::grid.arrange(qq1,qq2,nrow=2)   # arrange two previously created qq plots
#DOE
pairs(data)
scatterplotMatrix(data)   # from the car package
interaction.plot(data$factor1, data$factor2, data$measured)
#DOE characteristic values
data   # print the data / design
#lm
model1 = lm(m~.^2 ,data=data3)
mod = lm(measured~.^3+I(factor1^2)+I(factor2^2)+I(factor3^2)+I(factor2*factor1^2)+I(factor3*factor1^2)+I(factor1*factor2^2)+I(factor3*factor2^2)+I(factor1*factor3^2)+I(factor2*factor3^2) ,data=data)
summary(mod)
anova(mod,mod_old)
#Parabola (quadratic fit)
mod=lm(measured~ poly(factor1, degree = 2,raw=T),data=data)
summary(mod)
#Optimize the model via stepwise selection
stepm <- stepAIC(model1, direction="both")   # from the MASS package
stepm$anova
#Residuals: homogeneity of variance
par(mfrow = c(1,2))
plot(mod,which=1)
plot(mod,which=3)
ncvTest(mod)   # non-constant variance score test, from the car package
mod_r2=summary(mod)$r.squared
#Power of lm: required sample size from the model R^2
f=mod_r2/(1-mod_r2)   # Cohen's f2 computed from R^2
uf=1                  # numerator degrees of freedom (number of predictors)
sig=0.01
p=0.99
f2_res=pwr.f2.test(u=uf,v=NULL,f2=f,sig.level=sig,power=p)   # from the pwr package
n=ceiling(f2_res$v+uf+1)   # v = n - u - 1, so the required n is v + u + 1
#Cook's distance and leverage
par(mfrow = c(1,2))
plot(mod,which=4)
plot(mod,which=6)
cd1c=4/length(mod$residuals)   # rule-of-thumb cutoff: 4/n
cd1=abs(cooks.distance(mod))
subset(cd1, cd1 > cd1c)   # observations with a large Cook's distance
MEPlot(mod)   # main effects plot, from the FrF2 package
IAPlot(mod)   # interaction plot, from the FrF2 package
#Normality
shapiro.test(data)   # data must be a numeric vector here
#Outlier test (Rosner's test)
rosnerTest(data, k = 2, alpha = 0.05, warn = T)   # from the EnvStats package
#Box-Cox transformation
data=data-min(data)+1   # shift the data so all values are positive (Box-Cox requires > 0)
x=powerTransform(data, family="bcPower")   # from the car package
transData=bcPower(data,lambda=x$lambda)
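# Rough interpretation guide (assuming data is a single numeric vector): an estimated
# lambda near 0 acts like a log transform, near 0.5 like a square root, near 1 like no
# transform. A quick check is to re-test normality on the transformed values:
shapiro.test(transData)   # compare with shapiro.test on the untransformed data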
#Characteristic values (skewness/kurtosis from e1071, mc = medcouple from robustbase)
values1=c(mean(s1),median(s1),sd(s1),mad(s1),min(s1),quantile(s1,0.25),quantile(s1,0.75),max(s1),skewness(s1),mc(s1),kurtosis(s1))
values2=c(mean(s2),median(s2),sd(s2),mad(s2),min(s2),quantile(s2,0.25),quantile(s2,0.75),max(s2),skewness(s2),mc(s2),kurtosis(s2))
values3=c(mean(s3),median(s3),sd(s3),mad(s3),min(s3),quantile(s3,0.25),quantile(s3,0.75),max(s3),skewness(s3),mc(s3),kurtosis(s3))
values4=c(mean(s4),median(s4),sd(s4),mad(s4),min(s4),quantile(s4,0.25),quantile(s4,0.75),max(s4),skewness(s4),mc(s4),kurtosis(s4))
column.names <- c("mean","median","$\\sigma$","MAD","Min","Q1","Q3","Max","skewness","Medcouple","kurtosis")
row.names <- c("s1","s2","s3","s4")
results = array(rbind(values1,values2,values3,values4),dim = c(4,11),dimnames = list(row.names,column.names))
kable(results)   # from the knitr package
#FWHM (full width at half maximum) of the density estimate
d <- density(data)
xmax <- d$x[d$y==max(d$y)]
x1 <- d$x[d$x < xmax][which.min(abs(d$y[d$x < xmax]-max(d$y)/2))]
x2 <- d$x[d$x > xmax][which.min(abs(d$y[d$x > xmax]-max(d$y)/2))]
fwhm=x2-x1
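# Sanity check (only meaningful if the data are roughly normal): for a normal
# distribution the FWHM equals 2*sqrt(2*log(2))*sigma, i.e. about 2.355*sd(data),
# so fwhm and this value should be of similar size.
2*sqrt(2*log(2))*sd(data)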
#Plan
set.seed(1234) # set the seed for the random numbers
f <- factor(seq(1,5,4/(n-1))) # generate vector of factor levels
fac <- sample(f,7) # randomize the order of levels
plan <- data.frame(level =fac) # create a data frames with all experiments
#Plan with continuous factor
nparam <- 3
levels <- seq(1000,1500,(1500-1000)/(nparam-1))
lvlsamp <- sample(levels,nparam)#Randomize
plan <- data.frame(parameter=lvlsamp)
#Write the matriculation number (Matrikelnummer) as the first line
write.table(1, file="plan1.csv", sep=";", dec=".", row.names=F, col.names=F,append=F)
#Write the test plan
write.table(plan, file="plan1.csv", sep=";", dec=".", row.names=F, col.names=T,append=T)
#Find optimum with a parabola: the vertex of y = b0 + b1*x + b2*x^2 lies at x = -b1/(2*b2)
mod1=lm(measured~poly(parameter, degree=2, raw=T),data=data71)
x_max=as.double(-mod1$coefficients[2]/(2*mod1$coefficients[3]))
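# Optional check (reuses mod1 and data71 from above): evaluate the fitted quadratic on a
# fine grid and confirm that the grid maximiser is close to x_max (valid when the
# coefficient of parameter^2 is negative, i.e. the parabola opens downward).
xs <- seq(min(data71$parameter), max(data71$parameter), length.out = 200)
ys <- predict(mod1, newdata = data.frame(parameter = xs))
xs[which.max(ys)]   # should be close to x_max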
#Plan DOE
Var1=c(-1,1) # generate vector of factor1
Var2=c(-1,1) # generate vector of factor2
Var3=c(-1,1) # generate vector of factor3
plan=expand.grid(Var1,Var2,Var3)
n_rep=4 # number of replicates
planr=do.call("rbind", replicate(n_rep, plan, simplify=F))
set.seed(1234) # set the seed for the random numbers
plan_r=planr[order(sample(1:nrow(planr))),] #randomised design
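# Quick consistency check: a 2^3 full factorial with n_rep = 4 replicates should
# contain 2^3 * 4 = 32 runs.
nrow(plan_r)   # expected: 32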
#Alternative approach: full factorial with fac.design (from the DoE.base package)
plan2 = fac.design(factor.names=list(Var1=c(-1,1), Var2=c(-1,1), Var3=c(-1,1)),replications=10,seed=1)
#blocked design instead of full factorial
plan3=fac.design(factor.names=list(temp=c(160,170), pres=c(700,800)),replications =3 , blocks=2, randomize = T, block.name = "Tech")
plan3=subset(plan3, select = -c(Blocks) ) # drop the not needed Blocks column
ma<- rep( 1 , times=length(plan3[,1])) # generate a vector with machine number
plan3a=cbind(ma,plan3)
matrnb=1
# First line: write the matriculation number (Matrikelnummer)
write.table(matrnb,file="plan.csv",sep = ";", dec = ".",row.names = F, col.names = F, append = F)
# Second line: write the test plan
write.table(plan3a,file="plan.csv",sep = ";", dec = ".",row.names = F, col.names = T, append = T)
data=read.delim("plan.csv",header = T,dec=".", sep = ";")
data=transform(data,ma=as.factor(ma)) # transform the machine number from a numerical to a factor!
f=r2/(1-r2)   # Cohen's f2 from the model R^2 (r2)
uf=2
f2_res=pwr.f2.test(u=uf,v=NULL,f2=f,sig.level=0.01,power=0.99)
n=ceiling(f2_res$v+uf+1)
ma=c(1,2)
ts=c(4000,6000)
fr=c(0.001,0.003)
plan2=expand.grid(ma,ts,fr)
# plan2=rbind(plan2,plan2,plan2,plan2,plan2,plan2) # 6 replicates
plan2=do.call("rbind", replicate(n2, plan2, simplify=F)) # n2 replicates; cleaner than the line above
names(plan2)<- c("ma","ts","fr") # name the variables
set.seed(1234) # set seed for random number generator
plan2=plan2[order(sample(1:nrow(plan2))),] # randomize design
# Alternative
plan2a=fac.design(factor.names=list(ma=c(1,2),ts=c(4000,6000), fr=c(0.001,0.003)),replications =n2 , randomize = T)
plan2a=subset(plan2a, select = -c(Blocks) ) # drop the not needed Blocks column
matrno=1
# First line: write the matriculation number (Matrikelnummer)
write.table(matrno,file="plan2_73.csv",sep = ";", dec = ".",row.names = F, col.names = F, append = F)
# Second line: write the test plan
write.table(plan2,file="plan2_73.csv",sep = ";", dec = ".",row.names = F, col.names = T, append = T)
data73b=read.delim("plan2_73_res.csv",header = T,dec=".", sep = ";")
data73b=transform(data73b,ma=as.factor(ma)) # transform the machine number from a numerical to a factor!
head(data73b)
#Block Design
#Incomplete Blocked Design
bs = 2 # Block size
gw = 4 # number of factors
bd=bibd(v=gw,k=bs)
#Transform to Plan
plan1=as.data.frame.model.matrix(bd)
plan2=cbind(c(1:(length(bd)/2)),plan1)
colnames(plan2)<-c("machine","run")
data=transform(data,machine=as.factor(machine))
#Testing
#mean
t.test(x=mach2,y=mach1, mu=.2*m1, alternative = "less",paired=F, conf.level = 0.95) #H0:(m2-m1)>=m1*0.2
t.test(data, mu = 0.6 , conf.level = 0.95)
#SD with F-Test
var.test(data1,data2, conf.level = 0.95)
#Mean of 3 samples: Welch ANOVA (allows unequal variances)
dat=stack(data) # stack the data
names(dat)=c("row","col") # row=values; col=set1,set2,set3
oneway.test(row~col, data=dat, var.equal=FALSE)
#SD 3 samples
bartlett.test(data)
#Wilcoxon test: non-normally distributed data, compare medians of 2 samples
wilcox.test(weight ~ group, data = data, exact = FALSE, alternative = "two.sided")
#Kruskal-Wallis test: non-normally distributed data, compare medians of 3 samples
kruskal.test(value ~ set, data = data)
#Power
d=(1.87-1.7)/(1.7*0.05)
pwr.t.test(n=NULL,d=d, sig.level = 0.01, power = 0.99, type = "one.sample", alternative = "greater")
# or, as a one-way ANOVA with k = 2 groups (note that for two equal groups Cohen's f = d/2):
pwr.anova.test(k=2, n = NULL, f=d, sig.level=.01, power = .90)
#Calculate the number of experiments from a target relative error of the mean
rel_error=0.01
n=(sd(data)/(rel_error*mean(data)))^2   # from rel_error = (sd/sqrt(n))/mean, solved for n
#Calculate the achieved relative error of the mean
rel_error=sd(data)/mean(data)/sqrt(nrow(data))
#Sample size for a given difference in means (delta), size.t.test from the OPDOE package
np=size.t.test(type = "one.sample", power = .95, delta=mean_old*0.2, sd=sd_old, sig.level=.05, alternative = "one.sided")
#Sample size for a given ratio between two variances, size.comparing.variances from the OPDOE package
n=size.comparing.variances(ratio=(0.1/0.05)^2,alpha=0.05,power=0.95)