Calculations for Konigsberg, Algee-Hewitt, and Steadman (2009)

You can obtain R from:

http://cran.r-project.org/


Obtaining the data:
 
Copy and paste the following into your "Rgui" console:
 
download.file('http://faculty.las.illinois.edu/lylek/KAHS/KAHS.txt','KAHS.txt')
skulls=matrix(scan('KAHS.txt'),nc=12)
colnames(skulls)=c("BNL","BBH","BPL","NPH","NLH","OBH","OBB","NLB","DKB","FRC","PAC","OCC")
# DKB (the ninth variable) was used in the original analysis; it is dropped here because it is pathological in "Mr. Johnson"
skulls=skulls[,-9]
NC=NCOL(skulls)
size=apply(skulls,1,prod)^(1/NC)
skulls=data.frame(size,skulls/size)
N.F=c(99,168,38,35,0,27,18,49,53,54,49,52,37,53,55,27,38,49,51,32,55,55,0,41,51,42,50,54,45,46)
N.M=c(141,255,48,35,42,42,29,52,56,55,41,47,49,58,53,30,45,51,57,55,55,55,50,50,51,45,33,56,53,55)
Pop.labels=c("American Black","American White","Ainu","Andaman","Anyang","Arikara","Atayal","Australia", "Berg","Buriat","Bushman", "Dogon",
"Easter Islander", "Egypt","Eskimo","Guam","Hainan","Mokapu","Moriori","No. Japan","Norse","Peru","Phillipine", "So. Japan",
"Santa Cruz", "Tasmania", "Teita","Tolai","Zalavar","Zulu")
skulls=data.frame(Sex=factor(rep(c('F','M'),c(1423,1744))),  # explicit factors so that
   Pop=factor(c(rep(Pop.labels,N.F),rep(Pop.labels,N.M))),skulls)  # levels(skulls$Pop) works in current R
johnson= structure(list(Sex = structure(1L, .Label = "M", class = "factor"), 
    Pop = structure(1L, .Label = "J", class = "factor"), 
    size = 74.5503335363188, BNL = 1.55599572124952, BBH = 2.03889094508558, 
    BPL = 1.46209942772584, NPH = 0.93493880837148, NLH = 0.733330052419926, 
    OBH = 0.460225975828199, OBB = 0.548488491740457, NLB = 0.353184201211206, 
    FRC = 1.54606954164845, PAC = 1.56095881105006, OCC = 1.46142873991496), .Names = c("Sex", 
"Pop", "size", "BNL", "BBH", "BPL", "NPH", "NLH", "OBH", 
"OBB", "NLB", "FRC", "PAC", "OCC"), row.names = "1", class = "data.frame")
labs= c("Basion-Nasion length", "Basion-Bregma height", "Basion-Prosthion length", 
"Nasion-Prosthion height", "Nasal height", "Orbit height", "Orbit breadth", 
"Nasal breadth", "Nasion-Bregma chord", "Bregma-Lambda chord", 
"Lambda-Opisthion chord")
OMB.labs=c('White','Black','Am. Indian','Asian','Pacific Islander','Some other')

iowa= c(2777183, 72512, 18246, 43119, 2196, 46858)

hawaii= c(476162, 33343, 24882, 703232, 282667, 47603)

gary= c(13317, 87604, 719, 293, 52, 2744)
# OMB race category for each population, in factor-level (alphabetical) order
OMB= c(4,2,1,4,4,3,4,6,1,4,2,2,5,1,3,5,4,5,5,4,1,3,4,3,4,6,2,5,1,2)
# And clean up
rm(N.F);rm(N.M);rm(Pop.labels);rm(size);rm(NC);file.remove('KAHS.txt')
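
As a quick sanity check (this step is not in the original script), the assembled data frame should have 3167 rows (1423 females and 1744 males) and 14 columns (Sex, Pop, size, and the 11 size-scaled measurements):

dim(skulls)        # should be 3167   14
table(skulls$Sex)  # F: 1423, M: 1744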
 
 
 

 
 

Table 1 – Produces Table 1, a listing of the measurements for "Mr. Johnson".

table.01=function()  
{
out=data.frame(cbind(labs,names(johnson)[-(1:3)],
    t(round(johnson[-(1:3)]*johnson$size,2)))) 
colnames(out)=c('Measurement Name','Abbrev','Mr. Johnson data')
rownames(out)=1:11
return(out)
} 
 
table.01()
 
 
 

 
 
 
Table 2 – Produces the results for Table 2 (linear discriminant analysis by default; call with 'q' or 'Q' for the quadratic analysis).
 
table.02=function (type='L')  
{ 
library(MASS) 
sto=data.frame(skulls$Sex,skulls$size*skulls[,-(1:3)]) 
colnames(sto)=c('Sex',colnames(sto)[-1]) 
if(type!='Q' & type!='q'){ 
   sto.da=lda(Sex~.,sto,prior=c(.5,.5)) 
} 
else{ 
   sto.da=qda(Sex~.,sto,prior=c(.5,.5)) 
} 
pred.Sex=predict(sto.da,sto[,-1])$class 
results=table(data.frame(sto$Sex,pred.Sex)) 
marg=margin.table(results,1)
Correct=round(results[1]/marg[1]*100,2)
Correct[2]=round(results[4]/marg[2]*100,2)
print(cbind(results,Correct))
results=as.vector(results)
Perc=round((results[1]+results[4])/sum(results)*100,2)
cat('\n\n','Overall percent correct = ',Perc,'%','\n\n') 
} 
 
table.02()
table.02('q')
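
The percentages above are resubstitution rates.  If leave-one-out cross-validated rates are also wanted (they are not part of the published table), lda() and qda() in MASS accept the argument CV=TRUE; a minimal sketch for the linear case:

library(MASS)
sto=data.frame(Sex=skulls$Sex,skulls$size*skulls[,-(1:3)])
cv=lda(Sex~.,sto,prior=c(.5,.5),CV=TRUE)  # returns the leave-one-out classifications directly
round(prop.table(table(sto$Sex,cv$class),1)*100,2)  # row percentages by true sex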
 
 
 

 
 

Table 3 – Produces the results for Table 3, using only the American White, American Black, Zulu, No. Japan, and So. Japan samples.

table.03= function (type='L') 
{
library(MASS)
 sto=skulls[skulls$Pop=='American White' | skulls$Pop=='American Black' | skulls$Pop=='Zulu' 
            | skulls$Pop=='No. Japan' | skulls$Pop=='So. Japan',]
 sto=data.frame(sto$Sex,sto$size*sto[,-(1:3)])
colnames(sto)=c('Sex',colnames(sto)[-1])
if(type!='Q' & type!='q'){
   sto.da=lda(Sex~.,sto,prior=c(.5,.5))
}
else{
   sto.da=qda(Sex~.,sto,prior=c(.5,.5))
}
pred.Sex=predict(sto.da,sto[,-1])$class
results=table(data.frame(sto$Sex,pred.Sex))
marg=margin.table(results,1)
Correct=round(results[1]/marg[1]*100,2)
Correct[2]=round(results[4]/marg[2]*100,2)
print(cbind(results,Correct))
results=as.vector(results)
Perc=round((results[1]+results[4])/sum(results)*100,2)
cat('\n\n','Overall percent correct = ',Perc,'%','\n\n')
}
 
table.03()
table.03('q')
 
 
 

 
 

"Using this latter data set, we can find the posterior probability that "Mr. Johnson" was a male.  This posterior probability is 0.996 from the linear discriminant function analysis and 0.999 from the quadratic discriminant function analysis."

J.sex= function (type='L') 
{
library(MASS)
sto=skulls[skulls$Pop=='American White' | skulls$Pop=='American Black' | skulls$Pop=='Zulu' 
            | skulls$Pop=='No. Japan' | skulls$Pop=='So. Japan',]
train=data.frame(sto$Sex,sto$size*sto[,-(1:3)])
test=data.frame(johnson$Sex,johnson$size*johnson[,-(1:3)])
colnames(train)=c('Sex',colnames(train)[-1])
colnames(test)=c('Sex',colnames(test)[-1])
if(type!='Q' & type!='q'){
  da=lda(Sex~.,train,prior=c(.5,.5))
}
else{
  da=qda(Sex~.,train,prior=c(.5,.5))
}
pred.Sex=predict(da,test[,-1])
print(pred.Sex$post)
x=train[train$Sex=='M',-1]
N=NROW(x)
V=cov(x)
IV=solve(V)
mu=as.numeric(apply(x,2,mean))
d=as.numeric(test[,-1])-mu
D2M=as.vector(d%*%IV%*%d)
cat('\n Mahalanobis D2 between Mr. Johnson and male centroid = ',D2M,'\n')
x=train[train$Sex=='F',-1]
N=NROW(x)
V=cov(x)
IV=solve(V)
mu=as.numeric(apply(x,2,mean))
d=as.numeric(test[,-1])-mu
D2F=as.vector(d%*%IV%*%d)
cat('\n The following typicalities assume unequal var-covar matrices.','\n')
cat('Typicality from Male mu & V = ',1-pchisq(D2M,11),'\n')
cat('Typicality from Female mu & V = ',1-pchisq(D2F,11),'\n')
}
 
J.sex()
J.sex('Q')
 
 
 

 
 

"Taken as an evidentiary problem and assuming equal priors for male as for female within the population at large, the likelihood ratio from the quadratic discriminant function is 1.997."  Note: you will need to download the package "mvtnorm" for the following.

LR.sex= function () 
{
library(mvtnorm)
sto=skulls[skulls$Pop=='American White' | skulls$Pop=='American Black' | skulls$Pop=='Zulu' 
            | skulls$Pop=='No. Japan' | skulls$Pop=='So. Japan',]
train=data.frame(sto$Sex,sto$size*sto[,-(1:3)])
test=data.frame(johnson$Sex,johnson$size*johnson[,-(1:3)])
colnames(train)=c('Sex',colnames(train)[-1])
colnames(test)=c('Sex',colnames(test)[-1])
x=train[train$Sex=='M',-1]
V=cov(x)
mu=as.numeric(apply(x,2,mean))
d.M=dmvnorm(test[,-1],mu,V)
x=train[train$Sex=='F',-1]
V=cov(x)
mu=as.numeric(apply(x,2,mean))
d.F=dmvnorm(test[,-1],mu,V)
return(as.vector(d.M/mean(c(d.M,d.F))))
}
LR.sex()
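
Note that the denominator in LR.sex() is the average of the two group densities, i.e., the marginal density of the evidence under equal priors.  The likelihood ratio is therefore exactly twice the posterior probability of "male" from the quadratic analysis, which is why the reported value of 1.997 falls just short of 2.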

 
Table 4 – Produces the table as well as the chi-square test reported in the text.  You must install "mclust" for this.
 
table.04= function(){
library(mclust)
#  restores size to the variables
  x=skulls$size*skulls[,-(1:3)]
#  uses size corrected
# x=skulls[,-(1:3)]
km<-kmeans(x,2)
# do one m-step in EM from initial k-means partition
try2=mstepVVV(data=x, z=unmap(km$cluster))
#  now em for two groups
z=emVVV(x,param=try2$para)$z[,1]
Classification=as.vector(z>=.5)
results=table(data.frame(skulls$Sex,Classification))
# Labelling as "T" and "F" may have been flipped
if(results[2,1]>results[2,2]) results=cbind(results[,2],results[,1])
print(chisq.test(results))
marg=margin.table(results,1)
Correct=round(results[1]/marg[1]*100,2)
Correct[2]=round(results[4]/marg[2]*100,2)
print(cbind(results,Correct))
results=as.vector(results)
Perc=round((results[1]+results[4])/sum(results)*100,2)
cat('\n\n','Percent Correct = ',Perc,'%','\n\n')
}
 
table.04()
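
In later versions of mclust the same two-group "VVV" fit can be obtained from the high-level Mclust() interface.  A minimal sketch (not part of the original analysis; the result can differ slightly because Mclust() initializes from model-based hierarchical clustering rather than from k-means):

library(mclust)
mc=Mclust(skulls$size*skulls[,-(1:3)],G=2,modelNames='VVV')
table(skulls$Sex,mc$classification)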
 
 
 

 
 

Figure 1 – Production of this figure requires running the bootstrap, saving the results, and then forming the histogram.  To do the analysis you will need to install the mclust package.

boot2= function (nb=10,sc=FALSE)
{
  library(mclust)
  if(sc==FALSE){x=skulls$size*skulls[,-(1:3)]}
  else {x=skulls[,-(1:3)]}
  N = NROW(x)
  bootg=rep(0,nb)
  try1=mstepVVV(x,z=rep(1,NROW(x)))
  LL1=emVVV(x,param=try1$para)
  G1pars=LL1$para
  LL1=LL1$loglik
  km<-kmeans(x,2)
# do one m-step in EM from initial k-means partition
  try2=mstepVVV(data=x, z=unmap(km$cluster))
#  now em for two groups, and save the log-likelihood in lk2
  LL2=emVVV(x,param=try2$para)$loglik
  bootg[1] = -2*(LL1-LL2)
  cat(1,bootg[1],'\n')
  flush.console()
  set.seed(123456789)
  for(nboot in 2:nb){
  x = simVVV(G1pars,N)[,-1]
  km<-kmeans(x,2)
  try1=mstepVVV(x,z=rep(1,NROW(x)))
  LL1=emVVV(x,param=try1$para)$loglik
  try2=mstepVVV(data=x, z=unmap(km$cluster))
#  now em for two groups, and save the log-likelihood in lk2
  LL2=emVVV(x,param=try2$para)$loglik
  X2=-2*(LL1-LL2)
  bootg[nboot]=X2
  cat(nboot,X2,'\n')
  flush.console()
}
return(bootg)
}
 
sto.boot2=boot2(10000)
sto.boot2[1]
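
The first element of sto.boot2 is the observed likelihood-ratio statistic and the remaining 9,999 elements are replicates simulated under the one-group model, so an approximate parametric-bootstrap p-value (not computed in the original script) is:

mean(sto.boot2[-1]>=sto.boot2[1])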
 
figure.01= function () 
{
top=max(sto.boot2)
hist(sto.boot2[-1],nc=40,prob=T,xlim=c(0,1.1*top),xlab='Likelihood Ratio Statistic',ylab='Density',
   main='One versus Two Groups (sexual dimorphism)')
lines(c(top,top),c(0,1))
}
figure.01()
 
 
 

 
 

Figure 2 – Run again, but with the size correction.

sto.boot2b=boot2(10000,TRUE)
sto.boot2b[1]
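
As before, an approximate parametric-bootstrap p-value for the size-corrected statistic is:

mean(sto.boot2b[-1]>=sto.boot2b[1])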
figure.02= function() 
{
hist(sto.boot2b[-1],nc=20,prob=T,xlab='Likelihood Ratio Statistic',ylab='Density',
   main='One versus Two Groups (size "corrected")')
}
figure.02()
 
 
 

 
 

Table 5 – Repeats the Table 4 analysis, but on the size-corrected variables.

 
table.05= function ()
{
library(mclust)
#  restores size to the variables
#  x=skulls$size*skulls[,-(1:3)]
#  uses size corrected
   x=skulls[,-(1:3)]
 
km<-kmeans(x,2)
# do one m-step in EM from initial k-means partition
try2=mstepVVV(data=x, z=unmap(km$cluster))
#  now em for two groups
z=emVVV(x,param=try2$para)$z[,1]
Classification=as.vector(z>=.5)
results=table(data.frame(skulls$Sex,Classification))
# Labelling as "T" and "F" may have been flipped
if(results[2,1]>results[2,2]) results=cbind(results[,2],results[,1])
print(chisq.test(results))
marg=margin.table(results,1)
Correct=round(results[1]/marg[1]*100,2)
Correct[2]=round(results[4]/marg[2]*100,2)
print(cbind(results,Correct))
results=as.vector(results)
Perc=round((results[1]+results[4])/sum(results)*100,2)
cat('\n\n','Percent Correct = ',Perc,'%','\n\n')
}
table.05()
 
 
 

 
 

Table 6 – Two-group model-based clustering of the size-corrected data, tabulated by population (unequal covariance matrices by default; call with 'L' for a common covariance matrix).

 
table.06= function (type='Q') 
{
  library(mclust)
  sto=skulls[,-(1:3)]
  x=sto
# We need somewhere to start, in which case k-means for 2 groups is fine
  km<-kmeans(x,2)
# do one m-step in EM from initial k-means partition to get start values
  if(type!='Q' & type!='q'){
     try2=mstepEEE(data=x, z=unmap(km$cluster))}
  else{
     try2=mstepVVV(data=x, z=unmap(km$cluster))}
# now em for two groups
  if(type!='Q' & type!='q'){
     two.group=emEEE(x,param=try2$para)}
  else{
     two.group=emVVV(x,param=try2$para)}
  sto=data.frame(skulls$Pop,two.group$z[,1]>=.5)
  colnames(sto)=c('Pop','Group')
  sto=table(sto)
# Because labelling of "TRUE" versus "FALSE" can flip we need to test
# for the first element and swap columns if necessary so that "Group 1"
# is the same as in the publication
  if(sto[1,1]>sto[1,2]) sto=cbind(sto[,2],sto[,1])
  print(chisq.test(sto))
  print(sto)
  return(sto)
}
sto=table.06()
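
In mclust's model codes, "EEE" constrains both groups to a single common covariance matrix (the clustering counterpart of the linear discriminant case), while the default "VVV" gives each group its own covariance matrix (the quadratic counterpart).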
 
 
 

 
 

Table 7 – Posterior probabilities and typicality probabilities for "Mr. Johnson" across all 30 reference populations, assuming equal priors.

 

table.07= function (type='Q')
{
library(MASS)
train=data.frame(skulls$Pop,skulls[,-(1:3)])
test=data.frame(johnson$Pop,johnson[,-(1:3)])
colnames(train)=c('Pop',colnames(train)[-1])
colnames(test)=c('Pop',colnames(test)[-1])
if(type!='Q' & type!='q'){
  da=lda(Pop~.,train,prior=rep(1/30,30))
}
else{
  da=qda(Pop~.,train,prior=rep(1/30,30))
}
pred.Pop=predict(da,test[,-1])$post
ix=sort(pred.Pop,decreasing=TRUE,index.return=TRUE)$ix
Pops=levels(skulls$Pop)
typicalities=function ()
{
johnson.typ=function (pop)
{
train=skulls[skulls$Pop==pop,-(1:3)]
test=johnson[-(1:3)]
N=NROW(train)
V=cov(train)
IV=solve(V)
mu=as.numeric(apply(train,2,mean))
   x=as.numeric(test)
   d=x-mu
   D2=as.vector(d%*%IV%*%d)
D2=(N-1-11)*N*D2/(11*((N-1)*(N-1)-N*D2))
return(round(1-pf(D2,11,N-1-11),4))
}
pops=levels(skulls$Pop)
typ=0
for(i in 1:30){
typ[i]=johnson.typ(pops[i])
}
return(typ)
}

typ=typicalities()
output=data.frame(cbind(Pops[ix],round(pred.Pop[ix],4),round(typ[ix],4)))
colnames(output)=c('Pop','Posterior','Typicality')
return(output)
}

table.07()

 
 
 

 
 

Table 8 – Prints census data for Iowa, Hawaii, and Gary, IN

table.08= function () 
{
out=data.frame(OMB.labs,iowa,round(iowa/sum(iowa)*100,2),
          hawaii,round(hawaii/sum(hawaii)*100,2),
          gary,round(gary/sum(gary)*100,2))
colnames(out)=c('OMB Race','Iowa','Percent','Hawaii','Percent','Gary, IN','Percent')
return(out)
}
 
table.08()
 
 
 

 
 

Table 9 – Posterior probabilities for "Mr. Johnson" under census-based priors for Iowa ('I'), Hawaii ('H'), and Gary, IN ('gary').

 

table.09= function (state='I')
{
library(MASS)
train=data.frame(skulls$Pop,skulls[,-(1:3)])
test=data.frame(johnson$Pop,johnson[,-(1:3)])
colnames(train)=c('Pop',colnames(train)[-1])
colnames(test)=c('Pop',colnames(test)[-1])
if(state=='I')
 {prior=(1/table(OMB)*iowa[1:6])[OMB]/sum(iowa[1:6])}
if(state=='H')
 {prior=(1/table(OMB)*hawaii[1:6])[OMB]/sum(hawaii[1:6])}
if(state=='gary')
 {prior=(1/table(OMB)*gary[1:6])[OMB]/sum(gary[1:6])}
da=qda(Pop~.,train,prior=prior)
pred.Pop=predict(da,test[,-1])$post
ix=sort(pred.Pop,decreasing=TRUE,index.return=TRUE)$ix
Pops=levels(skulls$Pop)
output=(cbind(Pops[ix],OMB.labs[OMB[ix]],round(prior[ix],6),round(pred.Pop[ix],4)))
row.names(output)=1:30
output=data.frame(output)
colnames(output)=c('Provenience','OMB Race','Prior','Posterior')
return(output)
}

 

table.09()
table.09('H')
table.09('gary')
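
The prior splits each OMB race's census proportion evenly among the reference populations mapped to that race by the OMB vector, so the 30 population priors sum to 1.  A quick check (not in the original script), using the Iowa counts:

prior=(1/table(OMB)*iowa[1:6])[OMB]/sum(iowa[1:6])
sum(prior)  # should return 1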
 
 
 

 
 

"By this method, the calculated likelihood ratio is 3.72…"

 
LR.race= function (state='I') 
{
library(mvtnorm)
pops=levels(skulls$Pop)
test=as.numeric(johnson[,-(1:3)])
pd=0
 
for(i in 1:30){
  train=skulls[skulls$Pop==pops[i],-(1:3)]
  mu=apply(train,2,mean)
  V=cov(train)
  pd[i]=dmvnorm(test,mu,V)
}
if(state=='I')
 {prior=(1/table(OMB)*iowa[1:6])[OMB]/sum(iowa[1:6])}
if(state=='H')
 {prior=(1/table(OMB)*hawaii[1:6])[OMB]/sum(hawaii[1:6])}
if(state=='gary')
 {prior=(1/table(OMB)*gary[1:6])[OMB]/sum(gary[1:6])}
ave=as.vector(pd%*%prior)
pd[pops=='American White']/ave
}
 
LR.race()
LR.race('H')
LR.race('gary')
 
 
 

 
 

"Starting with the logic that there are only five OMB races to consider from Iowa data in Table 8, and using the data from that table… "

 
post.race2= function () 
{
library(mvtnorm)
test=as.numeric(johnson[,-(1:3)])
pd=0
pops=c('American White','American Black','Eskimo','So. Japan','Easter Islander')
prior=iowa[-6]/sum(iowa[-6])
for(i in 1:5){
  train=skulls[skulls$Pop==pops[i],-(1:3)]
  mu=apply(train,2,mean)
  V=cov(train)
  pd[i]=dmvnorm(test,mu,V)
}
ave=as.vector(prior%*%pd)
return(round(pd*prior/ave,3))
}
post.race2()
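
The values returned are ordinary Bayes posteriors, p[i]*f[i](x)/sum(p[j]*f[j](x)), computed over the five candidate populations after the sixth OMB category ("Some other") is dropped and the Iowa census counts are renormalized.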
 
 
 

 
 

"The results presented in Table 6 for the two-group model-based clustering after adjusting for craniometric "size" require further comment."

 
table.06b= function () 
{
sub.Sahara=c(F,T,F,F,F,F,F,F,F,F,T,T,F,F,F,F,F,F,F,F,F,F,F,F,F,F,T,F,F,T)
print(sto[2,])
sto=(cbind(sub.Sahara,sto))
G1=apply(sto[sto[,1]==T,-1],2,sum)
G2=apply(sto[sto[,1]==F,-1],2,sum)
sto=rbind(G1,G2)
rownames(sto)=c('Sub Saharan','Not sub Saharan')
print(chisq.test(sto))
print(sto)
}
table.06b()
 
 
 

 
 

From the appendix

" For "Mr. Johnson" the F statistic is equal to 0.577, which with 11 and 74 degrees of freedom gives a p-value of 0.8415, close to the 0.8277 value from the chi-square approximation."

johnson.typicality= function (pop='Easter Islander') 
{
train=skulls[skulls$Pop==pop,-(1:3)]
test=johnson[-(1:3)]
N=NROW(train)
V=cov(train)
IV=solve(V)
mu=as.numeric(apply(train,2,mean))
 
   x=as.numeric(test)
   d=x-mu
   D2=as.vector(d%*%IV%*%d)
 
cat('\n\n Mahalanobis D2 = ',D2)
p.chisq=round(1-pchisq(D2,11),4)
cat('\n\n Chi-square p-value = ',p.chisq)
D2=(N-1-11)*N*D2/(11*((N-1)*(N-1)-N*D2))
p.F=round(1-pf(D2,11,N-1-11),4)
cat('\n\n F = ',D2,'  df1 = ',11,'  df2 = ',N-1-11,'\n\n')
cat('\n\n F distribution p-value = ',p.F,'\n\n')
}
 
johnson.typicality()
 
 
 

 
 
From the appendix
 

" On 99,999 parametric bootstraps (treating the observed d2 as the 100,000 value) there were 81,214 d2 values greater than or equal to the observed, giving an estimate of the p-value at 0.8121, again close to the chi-square and F statistic calculations. "

 
johnson.typicality2= function (nb=100000,pop='Easter Islander') 
{
library(mvtnorm)
set.seed(1234)
train=skulls[skulls$Pop==pop,-(1:3)]
test=johnson[-(1:3)]
N=NROW(train)
V=cov(train)
IV=solve(V)
mu=as.numeric(apply(train,2,mean))
 
x=as.numeric(test)
d=x-mu
obs.D2=as.vector(d%*%IV%*%d)
 
nu=N-1
m=11
df = (nu + nu - m + 1) - (nu - m + 1):nu
S=solve((nu)*V)
U = chol(S)
imore=1
for(i in 1:(nb-1)) {
 
# See "rwishart" in bayesm.  Code has been taken from there
# so that the Cholesky and df (which are invariant here) are
# not recalculated within this loop.
 
  T = diag(sqrt(rchisq(c(rep(1, m)), df)))
  T[lower.tri(T)] = rnorm((m * (m + 1)/2 - m))
  C = t(T) %*% U
  CI = backsolve(C, diag(m))
  sample.V = crossprod(t(CI))
 
  sample.mu = rmvnorm(1,mu,sample.V/N)
  x = rmvnorm(1,sample.mu,sample.V)
  d=as.vector(x-sample.mu)
  IV=solve(sample.V)
  D2=as.vector(d%*%IV%*%d)
  if(D2>=obs.D2) imore=imore+1
  cat('iter = ',i+1,'\r')
  flush.console()
 
}
cat('\n\n')
return(imore/nb)
 
}
 
# Below only runs 9 bootstraps to check.  Calling with no argument will run 99,999 bootstraps
 
johnson.typicality2(10)

Figure 3 – Contour plot of the posterior probability as a function of the number of traits and FST.

figure.03= function () 
{
t = 1:20
Fst = seq(0.0,0.3,0.001)
g = 5
h2 = 0.35
f = function(t,Fst) 1/(1+(g-1)*exp(2*h2*g*t*Fst/(g*Fst-Fst-g+1)))
post.p = outer(t, Fst, f)
contour(t,Fst,post.p,xlab='Number of Traits',ylab=expression(F[ST]),
       levels=pretty(c(0,1),15))
lines(c(0,19),c(0,0))
text(19.3,0,'0.2',cex=0.6,vfont=c("sans serif", "plain"))
}
figure.03()
 

From the appendix – Derivation of equations 6–8 from equation 5 using wxMaxima (http://wxmaxima.sourceforge.net) calling Maxima (http://maxima.sourceforge.net)

 

wxMaxima 0.7.3a http://wxmaxima.sourceforge.net
Maxima 5.13.0 http://maxima.sourceforge.net
Using Lisp GNU Common Lisp (GCL) GCL 2.6.8 (aka GCL)

(%i4) 1/(1+(g-1)*exp(-2*g*t*F/((1-F)*(g-1))));

[output rendered as image: eqn6-8_0]

(%i5) diff(1/((g-1)*%e^(-(2*g*t*F)/((g-1)*(1-F)))+1), t);

[output rendered as image: eqn6-8_1]

(%i6) diff(1/((g-1)*%e^(-(2*g*t*F)/((g-1)*(1-F)))+1), F);

[output rendered as image: eqn6-8_2]

(%i7) ratsimp(-(g-1)*(-(2*g*t*F)/((g-1)*(1-F)^2)-(2*g*t)/((g-1)*(1-F))));

[output rendered as image: eqn6-8_3]

(%i8) diff(1/((g-1)*%e^(-(2*g*t*F)/((g-1)*(1-F)))+1), g);

[output rendered as image: eqn6-8_4]

(%i10) ratsimp(-(g-1)*((2*g*t*F)/((g-1)^2*(1-F))-(2*t*F)/((g-1)*(1-F)))-1);

[output rendered as image: eqn6-8_5]

(%i11)



 

From the appendix – Simplification in equation 10

(%i1) exp(-((x^2-x+1/4)*8*t*F/(1-F))/2)/(exp(-((x^2-x+1/4)*8*t*F/(1-F))/2)+exp(-((x^2+x+1/4)*8*t*F/(1-F))/2));

[output rendered as image: eqn10_0]

(%i2) ratsimp(%e^(-(4*t*(x^2-x+1/4)*F)/(1-F))/(%e^(-(4*t*(x^2+x+1/4)*F)/(1-F))+%e^(-(4*t*(x^2-x+1/4)*F)/(1-F))));

[output rendered as image: eqn10_1]

(%i3)



 

From the appendix – Derivation of equations 11–13.

 

(%i1) 1/(1+exp(8*t*F*x/(F-1)));

[output rendered as image: eqn11-13_0]

(%i2) diff(1/(%e^((8*t*x*F)/(F-1))+1), x);

[output rendered as image: eqn11-13_1]

(%i3) diff(1/(%e^((8*t*x*F)/(F-1))+1), t);

[output rendered as image: eqn11-13_2]

(%i4) diff(1/(%e^((8*t*x*F)/(F-1))+1), F);

[output rendered as image: eqn11-13_3]

(%i5) ratsimp(-((8*t*x)/(F-1)-(8*t*x*F)/(F-1)^2));

[output rendered as image: eqn11-13_4]

(%i6)

