Bioops

Bioinformatics=(ACGAAG->AK)+(#!/bin/sh)+(P(A|B)=P(B|A)*P(A)/P(B))

Demo of Classification


R code demo of

  1. Linear discriminant analysis (LDA)
  2. Quadratic discriminant analysis (QDA)
  3. k-nearest neighbor (KNN)
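For reference (standard results, not specific to this demo): LDA and QDA both assume Gaussian class-conditional densities and assign an observation to the class with the largest discriminant score,

$ \delta_k(x)=x^{T}\Sigma^{-1}\mu_k-\frac{1}{2}\mu_k^{T}\Sigma^{-1}\mu_k+\log\pi_k $ (LDA: shared covariance $ \Sigma $, linear boundaries)

$ \delta_k(x)=-\frac{1}{2}\log\vert\Sigma_k\vert-\frac{1}{2}(x-\mu_k)^{T}\Sigma_k^{-1}(x-\mu_k)+\log\pi_k $ (QDA: class-specific $ \Sigma_k $, quadratic boundaries)

while KNN makes no distributional assumptions and classifies by majority vote among the k nearest training points.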
# read data
library(RCurl)
data <- getURL("https://raw.githubusercontent.com/bioops/mis_scripts/master/statistics/data/admission.txt")
admission<-read.table(text=data,header=T)
admission$CLASS<-as.factor(admission$CLASS)

# scale the data
admission$GMAT<-scale(admission$GMAT)
admission$GPA<-scale(admission$GPA)

# (1) LDA
library(MASS)
lda.fit<-lda(CLASS~GMAT+GPA,data=admission)
lda.pred<-predict(lda.fit)
# plot
# different symbols represent the true classes
# different colors show the predicted classes
plot(admission[,1:2],col=lda.pred$class, pch=as.numeric(admission$CLASS),
     xlab="GPA",ylab="GMAT",main="LDA")

(Figure: LDA classification plot)

# (2) QDA
qda.fit<-qda(CLASS~GMAT+GPA,data=admission)
qda.pred<-predict(qda.fit)
# plot
# different symbols represent the true classes
# different colors show the predicted classes
plot(admission[,1:2],col=qda.pred$class, pch=as.numeric(admission$CLASS),
     xlab="GPA",ylab="GMAT",main="QDA")

(Figure: QDA classification plot)

library(class)

# tune parameter using cross-validation (CV)
k<-seq(1,15,1) # different k
cv.err<-NULL # cv error
for (ki in k){
  knn.pred.cv<-knn.cv(admission[,1:2],admission[,3],k=ki)
  cv.err<-c(cv.err, mean(knn.pred.cv!=admission[,3]))
}
plot(k,cv.err, main="CV error vs k")

# using the optimal parameter
knn.pred<-knn(admission[,1:2],admission[,1:2], admission[,3],k=k[which.min(cv.err)])

# plot
# different symbols represent the true classes
# different colors show the predicted classes
plot(admission[,1:2],col=knn.pred, pch=as.numeric(admission$CLASS),
     xlab="GPA",ylab="GMAT",main="KNN")

(Figure: KNN classification plot)

Solving Bridge Regression Using Local Quadratic Approximation (LQA)


Bridge regression is a broad class of penalized regression that can be used in high-dimensional regression problems.

It includes ridge regression (q=2) and the lasso (q=1) as special cases.

More technical details can be found here. The R code below demonstrates:

  1. solving bridge regression using local quadratic approximation (LQA) and the Newton-Raphson algorithm;
  2. a simulation that selects the tuning parameter using 50/50/200 observations (training/validation/testing).
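For context, in this notation the bridge estimator minimizes

$ \Vert y-X\beta \Vert_2^2+\lambda\sum_{j=1}^{p}\vert\beta_j\vert^{q} $

LQA approximates the penalty around the current iterate by a quadratic, so each Newton-Raphson step reduces to the ridge-like linear system solved in the code below:

$ \beta^{(k+1)}=(X^{T}X+D^{(k)})^{-1}X^{T}y, \qquad D^{(k)}=\mathrm{diag}\Big(\frac{\lambda\, q\, \vert\beta_j^{(k)}\vert^{q-2}}{2}\Big) $

with coefficients that shrink below a threshold set to zero along the way, since $ \vert\beta_j\vert^{q-2} $ blows up at zero when $ q<2 $.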
# The function to solve bridge regression
bridge<-function(x, y, lambda, q=1, eta=0.001){
  library(glmnet)
  # use ridge coefficients as a starting value
  beta.start<-coef(glmnet(x, y, alpha=0, lambda=lambda, intercept=F))
  beta.mat<-matrix(NA,ncol=ncol(beta.start),nrow=nrow(beta.start)-1)

  # take each lambda value in the grid
  for(i in 1:length(lambda)){
    # initial beta without intercept
    beta_prev<-as.vector(beta.start[-1,i])
    # initial converge
    converge<-10^10

    # iterate until convergence or too many iterations
    iteration<-0
    while(converge>eta && iteration<=100){
      iteration<-iteration+1
      # find which coefficients are small enough to set to zero
      del<-which(abs(beta_prev)<eta)

      # if all coefficients are small enough, stop the iteration
      if(length(del)==length(beta_prev)){
        beta_prev[del]<-0
        converge<-0

        # else if we need to remove some but not all of the coefficients  
      }else if(length(del)>0){
        # set these beta to 0
        beta_prev[del]<-0

        # update the design matrix x (drop the zeroed columns)
        x.new<-x[,-del]

        # calculate the diagonal matrix involving penalty terms
        # and the next beta
        if(length(beta_prev)-length(del)==1){
          diag<-lambda[i]*q*(abs(beta_prev[-del])^(q-2))/2
        }else{
          diag<-diag(lambda[i]*q*(abs(beta_prev[-del])^(q-2))/2)
        }

        # next beta
        beta_curr<-beta_prev
        beta_curr[-del]<-solve(t(x.new)%*%x.new+diag)%*%t(x.new)%*%y

        # new converge value
        converge<-sum((beta_curr-beta_prev)^2)
        # next iteration
        beta_prev<-beta_curr

        # if we don't need to remove the coefficients
      }else{
        x.new<-x
        diag<-diag(lambda[i]*q*(abs(beta_prev)^(q-2))/2)
        # next beta
        beta_curr<-solve(t(x.new)%*%x.new+diag)%*%t(x.new)%*%y

        # new converge value
        converge<-sum((beta_curr-beta_prev)^2)
        # next iteration
        beta_prev<-beta_curr

      }
    }

    beta.mat[,i]<-beta_prev
  }
  colnames(beta.mat)<-lambda
  return(beta.mat)
}

# The function to find the optimal coefficient set
# that minimizes the MSE on the validation set
bridge.opt<-function(coef.matrix, newx, newy){
  # calculate the mse
  mse.validate<-NULL
  for(i in 1:ncol(coef.matrix)){
    mse.validate<-c(mse.validate, mean((newx%*%coef.matrix[,i]-newy)^2))
  }
  # find the optimal coefficients set
  coef.opt<-coef.matrix[,which.min(mse.validate)]
  return(coef.opt)
}


##################
# start simulation
# For this simulation, create three data sets 
# consisting of 50/50/200 observations (training/validation/testing).
# Use validation data to select the tuning parameter (lambda).


# q settings
q_seq<-c(0.1,0.5,1,2)
# lambda grid
grid=10^seq(10,-2,length=100)
# training and testing set
ntrain<-50
ntest<-200
# validation set size
nvalidate<-50

# repeat number
repeatnum<-100
# initialize MSE
MSE<-matrix(NA, nrow=repeatnum,ncol=length(q_seq))
colnames(MSE)<-q_seq

library(MASS)
# the beta
beta<-matrix(c(3,1.5,0,0,2,0,0,0),ncol=1)
# covariance matrix of X
cov<-matrix(NA,8,8)
for (i in 1:8){
  for (j in 1:8){
    cov[i,j]<-0.5^(abs(i-j))
  }
}

for (i in 1:repeatnum){
  # generate X
  x.train<-mvrnorm(n=ntrain,rep(0,8),cov)
  x.test <-mvrnorm(n=ntest,rep(0,8),cov)
  x.validate <-mvrnorm(n=nvalidate,rep(0,8),cov)
  # generate error term
  err.train<-matrix(rnorm(n=ntrain,mean=0,sd=3),ncol=1)
  err.test<-matrix(rnorm(n=ntest,mean=0,sd=3),ncol=1)
  err.validate<-matrix(rnorm(n=nvalidate,mean=0,sd=3),ncol=1)
  # calculate Y
  y.train<-x.train%*%beta+err.train
  y.test<-x.test%*%beta+err.test
  y.validate<-x.validate%*%beta+err.validate
  # center Y and standardize X
  y.train<-scale(y.train,scale=F);y.test<-scale(y.test,scale=F)
  y.validate<-scale(y.validate,scale=F)
  x.train<-scale(x.train);x.test<-scale(x.test)
  x.validate<-scale(x.validate)

  # run for each q
  for(j in 1:length(q_seq)){

    # coefficients for the training set
    bridge.train<-bridge(x.train, y.train, grid, q=q_seq[j], eta=0.001)
    # find the optimal coefficients set that minimize mse of validation set
    coef.opt<-bridge.opt(bridge.train, x.validate, y.validate)
    # MSE on the test set using the optimal model
    MSE[i,j]<-mean((x.test%*%coef.opt-y.test)^2)
  }
}

# mean of MSEs under different models
apply(MSE,2,mean)
# sd of MSEs under different models
apply(MSE,2,sd)
# boxplot of MSEs under different models
boxplot(MSE, ylab="MSE", xlab="q")

(Figure: boxplots of test MSE for each q)

The Consistent Estimator of Bernoulli Distribution


This is a simple post illustrating a basic concept in statistics: consistency.

For the Bernoulli distribution, $ Y \sim B(n,p) $, $ \hat{p}=Y/n $ is a consistent estimator of $ p $, because

$ \lim_{n \rightarrow \infty} P(\vert \hat{p}-p \vert \ge \epsilon)=0 $

for any positive number $ \epsilon $. This follows from Chebyshev's inequality, since $ \mathrm{Var}(\hat{p})=p(1-p)/n \rightarrow 0 $.

Here is a simulation showing that the estimator is consistent.

# set parameters
n<-1000;p<-0.5;
# n Bernoulli trials
obs<-rbinom(n,1,p)
# estimate p on different number of trials.
phat<-cumsum(obs)/cumsum(rep(1,n))
# the convergence plot
plot(phat, type="l", xlab="Trials", ylab="phat")
abline(h=p)

(Figure: convergence plot)

# then, 100 repetitions

# set parameters
n<-1000;p<-0.5;B<-100;
# n*B Bernoulli trials
obs<-rbinom(n*B,1,p)
# arrange the n*B observations into an n-by-B matrix
obs_mat<-matrix(obs, nrow=n, ncol=B)
# a function to estimate p on different number of trials
est_p<-function(x,n) cumsum(x)/cumsum(rep(1,n))
# estimate p on different number of trials for each repetition
phat_mat<-apply(obs_mat,2, est_p, n=n)
# the convergence plot with 100 repetitions
matplot(phat_mat,type="l",lty=1,xlab="Trials",ylab="phat")

(Figure: convergence plots, 100 repetitions)

Permutation Test for Principal Component Analysis


The procedure of a permutation test for PCA is as follows:

  1. Independently permute each column of the data matrix.

  2. Conduct the PCA and find the proportion of variance explained by each of the components 1 to s. Store this information.

  3. Repeat steps 1 and 2 R times.

At the end of this we will have a matrix with R rows and s columns that contains the proportion of variance explained by each component for each replicate.

Finally, compare the observed values from the original data to the set of values from the permutations in order to determine the approximate p-value.
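In notation, with $ s $ components and $ R $ replicates, the approximate p-value for component $ j $ is the fraction of permutation replicates whose proportion of explained variance exceeds the observed one:

$ p_j=\frac{1}{R}\,\#\{r:\ \mathrm{pve}^{\mathrm{perm}}_{rj} > \mathrm{pve}_j\} $

This is exactly what the apply() line in the code below computes.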

The R code:

pca_perm.R
# the function to assess the significance of the principal components
sign.pc<-function(x,R=1000,s=10, cor=T,...){
  # run PCA
  pc.out<-princomp(x,cor=cor,...)
  # the proportion of variance of each PC
  pve=(pc.out$sdev^2/sum(pc.out$sdev^2))[1:s]

  # a matrix with R rows and s columns that contains
  # the proportion of variance explained by each pc
  # for each randomization replicate.
  pve.perm<-matrix(NA,ncol=s,nrow=R)
  for(i in 1:R){
    # permute each column independently
    x.perm<-apply(x,2,sample)
    # run PCA
    pc.perm.out<-princomp(x.perm,cor=cor,...)
    # the proportion of variance of each PC.perm
    pve.perm[i,]=(pc.perm.out$sdev^2/sum(pc.perm.out$sdev^2))[1:s]
  }
  # calculate the p-values
  pval<-apply(t(pve.perm)>pve,1,sum)/R
  return(list(pve=pve,pval=pval))
}


# apply the function
library(RCurl)
data <- getURL("https://raw.githubusercontent.com/bioops/mis_scripts/master/statistics/data/pca.txt")
OCRdata <- read.table(text = data, header=T,sep="\t")
OCRdat<-OCRdata[,-1] #leave out location id column
sign.pc(OCRdat,cor=T)

The result:

$pve
    Comp.1     Comp.2     Comp.3     Comp.4     Comp.5     Comp.6     Comp.7     Comp.8 
0.23129378 0.14864525 0.11552865 0.06741744 0.06274641 0.05858431 0.05033795 0.04484122 
    Comp.9    Comp.10 
0.03873311 0.03431297 

$pval
 [1] 0.000 0.000 0.000 1.000 1.000 0.996 1.000 1.000 1.000 1.000
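At the 0.05 level, then, only the first three principal components explain significantly more variance than expected under random permutation.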

Demo of Support Vector Machine


A demo of a support vector machine (SVM) with a radial kernel: simulate data with a circular decision boundary, tune the parameters by cross-validation, and evaluate on an independent test set.
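For reference, the radial (RBF) kernel used by e1071's svm is $ K(x,x')=\exp(-\gamma \Vert x-x' \Vert^2) $: larger gamma yields more local, flexible decision boundaries, while cost sets the penalty for margin violations. Both are tuned below by cross-validation via tune().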

svm.R
# load library
library(e1071)

# simulate x and y
x1<-rnorm(100);x2<-rnorm(100)
y<-as.factor(ifelse(x1^2+x2^2<=1.6,1,-1))
dat3<-data.frame(x1,x2,y)

# (a) tuning parameters
cost<-c(0.001, 0.01, 0.1, 1, 5, 10, 100)
gamma<-seq(0.1,1,0.1)
tune.out<-tune(svm, y~., data=dat3, kernel="radial",
              ranges=list(cost=cost,gamma=gamma))
bestmod<-tune.out$best.model
# the best model
summary(bestmod)

# (b) test set
# simulate test set
x1<-rnorm(100);x2<-rnorm(100)
y<-as.factor(ifelse(x1^2+x2^2<=1.6,1,-1))
dat3.test<-data.frame(x1,x2,y)
ypred<-predict(bestmod,dat3.test)
# the confusion matrix
table(predict=ypred, truth=dat3.test$y)

Linear Regression With Cross Validation


Cross-validation for a linear model, and bootstrap confidence intervals for the coefficients.
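For reference, the delta value returned by cv.glm in the code below estimates the prediction error; for leave-one-out CV (LOOCV) it is

$ CV_{(n)}=\frac{1}{n}\sum_{i=1}^{n}(y_i-\hat{y}_{(-i)})^2 $

where $ \hat{y}_{(-i)} $ is the prediction for observation $ i $ from the model fit without it.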

linear_CV.R
# (a) Linear model

# read data
library(RCurl)
data <- getURL("https://raw.githubusercontent.com/bioops/mis_scripts/master/statistics/data/prostateData.txt")
prostate <- read.table(text = data, header=T,sep="\t")
# remove the first column
prostate<-prostate[,-1]
# run the linear model
prostate_lm<-lm(lpsa~.,data=prostate)

# summary of the fit
summary(prostate_lm)


# (b) Cross-validation

# need boot package
library(boot)
# fit the linear model
glm.fit<-glm(lpsa~lcavol+lweight+age+lbph+svi+lcp+gleason+pgg45,data=prostate)

# LOOCV
cv.glm(prostate, glm.fit)$delta[1]

# 10-fold CV, repeated 10 times
cv.err.10<-rep(0,10)
for(i in 1:10){
  cv.err.10[i]<-cv.glm(prostate, glm.fit, K=10)$delta[1]
}

# mean error of 10-fold CV
mean(cv.err.10)


# (c) Bootstrap

# get the residuals and fitted values from the linear model
prostate_new<-data.frame(prostate, res=resid(prostate_lm), fitted=fitted(prostate_lm))
# a function to get the coefficients from each bootstrap
prostate.fun<-function(data, i){
  d<-data
  d$lpsa<-d$fitted+d$res[i]
  coef(update(prostate_lm, data=d))
}
# bootstrap
prostate.lm.boot<-boot(prostate_new, prostate.fun, R=1000)

# 95% confidence interval for lcavol
boot.ci(prostate.lm.boot, index=2)
# 95% confidence interval for lweight
boot.ci(prostate.lm.boot, index=3)

Estimate Gamma Distribution Parameters Using MME and MLE


This post shows how to estimate the gamma distribution parameters using (a) the method of moments (MME) and (b) maximum likelihood estimation (MLE).

The probability density function of the gamma distribution with shape $ \alpha $ and scale $ \beta $ is

$ f(x)=\frac{1}{\Gamma(\alpha)\beta^{\alpha}}x^{\alpha-1}e^{-x/\beta}, \quad x>0 $

The MME estimators (matching the code below) are:

$ \hat{\alpha}=\frac{n\bar{X}^2}{\sum_{i=1}^{n}(X_i-\bar{X})^2}, \qquad \hat{\beta}=\frac{\sum_{i=1}^{n}(X_i-\bar{X})^2}{n\bar{X}} $

We can calculate the MLE of $ \alpha $ using the Newton-Raphson method.

For $ k=1,2,\ldots, $

$ \hat{\alpha}^{(k)}=\hat{\alpha}^{(k-1)}-\frac{\ell'(\hat{\alpha}^{(k-1)})}{\ell''(\hat{\alpha}^{(k-1)})} $

where $ \ell $ is the profile log-likelihood with $ \beta=\bar{X}/\alpha $ substituted in, so that

$ \ell'(\alpha)=n\log\frac{\alpha}{\bar{X}}-n\psi(\alpha)+\sum_{i=1}^{n}\log X_i, \qquad \ell''(\alpha)=\frac{n}{\alpha}-n\psi'(\alpha) $

with $ \psi $ the digamma function.

Use the MME estimate as the initial value $ \hat{\alpha}^{(0)} $, and stop the approximation when $ \vert \hat{\alpha}^{(k)}-\hat{\alpha}^{(k-1)} \vert < 0.0000001 $. The MLE of $ \beta $ can then be found by $ \hat{\beta} = \bar{X} / \hat{\alpha} $.

Below is the R code.

gamma.R
# (a) MME
gamma_MME<-function(x){
  n<-length(x)
  mean_x<-mean(x)
  alpha<-n*(mean_x^2)/sum((x-mean_x)^2)
  beta<-sum((x-mean_x)^2)/n/mean_x
  estimate_MME<-data.frame(alpha,beta)
  return(estimate_MME)
}


# (b) MLE
gamma_MLE<-function(x){
  n<-length(x)
  mean_x<-mean(x)

  # initialize the convergence value and alpha
  converg<-1000
  alpha_prev<-gamma_MME(x)$alpha

  # initialize two vectors to store alpha and beta at each step
  alpha_est<-alpha_prev
  beta_est<-mean_x/alpha_prev

  # Newton-Raphson
  while(converg>0.0000001){
    # first derivative of the profile log-likelihood at alpha_(k-1)
    der1<-n*log(alpha_prev/mean_x)-n*digamma(alpha_prev)+sum(log(x))
    # second derivative of the profile log-likelihood at alpha_(k-1)
    der2<-n/alpha_prev-n*trigamma(alpha_prev)
    #calculate next alpha
    alpha_next<-alpha_prev-der1/der2
    # get the convergence value
    converg<-abs(alpha_next-alpha_prev)
    # store estimators in each step
    alpha_est<-c(alpha_est, alpha_next)
    beta_est<-c(beta_est, mean_x/alpha_next)
    # go to next alpha
    alpha_prev<-alpha_next
  }

  alpha<-alpha_next
  beta<-mean_x/alpha_next
  estimate_MLE<-data.frame(alpha,beta)

  return(estimate_MLE)
}

# apply
x<-rgamma(100,2,scale=5)
gamma_MME(x)
gamma_MLE(x)
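Since x is simulated from a gamma distribution with shape $ \alpha=2 $ and scale $ \beta=5 $, both estimators should return values close to these true parameters; the MLE is typically somewhat more precise.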

2015


Happy new year!

Hopefully, I will write and code more often in 2015.

Stay tuned!

CFDA Approved BGI’s Next Generation Sequencing Diagnostic Products


Just collecting some related material here; no commentary. http://www.sda.gov.cn/WS01/CL0051/102239.html On June 30, 2014, the China Food and Drug Administration (CFDA), after review, approved the medical device registration of the BGISEQ-1000 and BGISEQ-100 gene sequencers, together with the fetal chromosomal aneuploidy (T21, T18, T13) detection kits (combinatorial probe-anchor ligation sequencing method and semiconductor sequencing method). These are the first next-generation sequencing diagnostic products the CFDA has approved for registration.

These products can be used for noninvasive prenatal screening and auxiliary diagnosis of the fetal chromosomal aneuploidies trisomy 21, trisomy 18, and trisomy 13, by sequencing cell-free DNA fragments from the peripheral blood plasma of high-risk pregnant women at 12 or more weeks of gestation.

http://www.knowgene.com/question/677 The BGISEQ-1000 sequencer is based on the Complete Genomics platform and is paired with the fetal chromosomal aneuploidy (T21, T18, T13) detection kit (combinatorial probe-anchor ligation sequencing method). The CG platform offers high throughput but a relatively long turnaround time, so the BGISEQ-1000 will likely be used for samples collected nationwide and sequenced and analyzed centrally.

The BGISEQ-100 sequencer is based on the Ion Torrent platform and is paired with the fetal chromosomal aneuploidy (T21, T18, T13) detection kit (semiconductor sequencing method). The Ion Torrent platform features short sequencing runs and flexible deployment, so the BGISEQ-100 will most likely be deployed to large and mid-sized hospitals with sufficient testing volume, for on-site sampling, sequencing, analysis, and reporting.