[Swift-commit] r3070 - in SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain: . Rscripts
noreply at svn.ci.uchicago.edu
Thu Aug 6 11:03:58 CDT 2009
Author: andric
Date: 2009-08-06 11:03:58 -0500 (Thu, 06 Aug 2009)
New Revision: 3070
Added:
SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/
SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/peakfitv2v1.R
SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/preprocessEnewsmooth.R
SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/subqrymediator.R
Log:
R code for doing peak analysis
Added: SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/peakfitv2v1.R
===================================================================
--- SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/peakfitv2v1.R (rev 0)
+++ SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/peakfitv2v1.R 2009-08-06 16:03:58 UTC (rev 3070)
@@ -0,0 +1,385 @@
+# ----------------------------------------------------------------------
+# Savitzky-Golay Algorithm
+# ----------------------------------------------------------------------
+# T2 <- sav.gol(T, fl, forder, dorder);
+#
+# Polynomial filtering method of Savitzky and Golay
+# See Numerical Recipes, 1992, Chapter 14.8, for details.
+#
+# T = vector of signals to be filtered
+#      (here T is a single numeric vector; the filter is applied along it)
+# fl = filter length (for instance fl = 51..151)
+# forder = filter order (2 = quadratic filter, 4= quartic)
+# dorder = derivative order (0 = smoothing, 1 = first derivative, etc.)
+#
+sav.gol <- function(T, fl, forder, dorder)
+{
+ m <- length(T)
+ dorder <- dorder + 1
+
+ # -- calculate filter coefficients --
+ fc <- (fl-1)/2 # index: window left and right
+    X <- outer(-fc:fc, 0:forder, FUN="^")        # polynomial terms and coefficients
+ Y <- pinv(X); # pseudoinverse
+
+ # -- filter via convolution and take care of the end points --
+    T2 <- convolve(T, rev(Y[dorder,]), type="o") # "o" = open (zero-padded) convolution
+ T2 <- T2[(fc+1):(length(T2)-fc)]
+}
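+# A minimal usage sketch (hypothetical data; requires pinv() defined below):
+# smooth a noisy sine with a quadratic filter over an 11-point window.
+#   x <- seq(0, 4*pi, length.out = 200)
+#   y <- sin(x) + rnorm(200, sd = 0.1)
+#   y_smooth <- sav.gol(y, fl = 11, forder = 2, dorder = 0)
+#   plot(x, y, type = "l"); lines(x, y_smooth, col = "red", lwd = 2)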
+#-----------------------------------------------------------------------
+# *** Pseudoinverse of a Matrix ***
+# using singular value decomposition
+#
+pinv <- function (A)
+{
+ s <- svd(A)
+ s$v %*% diag(1/s$d) %*% t(s$u)
+}
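+# Quick sanity-check sketch: for a full-column-rank A, pinv(A) %*% A is
+# (numerically) the identity.
+#   A <- outer(-2:2, 0:2, FUN = "^")   # same construction as in sav.gol
+#   round(pinv(A) %*% A, 10)           # ~ 3 x 3 identity matrix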
+#-----------------------------------------------------------------------
+#
+#
+#------------gamma function used in peakfit-------------------------------------------
+#
+# y = a0 * exp(-(x-a1)/a2) * ( ((x-a1)/a2 + a3 - 1)/(a3 - 1) )^(a3-1)
+# a0 = amplitude
+# a1 = center
+# a2 = width (>0)
+# a3 = shape (> 1.01, < 167.92)
+#
+
+f_peakfit <- function(a0,a1,a2,a3,n,x) {
+ expr <- rep(c(0),length(x));
+ for (i in 1:n)
+ {
+ tmp <- rep(c(0),length(x))
+ tmp <- a0[i]*exp(-(x - a1[i])/a2[i])*((x - a1[i])/(a2[i]*(a3[i] - 1)) + 1 )^(a3[i] - 1);
+# when (x - a1[i])/(a2[i]*(a3[i]-1)) + 1 < 0, the power is undefined; the resulting NaNs are replaced by zero
+    tmp[is.nan(tmp)] <- 0
+ expr <- expr + tmp
+ }
+  expr
+}
+
+
+# define the function (a simplified version of the one above) used to fit the data
+#
+# y = a*exp(-b*(x - center))*(b/shape*(x - center) + 1)^(shape)
+#
+# a = amplitude
+# b = decay parameter (corresponds to 1/a2)
+# center = a1;
+# shape = a3 - 1; later shape will be fixed at 8.6, a value commonly used in fMRI studies
+#
+#
+# -------------------------------------------------------------------------------------------
+# the following is a summation of gamma functions used to fit the raw data
+# assumption: linear system
+# x is the time points
+# n is the number of peaks detected by second derivatives
+# a, b and center are the parameters defined above and need to be estimated
+# a, b and center are vectors with length corresponding to the number of peaks
+#
+
+
+# shape is fixed at 8.6 in all of the following analyses
+shape = 8.6
+f <- function(a,b,center,n,x) {
+ shape = 8.6;
+ expr <- rep(c(0),length(x));
+ for (i in 1:n)
+ {
+ tmp <- rep(c(0),length(x))
+ tmp <- a[i]*exp(-b[i]*(x - center[i]))*(b[i]/shape*(x -center[i]) + 1 )^shape
+# when b[i]/shape*(x - center[i]) + 1 < 0, the power is undefined; the resulting NaNs are replaced by zero
+    tmp[is.nan(tmp)] <- 0
+ expr <- expr + tmp
+ }
+  expr
+}
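+# A quick sketch (hypothetical values) of a single gamma-variate peak:
+#   x <- seq(1, 40, by = 0.5)
+#   y <- f(a = 2, b = 0.8, center = 10, n = 1, x = x)
+#   plot(x, y, type = "l")   # the curve peaks at x = center with height a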
+
+# least-squares fit: minimize the function 0.5*|y - y_hat|^2.
+# Used as the objective function for constrained estimation.
+fcn_L2 <- function(par,n,x,y_and_noise,fcall) {
+ a <- par[1:n]
+ b <- par[(n+1):(2*n)]
+ center <- par[(2*n+1):(3*n)]
+ new_par <- c(list(a = a),list(b = b),list(center = center))
+ res <- (y_and_noise - do.call("fcall",c( as.list(new_par), list(n = n), list(x = x) ) ));
+ 0.5*t(res) %*% res
+ }
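+# A minimal evaluation sketch (hypothetical numbers).  Parameters are packed
+# as c(a_1..a_n, b_1..b_n, center_1..center_n); for two peaks:
+#   par0 <- c(2, 1.5,  0.8, 0.6,  10, 25)   # a, b, center for peaks 1 and 2
+#   fcn_L2(par0, n = 2, x = 1:60, y_and_noise = rnorm(60), fcall = f)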
+
+
+# define the function (a simplified version of the one above) used to fit the data
+#
+# y = a*exp(-b*(x - center))*(b/shape*(x - center) + 1)^(shape)
+#
+# a = amplitude
+# b = decay parameter (corresponds to 1/a2)
+# center = a1;
+# shape = a3 - 1; later shape will be fixed at 8.6, a value commonly used in fMRI studies
+#
+# df_a = exp(-b*(x - center))*(b/shape*(x - center) + 1)^(shape)
+# df_b = a*(-(x-center))*exp(-b*(x - center))*(b/shape*(x - center) + 1)^(shape) + a*exp(-b*(x - center))*shape*(b/shape*(x - center) + 1)^(shape-1)*(x - center)/shape
+# df_center = a*b*exp(-b*(x - center))*(b/shape*(x - center) + 1)^(shape) + a*exp(-b*(x - center))*shape*(b/shape*(x - center) + 1)^(shape-1)*(-b/shape)
+
+D_fcn_L2 <- function(par,n,x,y_and_noise,fcall) {
+ df_a <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ df_b <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ df_center <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ a <- par[1:n]
+ b <- par[(n+1):(2*n)]
+ center <- par[(2*n+1):(3*n)]
+ new_par <- c(list(a = a),list(b = b),list(center = center))
+ for (i in 1:n) {
+ df_a[,i] <- exp(-b[i]*(x - center[i]))*(b[i]/shape *(x -center[i]) + 1 )^shape
+ df_b[,i] <- a[i]*(-(x - center[i]))*exp(-b[i]*(x - center[i]))*(b[i]/shape *(x -center[i]) + 1 )^shape + a[i]*exp(-b[i]*(x - center[i]))*shape*(b[i]/shape*(x -center[i]) + 1 )^(shape - 1)*(x - center[i])/shape
+ df_center[,i] <- a[i]*b[i]*exp(-b[i]*(x - center[i]))*(b[i]/shape *(x -center[i]) + 1 )^shape + a[i]*exp(-b[i]*(x - center[i]))*shape*(b[i]/shape*(x -center[i]) + 1 )^(shape -1 )*(-b[i]/shape)
+ }
+ tmp <- cbind(df_a,df_b,df_center)
+    tmp[is.nan(tmp)] <- 0   # zero out NaNs from negative power bases
+ j_f <- tmp
+ y_eval <- do.call("fcall",c(list(a = a), list(b = b), list(center = center), list(n = n), list(x = x)))
+ t(j_f) %*% (y_eval - y_and_noise)
+ }
+
+#---- deriv 3-------------------------------------------------------------
+D_individual_f <- deriv3(
+ ~a*exp(-b*(x - center))*(b/shape*(x -center) + 1 )^shape,
+ c("a","b","center"),
+ function(a,b,center,x,shape) {})
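+# Sketch of what deriv3 returns (hypothetical values): the function value with
+# "gradient" and "hessian" attributes, unpacked by the functions below.
+#   v <- D_individual_f(a = 1, b = 0.5, center = 10, x = 1:30, shape = 8.6)
+#   attr(v, "gradient")   # length(x) x 3 matrix: d/da, d/db, d/dcenter
+#   attr(v, "hessian")    # length(x) x 3 x 3 array of second derivatives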
+
+test_D_fcn_L2 <- function(par,n,x,y_and_noise,fcall) {
+ shape = 8.6
+ df_a <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ df_b <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ df_center <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ a <- par[1:n]
+ b <- par[(n+1):(2*n)]
+ center <- par[(2*n+1):(3*n)]
+ new_par <- c(list(a = a),list(b = b),list(center = center))
+ for (i in 1:n) {
+ tmp <- D_individual_f(a[i],b[i],center[i],x,shape)
+ g <- attr(tmp,"gradient")
+ df_a[,i] <- g[,1];
+ df_b[,i] <- g[,2];
+ df_center[,i] <- g[,3];
+ }
+ tmp <- cbind(df_a,df_b,df_center)
+    tmp[is.nan(tmp)] <- 0   # zero out NaNs from negative power bases
+ j_f <- tmp
+ y_eval <- do.call("fcall",c(list(a = a), list(b = b), list(center = center), list(n = n), list(x = x)))
+ t(j_f) %*% (y_eval - y_and_noise)
+ }
+#----------------------------------------------------------------------
+
+# Hessian of the objective 0.5*|y - y_hat|^2: Gauss-Newton term J'J plus the residual-weighted second-order term
+test_D2_fcn_L2 <- function(par,n,x,y_and_noise,fcall) {
+ shape = 8.6
+ df_a <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ df_b <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ df_center <- matrix(rep(c(0),n*length(x)),ncol = n, nrow = length(x))
+ a <- par[1:n]
+ b <- par[(n+1):(2*n)]
+ center <- par[(2*n+1):(3*n)]
+ new_par <- c(list(a = a),list(b = b),list(center = center))
+  f <- do.call("fcall",c(list(a = a), list(b = b), list(center = center), list(n = n), list(x = x))) - y_and_noise   # residual vector (locally shadows the model function f)
+
+ for (i in 1:n) {
+ tmp <- D_individual_f(a[i],b[i],center[i],x,shape)
+ g <- attr(tmp,"gradient")
+ df_a[,i] <- g[,1];
+ df_b[,i] <- g[,2];
+ df_center[,i] <- g[,3];
+ }
+ tmp <- cbind(df_a,df_b,df_center)
+    tmp[is.nan(tmp)] <- 0   # zero out NaNs from negative power bases
+ j_f <- tmp
+ term1 <- t(j_f) %*% j_f
+# first term (J'J) computed; a 3n x 3n matrix
+
+ f_D2f = matrix(rep(c(0),9*n^2),ncol = 3*n,nrow = 3*n);
+ for (i in 1:n) {
+ tmp <- D_individual_f(a[i],b[i],center[i],x,shape)
+ h <- colSums(attr(tmp,"hessian")*f,1)
+ f_D2f[i,i] = h[1,1];
+ f_D2f[n+i,i] = f_D2f[i,n+i] = h[1,2];
+ f_D2f[2*n+i,i] = f_D2f[i,2*n+i] = h[1,3];
+ f_D2f[n+i,n+i] = h[2,2];
+ f_D2f[2*n+i,n+i] = f_D2f[n+i,2*n+i] = h[2,3];
+ f_D2f[2*n+i,2*n+i] = h[3,3];
+ }
+    f_D2f[is.nan(f_D2f)] <- 0   # zero out NaNs from negative power bases
+
+ term1 + f_D2f
+ }
+
+#----------------------------------------------------------------------------------------
+# plot fitted curve with observed one
+plot.est <- function(par,fcall,n,x,col) {
+ a <- par[1:n]
+ b <- par[(n+1):(2*n)]
+ center <- par[(2*n+1):(3*n)]
+ new_par <- c(list(a = a),list(b = b),list(center = center))
+ y_est <- do.call("fcall",c( as.list(new_par), list(n = n), list(x = x) ))
+ lines(x,y_est,col = col,lwd = 2)
+ }
+
+# plot individual fitted curve
+plot.est.individual <- function(par,fcall,n,x,col) {
+ a <- par[1:n]
+ b <- par[(n+1):(2*n)]
+ center <- par[(2*n+1):(3*n)]
+ for (i in 1:n)
+ {
+ new_par <- c(list(a = a[i]),list(b = b[i]),list(center = center[i]))
+ y_est <- do.call("fcall",c( as.list(new_par), list(n = 1), list(x = x) ))
+ lines(x,y_est,col = col,lwd = 1)
+ }
+}
+
+
+# fit summary
+
+# fit statistics summary for constrained estimation
+est.stats.summary <- function(par,sse,x,y) {
+ n_x = length(x); # the number of observations
+ m = length(par); # the number of parameters
+ sse # sum of squares due to error
+ ave_y = mean(y); # the mean of observation
+ ssm = 0.5* t(y - ave_y) %*% (y - ave_y); # the sum of square about mean
+ r2 = 1 - sse/ssm; # R square, coefficient of determination
+ dof = n_x - m; # degree of freedom
+ r2_adjusted = 1 - sse*(n_x-1)/(ssm*(dof-1)); # adjusted R square
+ mse = sse/dof; # the mean square error
+ se = sqrt(mse); # the standard error of fit, the root mse
+ msr = (ssm - sse)/(m - 1); # mean square regression
+ f = msr/mse; # F-statistics
+ c(r2,dof,r2_adjusted,f,se)
+# c("r2" = r2,"DF" = dof,"Adj r2" = r2_adjusted,"F" = f,"standard error of fit" = se)
+ }
+
+# fit summary: the two fitting parameters (amplitude and center), FWHM, and the analytical area under the curve
+# -----fwhm------------
+# fwhm ~ 2.35*b^(1/2)*c
+# b & c are from gamma variate function x^b*exp(-t/c)
+# this approximation is found in "Event-related fMRI Contrast When Using Constant Interstimulus Interval: Theory and Experiment", published in Magnetic Resonance in Medicine 43:540-548 (2000)
+# this approximation result is a little bit different from that from peakfit
+# in our code, the shape parameter is fixed at 8.6 and the width-related parameter is b
+# fwhm ~ 2.35*sqrt(8.6)/b
+# -----area under the curve-------
+# after some derivations, area under the curve is a/b*exp(8.6)*8.6^(-8.6)*gamma(8.6+1);
+# the derivation was confirmed with peakfit result
+est.summary_1 <- function(fcall,par,sse,n,x,y) {
+ a <- par[1:n] # estimated a
+ b <- par[(n+1):(2*n)] # estimated b
+ center <- par[(2*n+1):(3*n)] # estimated center
+# --calculate standard error of fit
+ n_x = length(x); # the number of observations
+ m = length(par); # the number of parameters
+ dof = n_x - m; # degree of freedom
+ sse # sum of squares due to error
+ mse = sse/dof; # the mean square error
+ se = sqrt(mse); # the standard error of fit, the root mse
+#---standard error of parameters, se * 1/sqrt(diag(hessian matrix of 0.5*|y-y_hat|^2))
+ std_par <- se * 1/sqrt(diag(test_D2_fcn_L2(par,n,x,y,f))) # std error of estimated parameters
+ std_a <- std_par[1:n];
+ std_b <- std_par[(n+1):(2*n)];
+ std_center <- std_par[(2*n+1):(3*n)];
+#---calculate fwhm, analytical area and percent area------
+ shape <- 8.6
+ fwhm <- matrix(c(0),ncol = 1,nrow = n);
+ area <- matrix(c(0),ncol = 1,nrow = n);
+  percent_area <- matrix(c(0),ncol = 1,nrow = n);
+  for (i in 1:n) {
+ fwhm[i] <- 2.35*sqrt(shape)/b[i];
+ area[i] <- a[i]/b[i]*exp(shape)*shape^(-shape)*gamma(shape+1);
+ }
+ percent_area <- area/sum(area)*100;
+#---need to double check---------------------
+ std_fwhm <- std_b * 2.35*sqrt(8.6) * 1/sqrt(b^4);
+#---calculate partial F stats for each peak
+ y_est_col <- matrix(c(0),ncol = n, nrow = n_x);
+ for (i in 1:n)
+ {
+ new_par <- c(list(a = a[i]),list(b = b[i]),list(center = center[i]));
+ y_est_col[,i] <- do.call("fcall",c( as.list(new_par), list(n = 1), list(x = x) )); # each col corresponds to each peak fitted ts
+ }
+
+ p_partial <- matrix(c(0),ncol = 1, nrow = n);
+ for (i in 1:n)
+ {
+    res_wo_i <- rowSums(y_est_col[, -i, drop = FALSE]) - y; # residual without the ith peak (drop = FALSE keeps the matrix shape when n = 2)
+ ssr <- 0.5*res_wo_i %*% res_wo_i; # sum square of error of residual
+ dof_diff <- 3; # degree of freedom difference between full model and reduced model (i.e. without ith peak)
+ F_partial <- ((ssr - sse)/dof_diff)/(sse/dof); # partial F stats for ith peak
+ p_partial[i] <- 1 - pf(F_partial,dof_diff,dof);
+ }
+
+#---summary of individual results
+ summary <- data.frame(a,std_a,center,std_center,fwhm,std_fwhm,area,percent_area,p_partial);
+ name_of_row <- paste("Peak",1:n,sep="");
+ name_of_col <- c("Amplitude","e_Amplitude","Center","e_Center","FWHM","e_FWHM","Area","% Area","p_partial");
+ dimnames(summary) <- list(name_of_row,name_of_col);
+ summary
+ }
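+# Worked check of the FWHM formula above (hypothetical values): with b = 0.8 at
+# shape 8.6, fwhm = 2.35*sqrt(8.6)/0.8, about 8.6 time points; the corresponding
+# area for a = 2 would be 2/0.8*exp(8.6)*8.6^(-8.6)*gamma(8.6+1).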
+
+est.summary <- function(par,sse,n,x,y) {
+ a <- par[1:n] # estimated a
+ b <- par[(n+1):(2*n)] # estimated b
+ center <- par[(2*n+1):(3*n)] # estimated center
+# --calculate standard error of fit
+ n_x = length(x); # the number of observations
+ m = length(par); # the number of parameters
+ dof = n_x - m; # degree of freedom
+ sse # sum of squares due to error
+ mse = sse/dof; # the mean square error
+ se = sqrt(mse); # the standard error of fit, the root mse
+#---standard error of parameters, se * 1/sqrt(diag(hessian matrix of 0.5*|y-y_hat|^2))
+ std_par <- se * 1/sqrt(diag(test_D2_fcn_L2(par,n,x,y,f))) # std error of estimated parameters
+ std_a <- std_par[1:n];
+ std_b <- std_par[(n+1):(2*n)];
+ std_center <- std_par[(2*n+1):(3*n)];
+#---calculate fwhm, analytical area and percent area------
+ shape <- 8.6
+ fwhm <- matrix(c(0),ncol = 1,nrow = n);
+ area <- matrix(c(0),ncol = 1,nrow = n);
+  percent_area <- matrix(c(0),ncol = 1,nrow = n);
+  for (i in 1:n) {
+ fwhm[i] <- 2.35*sqrt(shape)/b[i];
+ area[i] <- a[i]/b[i]*exp(shape)*shape^(-shape)*gamma(shape+1);
+ }
+ percent_area <- area/sum(area, na.rm=T)*100;
+#---need to double check---------------------
+ std_fwhm <- std_b * 2.35*sqrt(8.6) * 1/sqrt(b^4);
+#---onset ---------------
+ onset <- center - shape/b;
+#---summary of individual results
+ summary <- data.frame(a,std_a,center,std_center,fwhm,std_fwhm,area,percent_area,onset);
+ name_of_row <- paste("Peak",1:n,sep="");
+ name_of_col <- c("Amplitude","e_Amplitude","Center","e_Center","FWHM","e_FWHM","Area","% Area","onset");
+ dimnames(summary) <- list(name_of_row,name_of_col);
+ summary
+ }
+
+
+
+
+
+
Property changes on: SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/peakfitv2v1.R
___________________________________________________________________
Name: svn:executable
+ *
Added: SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/preprocessEnewsmooth.R
===================================================================
--- SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/preprocessEnewsmooth.R (rev 0)
+++ SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/preprocessEnewsmooth.R 2009-08-06 16:03:58 UTC (rev 3070)
@@ -0,0 +1,76 @@
+preprocess_1 <- function(input) {
+
+  # input: the time-series vector (the caller reads the file via read.table/as.matrix); scale by 1000 (plots later label this as BOLD (1E-3))
+ original_y <- input*1000;
+
+ # original # of points before interpolation
+ original_x <- c(1:length(original_y));
+
+ #---------calculate 2nd derivative to find peaks------------------------------------
+ #
+  # fl is the window length
+  # forder is the order of the polynomial
+  # dorder is the order of the derivative
+
+ windowlength <- 3 # 3 data points covered
+  # a 4-point window was tested; fine detail may be lost
+ order_polynomial <- 2; # includes constant, linear and quadratic terms
+ order_derivative <- 2; # need the information of second derivatives
+
+ derivative_original_y <- sav.gol(original_y, fl = windowlength, forder = order_polynomial, dorder = order_derivative);
+
+
+  # guess the number of possible peaks from the second derivative, keeping points whose amplitude exceeds the noise level
+ noise_level <- 0; # so far noise level is set to 0
+
+ # guess peak location: use all negative second derivatives or use local min of second derivative
+ # peak_location = original_x[derivative_original_y < 0 & original_y > noise_level];
+
+ tmp <- rep(c(0),length(original_x));
+ for ( i in 2:(length(original_x)-1) ) {
+ if ( (derivative_original_y[i] < derivative_original_y[i-1]) & (derivative_original_y[i] < derivative_original_y[i+1]) )
+ { tmp[i] = i;}
+ }
+ peak_location = original_x[tmp > 0];
+
+ possible_peaks = length(peak_location)
+
+ #-------------------------------------------------------------------------------------
+
+
+ #-----------------interpolation data---------------------------------------------------
+ #
+  # the number of parameters must always be smaller than the number of observations;
+  # when the second derivative detects many peaks, interpolation is used
+  stim_res = 2 # simply double the time points
+
+ interpolation <- ksmooth(original_x,original_y,"normal",bandwidth = 1,x.points = seq(1,length(original_x),by = 1/stim_res));
+
+ #derivative_interpolation <- sav.gol(interpolation$y, fl = windowlength, forder = order_polynomial, dorder = order_derivative);
+  # calculate the new peak locations
+ new_peak_location = stim_res*(peak_location - 1) + 1;
+
+  # calculate the new peak amplitudes
+
+ peak_amplitude = rep(c(0),possible_peaks);
+ for (i in 1:possible_peaks ) { peak_amplitude[i] = interpolation$y[new_peak_location[i]] }
+
+  #--------------------initialization----------------------------------------
+ #
+ # parameter a corresponds to peak amplitude
+ a_guess <- peak_amplitude;
+
+ # parameter b corresponds to 1/peak width
+ b_guess <- rep(c(length(original_y)/possible_peaks), possible_peaks);
+
+ #real_center_guess <- interpolation$x[new_peak_location];
+ #center_guess <- b_guess*real_center_guess;
+ center_guess <- interpolation$x[new_peak_location];
+
+ guess <- c(a_guess,b_guess,center_guess);
+ n <- possible_peaks;
+
+ out <- list(guess = guess,n = n,interpolated_x = interpolation$x,interpolated_y = interpolation$y);
+ out
+}
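+# A minimal usage sketch (synthetic series; assumes sav.gol and f from
+# peakfitv2v1.R are already sourced):
+#   ts <- f(a = c(2, 1), b = c(0.8, 0.6), center = c(10, 25), n = 2, x = 1:40)/1000
+#   prep <- preprocess_1(input = ts)
+#   prep$n       # number of candidate peaks detected
+#   prep$guess   # packed starting values c(a, b, center) for nlminb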
+
\ No newline at end of file
Property changes on: SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/preprocessEnewsmooth.R
___________________________________________________________________
Name: svn:executable
+ *
Added: SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/subqrymediator.R
===================================================================
--- SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/subqrymediator.R (rev 0)
+++ SwiftApps/SIDGrid/swift/projects/andric/peakfit_pilots/PK2/wholebrain/Rscripts/subqrymediator.R 2009-08-06 16:03:58 UTC (rev 3070)
@@ -0,0 +1,64 @@
+library(stats)
+source("Rscripts/preprocessEnewsmooth.R")
+source("Rscripts/peakfitv2v1.R")
+
+inputfilename <- Sys.getenv("R_INPUT")
+print(inputfilename)
+allargs <- Sys.getenv("R_SWIFT_ARGS")
+print(allargs)
+outobjectname <- noquote(strsplit(allargs," ")[[1]][2])
+print(outobjectname)
+outputPDFfilename <- paste(outobjectname,"pdf",sep="")
+outputAfilename <- paste(outobjectname,"a",sep="")
+outputBfilename <- paste(outobjectname,"b",sep="")
+
+dd <- as.matrix(read.table(inputfilename))
+dd <- data.frame(dd[1,])
+print(length(dd[,1]))
+
+## taking the mean of the TS to be the baseline
+quickbase <- mean(dd[,1])
+adjusted.input = dd[,1]-quickbase
+print(paste("starting res_preprocess ::: ",date(),sep=""))
+res_preprocess <- preprocess_1(input = adjusted.input);
+
+print(paste("moving on to test_nlminb ::: ",date(),sep=""))
+
+test2_res_D2_nlminb <- nlminb(
+ start = res_preprocess$guess,
+ objective = fcn_L2,
+ gradient = test_D_fcn_L2,
+#hessian = test_D2_fcn_L2,
+ control = list(eval.max = 50000, iter.max = 5000),
+# the commented control line below, with fewer iterations, is for testing only; the line above is the real one
+# control = list(eval.max = 100, iter.max = 50),
+ lower = rep(c(0),length(res_preprocess$guess)),
+ fcall = f,
+ n = res_preprocess$n,
+ x = res_preprocess$interpolated_x,
+ y_and_noise = res_preprocess$interpolated_y)
+
+pdf(file=outputPDFfilename)
+op <- par(mfrow=c(1,1))
+plot(res_preprocess$interpolated_x,res_preprocess$interpolated_y,xlab = "",ylab = "BOLD (1E-3)",type = "b");
+plot.est(par = test2_res_D2_nlminb$par, fcall = f, n = res_preprocess$n, res_preprocess$interpolated_x,col = "blue");
+plot.est.individual(par = test2_res_D2_nlminb$par, fcall = f, n = res_preprocess$n, res_preprocess$interpolated_x,col = "blue");
+par(op)   # restore plotting parameters before closing the device
+dev.off()
+
+result1<- est.stats.summary(
+ par = test2_res_D2_nlminb$par,
+ sse = test2_res_D2_nlminb$objective,
+ x = res_preprocess$interpolated_x,
+ y = res_preprocess$interpolated_y)
+
+result2 <- est.summary(
+ par = test2_res_D2_nlminb$par,
+ sse = test2_res_D2_nlminb$objective,
+ n = res_preprocess$n,
+ x = res_preprocess$interpolated_x,
+ y = res_preprocess$interpolated_y)
+
+
+write.table(result1,outputAfilename,row.names = F,col.names = F)
+write.table(result2,outputBfilename,row.names = F,col.names = F)
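+
+# Local-run sketch (hypothetical file names; in production Swift sets these
+# environment variables before invoking the script):
+#   Sys.setenv(R_INPUT = "voxel_ts.txt", R_SWIFT_ARGS = "voxel_ts.txt voxel_out.")
+#   source("Rscripts/subqrymediator.R")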