// --- Fragment of MNEMath::combine_xyz(const VectorXd &vec) (see decl list below) ---
// NOTE(review): this is a doc-extraction of the original source; the leading
// numbers (81, 83, ...) are the original file's line numbers and several
// interior lines (e.g. 84-86, 88-89) are elided from this view.
// Input must contain whole (x,y,z) triplets.
81 if (vec.size() % 3 != 0)
83 printf(
"Input must be a row or a column vector with 3N components");
// Work on a row-vector copy of the input.
87 MatrixXd tmp = MatrixXd(vec.transpose());
// sC = s * s^T; `s` is presumably a sparse matrix built from tmp on the
// elided lines 88-89 — TODO confirm against the full source.
90 SparseMatrix<double> sC = *s*s->transpose();
// Heap-allocated result: the declaration `static VectorXd * combine_xyz(...)`
// returns an owning raw pointer, so the caller must delete it.
91 VectorXd* comb =
new VectorXd(sC.rows());
// Copy the diagonal of sC into the combined-magnitude vector.
93 for(qint32 i = 0; i < sC.rows(); ++i)
94 (*comb)[i] = sC.coeff(i,i);
// --- Fragment of MNEMath::getConditionNumber(const MatrixXd &A, VectorXd &s) ---
// Condition number = largest / smallest singular value of A.
105 JacobiSVD<MatrixXd> svd(A);
// Singular values are returned to the caller through the out-param `s`.
106 s = svd.singularValues();
// NOTE(review): divides by s.minCoeff() with no zero guard visible here;
// elided lines may handle the singular case — confirm against the full source.
108 double c = s.maxCoeff()/s.minCoeff();
// --- Fragment of MNEMath::getConditionSlope(const MatrixXd &A, VectorXd &s) ---
// "Condition slope": largest singular value over the MEAN singular value
// (contrast with getConditionNumber above, which uses the minimum).
118 JacobiSVD<MatrixXd> svd(A);
119 s = svd.singularValues();
121 double c = s.maxCoeff()/s.mean();
// --- Fragment of MNEMath::get_whitener(MatrixXd &A, bool pca, QString ch_type, VectorXd &eig, MatrixXd &eigvec) ---
// A self-adjoint solver is used, so A is assumed symmetric (covariance-like)
// — TODO confirm the caller's contract against the full source.
132 SelfAdjointEigenSolver<MatrixXd> t_eigenSolver(A);
// Eigenpairs are returned through the out-params eig / eigvec.
134 eig = t_eigenSolver.eigenvalues();
135 eigvec = t_eigenSolver.eigenvectors().transpose();
// Sort eigenpairs; `false` presumably selects descending order — confirm
// against MNEMath::sort's declaration.
137 MNEMath::sort<double>(eig, eigvec,
false);
// Zero out the eigenvalues beyond the estimated rank `rnk`
// (rnk is computed on lines elided from this view).
140 for(qint32 i = 0; i < eig.size()-rnk; ++i)
143 printf(
"Setting small %s eigenvalues to zero.\n", ch_type.toLatin1().constData());
145 printf(
"Not doing PCA for %s\n", ch_type.toLatin1().constData());
148 printf(
"Doing PCA for %s.",ch_type.toLatin1().constData());
// PCA branch: keep only the rnk trailing rows (the retained components).
151 eigvec = eigvec.block(eigvec.rows()-rnk, 0, rnk, eigvec.cols());
// --- Fragment of MNEMath::intersect(const VectorXi &v1, const VectorXi &v2, VectorXi &idx_sel) ---
// Returns the sorted values common to v1 and v2; idx_sel receives, for each
// returned value, its position within v1.
160 std::vector<int> tmp;
162 std::vector< std::pair<int,int> > t_vecIntIdxValue;
// Copy v1 into a std::vector so std::search can be applied below.
165 for(qint32 i = 0; i < v1.size(); ++i)
166 tmp.push_back(v1[i]);
168 std::vector<int>::iterator it;
169 for(qint32 i = 0; i < v2.size(); ++i)
// Single-element search: locate v2[i] inside tmp.
171 it = std::search(tmp.begin(), tmp.end(), &v2[i], &v2[i]+1);
// Record (value, index-in-v1). NOTE(review): the "was it found?" guard
// (original line 172) is elided from this view — as shown, a miss would push
// an out-of-range index; confirm against the full source.
173 t_vecIntIdxValue.push_back(std::pair<int,int>(v2[i], it-tmp.begin()));
// Order the result by value using the shared comparator.
176 std::sort(t_vecIntIdxValue.begin(), t_vecIntIdxValue.end(), MNEMath::compareIdxValuePairSmallerThan<int>);
178 VectorXi p_res(t_vecIntIdxValue.size());
179 idx_sel = VectorXi(t_vecIntIdxValue.size());
// Unzip the pairs: values into the returned vector, indices into idx_sel.
181 for(quint32 i = 0; i < t_vecIntIdxValue.size(); ++i)
183 p_res[i] = t_vecIntIdxValue[i].first;
184 idx_sel[i] = t_vecIntIdxValue[i].second;
// --- Fragment of MNEMath::issparse(VectorXd &v) ---
// Known-slow linear scan; the author left a speed-up TODO in the debug output.
257 qDebug() <<
"ToDo: Figure out how to accelerate MNEMath::issparse(VectorXd &v).";
// Scan the n entries of v (the loop body and the definition of n are elided
// from this view).
263 for(qint32 i = 0; i < n; ++i)
// --- Fragment of MNEMath::make_block_diag(const MatrixXd &A, qint32 n) ---
// Builds a sparse block-diagonal matrix from the na/n horizontal n-column
// slices of A; returns an owning raw pointer (see the declaration).
298 qint32 ma = A.rows();
299 qint32 na = A.cols();
// Number of blocks; float so the "even multiple" check below can detect a
// fractional count (checking condition itself is elided from this view).
300 float bdn = ((float)na)/n;
306 printf(
"Width of matrix must be even multiple of n\n");
310 typedef Eigen::Triplet<double> T;
311 std::vector<T> tripletList;
// One triplet per nonzero: bdn blocks of ma x n entries.
312 tripletList.reserve(bdn*ma*n);
// NOTE(review): the current_col assignment (original line ~317, presumably
// current_col = i * n) is elided from this view — confirm against the source.
314 qint32 current_col, current_row, i, r, c;
315 for(i = 0; i < bdn; ++i)
318 current_row = i * ma;
320 for(r = 0; r < ma; ++r)
321 for(c = 0; c < n; ++c)
322 tripletList.push_back(T(r+current_row, c+current_col, A(r, c+current_col)));
// Row count ma*bdn is rounded to nearest int to undo float imprecision; the
// result is heap-allocated and owned by the caller.
325 SparseMatrix<double>* bd =
new SparseMatrix<double>((int)floor((
float)ma*bdn+0.5),na);
327 bd->setFromTriplets(tripletList.begin(), tripletList.end());
//=============================================================================
/**
 * Computes the number of unordered pairs of n elements: C(n,2) = n*(n-1)/2.
 * (Signature per the class declaration: static int nchoose2(int n).)
 *
 * @param[in] n  Number of elements (callers pass non-negative counts).
 * @return The binomial coefficient "n choose 2".
 */
int nchoose2(int n)
{
    // n*(n-1) is always even, so integer division is exact. This replaces the
    // original double round-trip (int)(n*(n-1)*0.5) with pure integer
    // arithmetic: identical results for every non-overflowing input, and no
    // int -> double -> int conversions.
    int t_iNumOfCombination = (n * (n - 1)) / 2;

    return t_iNumOfCombination;
}
// --- Fragment of MNEMath::rank(const MatrixXd &A, double tol = 1e-8) ---
// Numerical rank: count of singular values above a tolerance threshold.
350 JacobiSVD<MatrixXd> t_svdA(A);
351 VectorXd s = t_svdA.singularValues();
352 double t_dMax = s.maxCoeff();
// NOTE(review): the tolerance scaling of t_dMax (t_dMax *= tol, original
// lines ~353-354, together with the init of `sum`) is elided from this view;
// as literally shown, s[i] > t_dMax would always count 0. Confirm against
// the full source before treating this as a bug.
355 for(qint32 i = 0; i < s.size(); ++i)
356 sum += s[i] > t_dMax ? 1 : 0;
// --- MNEMath::rescale(data, times, baseline, mode) ---
// Applies baseline correction to `data` over the time window given by
// `baseline`, using one of the modes listed below; returns the corrected copy.
// NOTE(review): this is a doc-extraction — several interior lines (the
// imin/imax index searches, braces, returns) are elided, and "×" in the
// signature is a mojibake of "&times" (the parameter is
// `const RowVectorXf &times`).
363 MatrixXd
MNEMath::rescale(
const MatrixXd &data,
const RowVectorXf ×, QPair<QVariant,QVariant> baseline, QString mode)
// Work on a copy; the input `data` is never modified.
365 MatrixXd data_out = data;
366 QStringList valid_modes;
367 valid_modes <<
"logratio" <<
"ratio" <<
"zscore" <<
"mean" <<
"percent";
// Unknown mode: warn (the early-return, if any, is on elided lines).
368 if(!valid_modes.contains(mode))
370 qWarning() <<
"\tWarning: mode should be any of : " << valid_modes;
373 printf(
"\tApplying baseline correction ... (mode: %s)\n", mode.toLatin1().constData());
// Resolve the baseline interval [bmin, bmax] into column indices; an invalid
// QVariant means "open end". The index-search loop bodies (imin/imax) are
// elided from this view.
378 if(!baseline.first.isValid())
382 bmin = baseline.first.toFloat();
383 for(qint32 i = 0; i < times.size(); ++i)
392 if (!baseline.second.isValid())
396 bmax = baseline.second.toFloat();
397 for(qint32 i = times.size()-1; i >= 0; --i)
// Row-wise mean over the baseline columns [imin, imax).
407 VectorXd mean = data_out.block(0, imin,data_out.rows(),imax-imin).rowwise().mean();
// mode == "mean": subtract the baseline mean from every column.
408 if(mode.compare(
"mean") == 0)
410 data_out -= mean.rowwise().replicate(data.cols());
// mode == "logratio": log10 of the ratio to the baseline mean.
412 else if(mode.compare(
"logratio") == 0)
414 for(qint32 i = 0; i < data_out.rows(); ++i)
415 for(qint32 j = 0; j < data_out.cols(); ++j)
416 data_out(i,j) = log10(data_out(i,j)/mean[i]);
// mode == "ratio": element-wise ratio to the baseline mean.
418 else if(mode.compare(
"ratio") == 0)
420 data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
// mode == "zscore": subtract baseline mean, divide by baseline std-dev.
422 else if(mode.compare(
"zscore") == 0)
424 MatrixXd std_mat = data.block(0, imin, data.rows(), imax-imin) - mean.rowwise().replicate(imax-imin);
425 std_mat = std_mat.cwiseProduct(std_mat);
426 VectorXd std_v = std_mat.rowwise().mean();
// NOTE(review): rowwise().mean() already divides by (imax-imin); dividing
// again inside the sqrt divides by N twice — confirm the intended estimator
// against the full source before changing anything.
427 for(qint32 i = 0; i < std_v.size(); ++i)
428 std_v[i] = sqrt(std_v[i] / (
float)(imax-imin));
430 data_out -= mean.rowwise().replicate(data_out.cols());
431 data_out = data_out.cwiseQuotient(std_v.rowwise().replicate(data_out.cols()));
// mode == "percent": (x - baseline mean) / baseline mean.
433 else if(mode.compare(
"percent") == 0)
435 data_out -= mean.rowwise().replicate(data_out.cols());
436 data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
static void get_whitener(MatrixXd &A, bool pca, QString ch_type, VectorXd &eig, MatrixXd &eigvec)
static VectorXd * combine_xyz(const VectorXd &vec)
static bool issparse(VectorXd &v)
static SparseMatrix< double > * make_block_diag(const MatrixXd &A, qint32 n)
static double getConditionNumber(const MatrixXd &A, VectorXd &s)
static VectorXi intersect(const VectorXi &v1, const VectorXi &v2, VectorXi &idx_sel)
static qint32 rank(const MatrixXd &A, double tol=1e-8)
MNEMath class declaration.
static MatrixXd rescale(const MatrixXd &data, const RowVectorXf &times, QPair< QVariant, QVariant > baseline, QString mode)
static MatrixXd legendre(qint32 n, const VectorXd &X, QString normalize=QString("unnorm"))
static int nchoose2(int n)
static double getConditionSlope(const MatrixXd &A, VectorXd &s)