diff --git a/bf_inverse_ebb.m b/bf_inverse_ebb.m
index 53dd622..2a1686f 100644
--- a/bf_inverse_ebb.m
+++ b/bf_inverse_ebb.m
@@ -21,8 +21,8 @@
 corr.name = 'Correlated & homologous sources';
 corr.help = {['Prior matrix is modified so to account for power in a location '...
 'and its correlated partner, allowing for correlated sources normally suppressed '...
- 'by beamformers to be reconstructed. Correlated pairs can be definied in a matrix'...
- '(see below) or automatically guessed by looking for the mirror along the saggital plane.']};
+ 'by beamformers to be reconstructed. Correlated pairs can be defined in a matrix'...
+ '(see below) or automatically guessed by looking for the mirror along the sagittal plane.']};
 corr.labels = {'yes','no'};
 corr.values = {true, false};
 corr.val = {false};
@@ -51,7 +51,7 @@
 mixmethod.name = 'Prior combination method';
 mixmethod.help = {['How should we combine the correlated and uncorrelated '...
 'priors (assuming we have both) +SUM: simple addition of the two '...
- 'priors to make one matrix. +REML: Two seperate priors, where the ReML'...
+ 'priors to make one matrix. +REML: Two separate priors, where the ReML'...
 'optimisation will scale them automatically. (Default: sum)']};
 mixmethod.labels = {'sum','reml'};
 mixmethod.values = {'sum','reml'};
@@ -70,7 +70,7 @@
 iid = cfg_menu;
 iid.tag = 'iid';
 iid.name = 'Identity source covariance';
- iid.help = {['Assumes sources are indepedent and identically distributed, equivalent to a Bayesian '...
+ iid.help = {['Assumes sources are independent and identically distributed, equivalent to a Bayesian '...
 'minimum norm estimation. This option bypasses correlated source mode']};
 iid.labels = {'yes','no'};
 iid.values = {true, false};
@@ -135,7 +135,7 @@
 S.corr = true;
 end
 
-% Inital setup
+% Initial setup
 %----------------------------------------------------------------
 
 C = BF.features.(S.modality).C;
@@ -160,7 +160,7 @@
 % Check to see if tdcov has been used, warn model evidence scores may not
 % behave as expected
 ntrials = size(BF.data.D,3);
-if Nn > ntrials*16 % maxmium possible samples using tdcov
+if Nn > ntrials*16 % maximum possible samples using tdcov
 warning(['covariance matrix not generated using tdcov - '...
 'model evidence values may not scale as expected!'])
 Nn = 1;
@@ -269,7 +269,7 @@
 'file is the one you want me to use, please only have one in there!']);
 eval(['pairs = X.' flds{1} ';']);
 % Check it has the correct size
- assert(length(pairs)==nvert,'size of pairs mat doesnt correspond to number of sources');
+ assert(length(pairs)==nvert,'size of pairs mat doesn''t correspond to number of sources');
 % % need to check if its symmetric - and make this fail if so.
 % % (except for the condition of an indetity matrix)
 % tmp = pairs - pairs';
@@ -295,9 +295,9 @@
 if ~isnan(L{i})
 
 if isempty(S.pairs)
- % We are looking for sources in the mirror of the saggital
+ % We are looking for sources in the mirror of the sagittal
 % plane, which means flipping the position in the x-axis and
- % looking for the source which is the closest (Which isnt
+ % looking for the source which is the closest (Which isn't
 % itself!)
 target = [-pos(i,1) pos(i,2) pos(i,3)];
 del = bsxfun(@minus,pos,target);
@@ -307,8 +307,8 @@
 else
 % If using the pairs matrix, there are three possible
 % outcomes:
- % 1) Nothing to be found to corellate to
- % 2) It finds itself (useful if we need a contolled mix
+ % 1) Nothing to be found to correlate to
+ % 2) It finds itself (useful if we need a controlled mix
 % of correlated and uncorrelated sources).
 % 3) Finds other source(s) to correlate with.
 id = find(pairs(i,:));
@@ -336,7 +336,7 @@
 % allocate to corresponding locations in array
 
 % count how many times a point comes up (should be twice but
- % could be less/more depeding on asymmetry in source space).
+ % could be less/more depending on asymmetry in source space).
 if ondiag
 pow_dual(i,i) = pow_dual(i,i) + pow2_tmp;
 count(i,i) = count(i,i) + 1;
@@ -364,7 +364,7 @@
 end
 
 % correct power for the fact that a) having two sets of lead fields
- % doubles the power and b) some locations may have been visted more
+ % doubles the power and b) some locations may have been visited more
 % than once,stops this skewing the covariance too much in the
 % correlated sources favour.
 idx = find(count);
@@ -421,8 +421,8 @@
 hP(1) = -5; % assumes IID noise is 1/100th of signal.
 hC(1) = 1e-64;
 
-% Step 2: optional room noise: WARNING this hasnt been tested for
-% compatibilty with multiple source priors yet!
+% Step 2: optional room noise: WARNING this hasn't been tested for
+% compatibility with multiple source priors yet!
 
 if ~isempty(S.noise)
 if iscell(S.noise)
@@ -456,7 +456,7 @@
 
 % ReML to optimise the weighted combination of covariances to
 % best match the original sensor covariance. Comes in two flavours,
-% STRICT: where hyperpriors and precisions have been predfined earlier
+% STRICT: where hyperpriors and precisions have been predefined earlier
 % LOOSE: hyperpriors have free reign, similar to SPM's inversions
 %------------------------------------------------------------------
 switch lower(S.reml)
@@ -466,7 +466,7 @@
 case 'loose'
 fprintf('Using ReML: Loose hyperprior settings\n');
 % Need to add a final extra term here to allow ReML to not run into
- % trouble, a fixed (co)variance componenent which is ~1/100 the
+ % trouble, a fixed (co)variance component which is ~1/100 the
 % magnitude of the sensor covariance.
 Q0 = exp(-5)*trace(C)*Qe{1};
 [Cy,h,~,F,Fa,Fc]= bf_reml_sc(C,[],[Qe LQpL],Nn,-4,16,Q0);
diff --git a/bf_output_PLI.m b/bf_output_PLI.m
index 6527994..4dd6dbd 100644
--- a/bf_output_PLI.m
+++ b/bf_output_PLI.m
@@ -138,7 +138,7 @@
 if isfield(BF.sources, 'voi')
 montage.labelnew = BF.sources.voi.label;
 elseif isfield(S, 'vois')
- % collect labels from voi and maks definitions
+ % collect labels from voi and mask definitions
 if isfield(S.vois{1}, 'voidef')
 montage.labelnew{1} = S.vois{v}.voidef.label;
 elseif isfield(S.vois{1}, 'maskdef')
diff --git a/bf_output_image_cfGLM.m b/bf_output_image_cfGLM.m
index 597cd33..1adeec8 100644
--- a/bf_output_image_cfGLM.m
+++ b/bf_output_image_cfGLM.m
@@ -567,7 +567,7 @@
 B2=squeeze(mean(mean(Beta(:,:,:,2,:),2),1));
 B3=squeeze(mean(mean(Beta(:,:,:,3,:),2),1));
 
-% seond level stats
+% second level stats
 
 for i=1:nvert
 V=[];
@@ -639,4 +639,4 @@
 
 
 % end
-res = image;
\ No newline at end of file
+res = image;
\ No newline at end of file
diff --git a/bf_output_image_mv.m b/bf_output_image_mv.m
index 39dd7eb..fdcb949 100644
--- a/bf_output_image_mv.m
+++ b/bf_output_image_mv.m
@@ -228,7 +228,7 @@
 end
 
 if min(num_trials)~= max(num_trials)
- warning ('Number of trials are not the same accross conditions- throwing away');
+ warning ('Number of trials are not the same across conditions- throwing away');
 num_trials
 end
 nt = min(num_trials); % ANNA -throw away the extra trials
diff --git a/bf_output_montage.m b/bf_output_montage.m
index f86e325..52a7a34 100644
--- a/bf_output_montage.m
+++ b/bf_output_montage.m
@@ -91,7 +91,7 @@
 if isfield(BF.sources, 'voi')
 montage.labelnew = BF.sources.voi.label;
 elseif isfield(S, 'vois')
- % collect labels from voi and maks definitions
+ % collect labels from voi and mask definitions
 for v = 1:numel(S.vois)
 if isfield(S.vois{v}, 'voidef')
 montage.labelnew{v} = S.vois{v}.voidef.label;
diff --git a/bf_sources.m b/bf_sources.m
index 6d0608a..421bc9b 100644
--- a/bf_sources.m
+++ b/bf_sources.m
@@ -54,7 +54,7 @@
 visualise = cfg_menu;
 visualise.tag = 'visualise';
 visualise.name = 'Visualise head model and sources';
-visualise.help = {'Visualise head model and sourses to verify that everythin was done correctly'};
+visualise.help = {'Visualise head model and sources to verify that everything was done correctly'};
 visualise.labels = {'yes', 'no'};
 visualise.values = {true, false};
 visualise.val = {true};
diff --git a/private/GALA_find_localmin.m b/private/GALA_find_localmin.m
index 949fd3d..515f0ca 100644
--- a/private/GALA_find_localmin.m
+++ b/private/GALA_find_localmin.m
@@ -63,7 +63,7 @@
 
 % add boundary vertices to appropriate cluster
 A0=Aip-eye(length(Aip)); % direct neighbors matrix
- for i=1:length(bound) % loop by all boudary vertices
+ for i=1:length(bound) % loop by all boundary vertices
 
 neib = find(A0(:,bound(i))); % direct neighbors of current verex
 
diff --git a/private/GALA_invert.m b/private/GALA_invert.m
index 606b810..5155935 100644
--- a/private/GALA_invert.m
+++ b/private/GALA_invert.m
@@ -180,7 +180,7 @@
 % different subjects, so correlated between subjects part of covarince
 % prior is overlapping but not identical patches
 
- % pay attention - it's to exclude disconected vertices from Jcov
+ % pay attention - it's to exclude disconnected vertices from Jcov
 % it seems ssQ1 = spones(sQ1) works better than simple sQ1
 % may be because in Jcov.*sQ1 there is double attenuation of tails
 
diff --git a/private/bf_reml_sc.m b/private/bf_reml_sc.m
index cb79295..09fe7ba 100644
--- a/private/bf_reml_sc.m
+++ b/private/bf_reml_sc.m
@@ -100,12 +100,12 @@
 try, hE = hE(:); catch, hE = -32; end
 try, hP = spm_inv(hC); catch, hP = 1/256; end
 
-% check sise
+% check size
 %--------------------------------------------------------------------------
 if length(hE) < m, hE = hE(1)*ones(m,1); end
 if length(hP) < m, hP = hP(1)*speye(m,m); end
 
-% intialise h: so that sum(exp(h)) = 1
+% initialise h: so that sum(exp(h)) = 1
 %--------------------------------------------------------------------------
 if any(diag(hP) > exp(16))
 h = hE;
diff --git a/private/champagne_aug2015.m b/private/champagne_aug2015.m
index 3749d03..91801d5 100644
--- a/private/champagne_aug2015.m
+++ b/private/champagne_aug2015.m
@@ -25,7 +25,7 @@
 % Two parameters in code that can be adjusted are:
 % eps1 = Default is 1e-8 For numerical stability while inverting model covariance matrix.
 % eps1z = Default is 1e-8 For numerical stability while calculating
-% voxel variance and auxilliary variable z
+% voxel variance and auxiliary variable z
 
 
 function [gamma,x,w,sigu,like]=champagne(y,f,sigu,nem,nd,vcs,nupd,gamma0,retx, fig);
diff --git a/private/get_components.m b/private/get_components.m
index d3d724e..6ea5d22 100644
--- a/private/get_components.m
+++ b/private/get_components.m
@@ -4,9 +4,9 @@
 % [comps,comp_sizes] = get_components(adj);
 %
 % Returns the components of an undirected graph specified by the binary and
-% undirected adjacency matrix adj. Components and their constitutent nodes are
+% undirected adjacency matrix adj. Components and their constituent nodes are
 % assigned the same index and stored in the vector, comps. The vector, comp_sizes,
-% contains the number of nodes beloning to each component.
+% contains the number of nodes belonging to each component.
 %
 % Inputs: adj, binary and undirected adjacency matrix
 %
diff --git a/private/get_data_features.m b/private/get_data_features.m
index 59d57ca..dd59363 100644
--- a/private/get_data_features.m
+++ b/private/get_data_features.m
@@ -2,12 +2,12 @@
 function [trialfeatures,vedata]=get_data_features(flatpepdata,Nbins,Ntrials,weights,Tfull,datatype,featureind,regressout)
 
 %%% put data in form for mv test
-%% options are datatye
+%% options are datatype
 %% {'peakabs','peakreal','peakimag','peaksin','peakcos','sumpower'};
 %% datatype=pwr power (re^2+comp^2)
-%% datatpe=pwrsincos: power+sin and cos terms
+%% datatype=pwrsincos: power+sin and cos terms
 %% datatype=4:
-%% if weights==-1 just set up static variables depnding on datatype
+%% if weights==-1 just set up static variables depending on datatype
 %%
 
 flagnames={'peakabs','peakreal','peakimag','peaksin','peakcos','sumpower'};
diff --git a/private/nut_dSPM.m b/private/nut_dSPM.m
index ee0d251..27bdb71 100644
--- a/private/nut_dSPM.m
+++ b/private/nut_dSPM.m
@@ -1,7 +1,7 @@
 function [weight]=nut_dSPM(Lp,data, flags)
 %---------------------------------------------------------
 % [weight,eta]=nut_dSPM(Lp,data,flags)
 % Lp : lead field
-% inputs for regularization contant:
+% inputs for regularization constant:
 % [1] data.Ryy = sample covariance, for data-dependent regularization
 % [2] flags.gamma = user defined regularization constant, or 'auto' for
 % leadfield-based regularization
diff --git a/private/nut_sLORETA.m b/private/nut_sLORETA.m
index 9007556..0719a96 100644
--- a/private/nut_sLORETA.m
+++ b/private/nut_sLORETA.m
@@ -1,6 +1,6 @@
 function [weight]=nut_sLORETA(Lp,data,flags)
 % weight=nut_sLORETA(Lp,data,flags)
-% inputs for regularization contant:
+% inputs for regularization constant:
 % [1] data.Ryy = sample covariance, for data-dependent regularization
 % [2] flags.gamma = user defined regularization constant, or 'auto' for
 % leadfield-based regularization