@misc{KlugBergGramann, author = {Klug, Marius and Berg, Timotheus and Gramann, Klaus}, title = {No need for extensive artifact rejection for ICA - A multi-study evaluation on stationary and mobile EEG datasets}, series = {bioRxiv}, journal = {bioRxiv}, doi = {10.1101/2022.09.13.507772}, pages = {25}, abstract = {Objective. Electroencephalography (EEG) studies increasingly make use of more ecologically valid experimental protocols involving mobile participants who actively engage with their environment (MoBI; Gramann et al., 2011). These mobile paradigms lead to increased artifacts in the recorded data that are often treated using Independent Component Analysis (ICA). When analyzing EEG data, especially in a mobile context, removing samples regarded as artifactual is a common approach before computing ICA. Automatic tools for this exist, such as the automatic sample rejection of the AMICA algorithm (Palmer et al., 2011), but the impact of the two factors, movement intensity and automatic sample rejection, has not yet been systematically evaluated. Approach. We computed AMICA decompositions on eight datasets from six open-access studies with varying degrees of movement intensity using increasingly conservative sample rejection criteria. We evaluated the subsequent decomposition quality in terms of the component mutual information, the number of brain, muscle, and "other" components, the residual variance of the brain components, and an exemplary signal-to-noise ratio. Main results. We found that increasing movements of participants led to decreasing decomposition quality for individual datasets but not as a general trend across all movement intensities. The cleaning strength had less impact on decomposition results than anticipated, and moderate cleaning of the data resulted in the best decompositions. Significance. Our results indicate that the AMICA algorithm is very robust even with limited data cleaning. Moderate amounts of cleaning, such as 5 to 10 iterations of the AMICA sample rejection with 3 standard deviations as the threshold, will likely improve the decomposition of most datasets, irrespective of the movement intensity.}, language = {en} } @misc{KrolPawlitzkiLotteetal., author = {Krol, Laurens R. and Pawlitzki, Juliane and Lotte, Fabien and Gramann, Klaus and Zander, Thorsten O.}, title = {SEREEGA: Simulating event-related EEG activity}, series = {Journal of Neuroscience Methods}, volume = {309}, journal = {Journal of Neuroscience Methods}, issn = {0165-0270}, doi = {10.1016/j.jneumeth.2018.08.001}, pages = {13 -- 24}, language = {en} } @misc{KlugJeungWunderlichetal., author = {Klug, Marius and Jeung, Sein and Wunderlich, Anna and Gehrke, Lukas and Protzak, Janna and Djebbara, Zakaria and Argubi-Wollesen, Andreas and Wollesen, Bettina and Gramann, Klaus}, title = {The BeMoBIL Pipeline for automated analyses of multimodal mobile brain and body imaging data}, series = {bioRxiv}, journal = {bioRxiv}, doi = {10.1101/2022.09.29.510051}, pages = {40}, abstract = {Advancements in hardware technology and analysis methods allow more and more mobility in electroencephalography (EEG) experiments. Mobile Brain/Body Imaging (MoBI) studies may record various types of data such as motion or eye tracking in addition to neural activity. Although there are options available to analyze EEG data in a standardized way, they do not fully cover complex multimodal data from mobile experiments.
We thus propose the BeMoBIL Pipeline, an easy-to-use pipeline in MATLAB that supports the time-synchronized handling of multimodal data. It is based on EEGLAB and FieldTrip and consists of automated functions for EEG preprocessing and subsequent source separation. It also provides functions for motion data processing and extraction of event markers from different data modalities, including the extraction of eye-movement and gait-related events from EEG using independent component analysis. The pipeline introduces a new robust method for region-of-interest-based group-level clustering of independent EEG components. Finally, the BeMoBIL Pipeline provides analytical visualizations at various processing steps, keeping the analysis transparent and allowing for quality checks of the resulting outcomes. All parameters and steps are documented within the data structure and can be fully replicated using the same scripts. This pipeline makes the processing and analysis of (mobile) EEG and body data more reliable and independent of the prior experience of the individual researchers, thus facilitating the use of EEG in general and MoBI in particular. It is an open-source project available for download at https://github.com/BeMoBIL/bemobil-pipeline, which allows for community-driven adaptations in the future.}, language = {en} } @misc{DelauxdeSaintAubertRamanoeletal., author = {Delaux, Alexandre and de Saint Aubert, Jean-Baptiste and Ramano{\"e}l, Stephen and B{\'e}cu, Marcia and Gehrke, Lukas and Klug, Marius and Chavarriaga, Ricardo and Sahel, Jos{\'e}-Alain and Gramann, Klaus and Arleo, Angelo}, title = {Mobile brain/body imaging of landmark-based navigation with high-density EEG}, series = {European Journal of Neuroscience}, volume = {54}, journal = {European Journal of Neuroscience}, number = {12}, issn = {0953-816X}, doi = {10.1111/ejn.15190}, pages = {8256 -- 8282}, abstract = {Coupling behavioral measures and brain imaging in naturalistic, ecological conditions is key to comprehending the neural bases of spatial navigation. This highly integrative function encompasses sensorimotor, cognitive, and executive processes that jointly mediate active exploration and spatial learning. However, most neuroimaging approaches in humans are based on static, motion-constrained paradigms and they do not account for all these processes, in particular multisensory integration. Following the Mobile Brain/Body Imaging approach, we aimed to explore the cortical correlates of landmark-based navigation in actively behaving young adults, solving a Y-maze task in immersive virtual reality. EEG analysis identified a set of brain areas matching state-of-the-art brain imaging literature of landmark-based navigation. Spatial behavior in mobile conditions additionally involved sensorimotor areas related to motor execution and proprioception, usually overlooked in static fMRI paradigms. Expectedly, we located a cortical source in or near the posterior cingulate, in line with the engagement of the retrosplenial complex in spatial reorientation. Consistent with its role in visuo-spatial processing and coding, we observed an alpha-power desynchronization while participants gathered visual information. We also hypothesized behavior-dependent modulations of the cortical signal during navigation.
Despite finding few differences between the encoding and retrieval phases of the task, we identified transient time-frequency patterns attributed, for instance, to attentional demand, as reflected in the alpha/gamma range, or memory workload in the delta/theta range. We confirmed that combining mobile high-density EEG and biometric measures can help unravel the brain structures and the neural modulations subtending ecological landmark-based navigation.}, language = {en} } @misc{GehrkeLopesKlugetal., author = {Gehrke, Lukas and Lopes, Pedro and Klug, Marius and Akman, Sezen and Gramann, Klaus}, title = {Neural sources of prediction errors detect unrealistic VR interactions}, series = {Journal of Neural Engineering}, volume = {19}, journal = {Journal of Neural Engineering}, number = {3}, issn = {1741-2560}, doi = {10.1088/1741-2552/ac69bc}, abstract = {Objective. Neural interfaces hold significant promise to implicitly track user experience. Their application in virtual and augmented reality (VR/AR) simulations is especially favorable as it allows user assessment without breaking the immersive experience. In VR, designing immersion is one key challenge. Subjective questionnaires are the established metrics to assess the effectiveness of immersive VR simulations. However, administering such questionnaires requires breaking the immersive experience they are supposed to assess. Approach. We present a complementary metric based on event-related potentials. For the metric to be robust, the neural signal employed must be reliable. Hence, it is beneficial to target the neural signal's cortical origin directly, efficiently separating signal from noise. To test this new complementary metric, we designed a reach-to-tap paradigm in VR to probe electroencephalography (EEG) and movement adaptation to visuo-haptic glitches. Our working hypothesis was that these glitches, or violations of the predicted action outcome, may indicate a disrupted user experience. Main results. Using prediction error negativity features, we classified VR glitches with 77\% accuracy. We localized the EEG sources driving the classification and found midline cingulate EEG sources and a distributed network of parieto-occipital EEG sources to enable the classification success. Significance. Prediction error signatures from these sources reflect violations of the user's predictions during interaction with AR/VR, promising a robust and targeted marker for adaptive user interfaces.}, language = {en} } @misc{OjedaKlugKreutzDelgadoetal., author = {Ojeda, Alejandro and Klug, Marius and Kreutz-Delgado, Kenneth and Gramann, Klaus and Mishra, Jyoti}, title = {A Bayesian framework for unifying data cleaning, source separation and imaging of electroencephalographic signals}, series = {bioRxiv}, journal = {bioRxiv}, doi = {10.1101/559450}, pages = {20}, abstract = {Electroencephalographic (EEG) source imaging depends upon sophisticated signal processing algorithms for data cleaning, source separation, and localization. Typically, these problems are addressed separately using a variety of heuristics, making it difficult to systematize a methodology for extracting robust EEG source estimates on a wide range of experimental paradigms. In this paper, we propose a unifying Bayesian framework in which these apparently dissimilar problems can be understood and solved in a principled manner using a single algorithm.
We explicitly model the effect of non-brain sources by augmenting the lead field matrix with a dictionary of stereotypical artifact scalp projections. We propose to populate the artifact dictionary with non-brain scalp projections obtained by running Independent Component Analysis (ICA) on an EEG database. Within a parametric empirical Bayes (PEB) framework, we use an anatomical brain atlas to parameterize a source prior distribution that encourages sparsity in the number of cortical regions. We show that, in our inversion algorithm, PEB+ (PEB with the addition of artifact modeling), the sparsity prior has the property of inducing the segregation of the cortical activity into a few maximally independent components with known anatomical support. Artifacts produced by electrooculographic and electromyographic activity as well as single-channel spikes are also segregated into their respective components. Of theoretical relevance, we use our framework to point out the connections between Infomax ICA and distributed source imaging. We use real data to demonstrate that PEB+ outperforms Infomax for source separation on short segments of data and, unlike the popular Artifact Subspace Removal algorithm, it can reduce artifacts without significantly distorting clean epochs. Finally, we analyze mobile brain/body imaging data to characterize the brain dynamics supporting heading computation during full-body rotations. In this example, we run PEB+ followed by the spectral analysis of the activity in the retrosplenial cortex, largely replicating the findings of previous experimental literature.}, language = {en} } @misc{ZanderKrolBirbaumeretal., author = {Zander, Thorsten O. and Krol, Laurens R. and Birbaumer, Niels P. and Gramann, Klaus}, title = {Neuroadaptive technology enables implicit cursor control based on medial prefrontal cortex activity}, series = {Proceedings of the National Academy of Sciences}, volume = {113}, journal = {Proceedings of the National Academy of Sciences}, number = {52}, issn = {0027-8424}, doi = {10.1073/pnas.1605155114}, pages = {14898 -- 14903}, abstract = {The human brain continuously and automatically processes information concerning its internal and external context. We demonstrate the elicitation and subsequent detection and decoding of such "automatic interpretations" by means of context-sensitive probes in an ongoing human-computer interaction. Through a sequence of such probe-interpretation cycles, the computer accumulates responses over time to model the operator's cognition, even without that person being aware of it. This brings human cognition directly into the human-computer interaction loop, expanding traditional notions of "interaction." The concept introduces neuroadaptive technology—technology which automatically adapts to an estimate of its operator's mindset. This technology bears relevance to autoadaptive experimental designs, and opens up paradigm-shifting possibilities for human-machine systems in general.}, language = {en} } @misc{GramannHohlefeldGehrkeetal., author = {Gramann, Klaus and Hohlefeld, Friederike U. 
and Gehrke, Lukas and Klug, Marius}, title = {Human cortical dynamics during full-body heading changes}, series = {Scientific Reports}, volume = {11}, journal = {Scientific Reports}, number = {1}, issn = {2045-2322}, doi = {10.1038/s41598-021-97749-8}, abstract = {The retrosplenial complex (RSC) plays a crucial role in spatial orientation by computing heading direction and translating between distinct spatial reference frames based on multi-sensory information. While invasive studies allow investigating heading computation in moving animals, established non-invasive analyses of human brain dynamics are restricted to stationary setups. To investigate the role of the RSC in heading computation of actively moving humans, we used a Mobile Brain/Body Imaging approach synchronizing electroencephalography with motion capture and virtual reality. Data from physically rotating participants were contrasted with rotations based only on visual flow. During physical rotation, varying rotation velocities were accompanied by pronounced wide frequency band synchronization in RSC, the parietal and occipital cortices. In contrast, the visual flow rotation condition was associated with pronounced alpha band desynchronization, replicating previous findings in desktop navigation studies, and notably absent during physical rotation. These results suggest an involvement of the human RSC in heading computation based on visual, vestibular, and proprioceptive input and suggest revisiting traditional findings of alpha desynchronization in areas of the navigation network during spatial orientation in movement-restricted participants.}, language = {en} } @misc{HarmeningKlugGramannetal., author = {Harmening, Nils and Klug, Marius and Gramann, Klaus and Miklody, Daniel}, title = {HArtMuT—modeling eye and muscle contributors in neuroelectric imaging}, series = {Journal of Neural Engineering}, volume = {19}, journal = {Journal of Neural Engineering}, number = {6}, issn = {1741-2560}, doi = {10.1088/1741-2552/aca8ce}, abstract = {Objective. Magneto- and electroencephalography (M/EEG) measurements record a mix of signals from the brain, eyes, and muscles. These signals can be disentangled for artifact cleaning, e.g., using spatial filtering techniques. However, correctly localizing and identifying these components relies on head models that so far only take brain sources into account. Approach. We thus developed the Head Artifact Model using Tripoles (HArtMuT). This volume conduction head model extends to the neck and includes brain sources as well as sources representing eyes and muscles that can be modeled as single dipoles, symmetrical dipoles, and tripoles. We compared a HArtMuT four-layer boundary element model (BEM) with the EEGLAB standard head model on their localization accuracy and residual variance (RV) using a HArtMuT finite element model (FEM) as ground truth. We also evaluated the RV on real-world data of mobile participants, comparing different HArtMuT BEM types with the EEGLAB standard head model. Main results. We found that HArtMuT improves localization for all sources, especially non-brain, and localization error and RV of non-brain sources were in the same range as those of brain sources. The best results were achieved by using cortical dipoles, muscular tripoles, and ocular symmetric dipoles, but dipolar sources alone can already lead to convincing results. Significance. We conclude that HArtMuT is well suited for modeling eye and muscle contributions to the M/EEG signal.
It can be used to localize sources and to identify brain, eye, and muscle components. HArtMuT is freely available and can be integrated into standard software.}, language = {en} } @incollection{JungnickelGehrkeKlugetal., author = {Jungnickel, Evelyn and Gehrke, Lukas and Klug, Marius and Gramann, Klaus}, title = {MoBI—Mobile Brain/Body Imaging}, series = {Neuroergonomics: the brain at work and in everyday life}, booktitle = {Neuroergonomics: the brain at work and in everyday life}, editor = {Ayaz, Hasan and Dehais, Fr{\'e}d{\'e}ric}, publisher = {Elsevier}, isbn = {978-0-12-811927-3}, doi = {10.1016/B978-0-12-811926-6.00010-5}, pages = {59 -- 63}, language = {en} }