Grace Edwards; Céline Paeye; Philippe Marque; Rufin VanRullen; Patrick Cavanagh Predictive position computations mediated by parietal areas: TMS evidence Journal Article NeuroImage, 153 , pp. 49–57, 2017. @article{Edwards2017, title = {Predictive position computations mediated by parietal areas: TMS evidence}, author = {Grace Edwards and Céline Paeye and Philippe Marque and Rufin VanRullen and Patrick Cavanagh}, doi = {10.1016/j.neuroimage.2017.03.043}, year = {2017}, date = {2017-01-01}, journal = {NeuroImage}, volume = {153}, pages = {49--57}, abstract = {When objects move or the eyes move, the visual system can predict the consequence and generate a percept of the target at its new position. This predictive localization may depend on eye movement control in the frontal eye fields (FEF) and the intraparietal sulcus (IPS) and on motion analysis in the medial temporal area (MT). Across two experiments we examined whether repetitive transcranial magnetic stimulation (rTMS) over right FEF, right IPS, right MT, and a control site, peripheral V1/V2, diminished participants' perception of two cases of predictive position perception: trans-saccadic fusion, and the flash grab illusion, both presented in the contralateral visual field. In trans-saccadic fusion trials, participants saccade toward a stimulus that is replaced with another stimulus during the saccade. Frequently, predictive position mechanisms lead to a fused percept of pre- and post-saccade stimuli (Paeye et al., 2017). We found that rTMS to IPS significantly decreased the frequency of perceiving trans-saccadic fusion within the first 10 min after stimulation. In the flash grab illusion, a target is flashed on a moving background leading to the percept that the target has shifted in the direction of the motion after the flash (Cavanagh and Anstis, 2013). In the first experiment, the reduction in the flash grab illusion after rTMS to IPS and FEF did not reach significance. 
In the second experiment, using a stronger version of the flash grab, the illusory shift did decrease significantly after rTMS to IPS although not after rTMS to FEF or to MT. These findings suggest that right IPS contributes to predictive position perception during saccades and motion processing in the contralateral visual field.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When objects move or the eyes move, the visual system can predict the consequence and generate a percept of the target at its new position. This predictive localization may depend on eye movement control in the frontal eye fields (FEF) and the intraparietal sulcus (IPS) and on motion analysis in the medial temporal area (MT). Across two experiments we examined whether repetitive transcranial magnetic stimulation (rTMS) over right FEF, right IPS, right MT, and a control site, peripheral V1/V2, diminished participants' perception of two cases of predictive position perception: trans-saccadic fusion, and the flash grab illusion, both presented in the contralateral visual field. In trans-saccadic fusion trials, participants saccade toward a stimulus that is replaced with another stimulus during the saccade. Frequently, predictive position mechanisms lead to a fused percept of pre- and post-saccade stimuli (Paeye et al., 2017). We found that rTMS to IPS significantly decreased the frequency of perceiving trans-saccadic fusion within the first 10 min after stimulation. In the flash grab illusion, a target is flashed on a moving background leading to the percept that the target has shifted in the direction of the motion after the flash (Cavanagh and Anstis, 2013). In the first experiment, the reduction in the flash grab illusion after rTMS to IPS and FEF did not reach significance. In the second experiment, using a stronger version of the flash grab, the illusory shift did decrease significantly after rTMS to IPS although not after rTMS to FEF or to MT. 
These findings suggest that right IPS contributes to predictive position perception during saccades and motion processing in the contralateral visual field. |
Grace Edwards; Rufin VanRullen; Patrick Cavanagh Decoding trans-saccadic memory Journal Article Journal of Neuroscience, 38 (5), pp. 1114–1123, 2018. @article{Edwards2018, title = {Decoding trans-saccadic memory}, author = {Grace Edwards and Rufin VanRullen and Patrick Cavanagh}, doi = {10.1523/jneurosci.0854-17.2017}, year = {2018}, date = {2018-01-01}, journal = {Journal of Neuroscience}, volume = {38}, number = {5}, pages = {1114--1123}, abstract = {We examine whether peripheral information at a planned saccade target affects immediate post-saccadic processing at the fovea on saccade landing. Current neuroimaging research suggests that pre-saccadic stimulation has a late effect on post-saccadic processing, in contrast to the early effect seen in behavioral studies. Human participants (both male and female) were instructed to saccade toward a face or a house that, on different trials, remained the same, changed, or disappeared during the saccade. We used a multivariate pattern analysis (MVPA) of electroencephalography (EEG) data to decode face versus house processing directly after the saccade. The classifier was trained on separate trials without a saccade, where a house or face was presented at the fovea. When the saccade target remained the same across the saccade, we could reliably decode the target 123 ms after saccade offset. In contrast, when the target was changed during the saccade, the new target was decoded at a later time-point, 151 ms after saccade offset. The "same" condition advantage suggests that congruent pre-saccadic information facilitates processing of the post-saccadic stimulus compared to incongruent information. Finally, the saccade target could be decoded above chance even when it had been removed during the saccade, albeit with a slower time-course (162 ms) and poorer signal strength. 
These findings indicate that information about the (peripheral) pre-saccadic stimulus is transferred across the saccade so that it becomes quickly available and influences processing at its expected, new retinal position (the fovea).}, keywords = {}, pubstate = {published}, tppubtype = {article} } We examine whether peripheral information at a planned saccade target affects immediate post-saccadic processing at the fovea on saccade landing. Current neuroimaging research suggests that pre-saccadic stimulation has a late effect on post-saccadic processing, in contrast to the early effect seen in behavioral studies. Human participants (both male and female) were instructed to saccade toward a face or a house that, on different trials, remained the same, changed, or disappeared during the saccade. We used a multivariate pattern analysis (MVPA) of electroencephalography (EEG) data to decode face versus house processing directly after the saccade. The classifier was trained on separate trials without a saccade, where a house or face was presented at the fovea. When the saccade target remained the same across the saccade, we could reliably decode the target 123 ms after saccade offset. In contrast, when the target was changed during the saccade, the new target was decoded at a later time-point, 151 ms after saccade offset. The "same" condition advantage suggests that congruent pre-saccadic information facilitates processing of the post-saccadic stimulus compared to incongruent information. Finally, the saccade target could be decoded above chance even when it had been removed during the saccade, albeit with a slower time-course (162 ms) and poorer signal strength. These findings indicate that information about the (peripheral) pre-saccadic stimulus is transferred across the saccade so that it becomes quickly available and influences processing at its expected, new retinal position (the fovea). |
John M Egan; Gerard M Loughnane; Helen Fletcher; Emma Meade; Edmund C Lalor A gaze independent hybrid-BCI based on visual spatial attention Journal Article Journal of Neural Engineering, 14 (4), pp. 1–8, 2017. @article{Egan2017, title = {A gaze independent hybrid-BCI based on visual spatial attention}, author = {John M Egan and Gerard M Loughnane and Helen Fletcher and Emma Meade and Edmund C Lalor}, doi = {10.1088/1741-2552/aa6bb2}, year = {2017}, date = {2017-01-01}, journal = {Journal of Neural Engineering}, volume = {14}, number = {4}, pages = {1--8}, publisher = {IOP Publishing}, abstract = {Objective. Brain-computer interfaces (BCI) use measures of brain activity to convey a user's intent without the need for muscle movement. Hybrid designs, which use multiple measures of brain activity, have been shown to increase the accuracy of BCIs, including those based on EEG signals reflecting covert attention. Our study examined whether incorporating a measure of the P3 response improved the performance of a previously reported attention-based BCI design that incorporates measures of steady-state visual evoked potentials (SSVEP) and alpha band modulations. Approach. Subjects viewed stimuli consisting of two bi-laterally located flashing white boxes on a black background. Streams of letters were presented sequentially within the boxes, in random order. Subjects were cued to attend to one of the boxes without moving their eyes, and they were tasked with counting the number of target-letters that appeared within. P3 components evoked by target appearance, SSVEPs evoked by the flashing boxes, and power in the alpha band are modulated by covert attention, and the modulations can be used to classify trials as left-attended or right-attended. Main Results. 
We showed that classification accuracy was improved by including a P3 feature along with the SSVEP and alpha features (the inclusion of a P3 feature led to a 9% increase in accuracy compared to the use of SSVEP and Alpha features alone). We also showed that the design improves the robustness of BCI performance to individual subject differences. Significance. These results demonstrate that incorporating multiple neurophysiological indices of covert attention can improve performance in a gaze-independent BCI.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objective. Brain-computer interfaces (BCI) use measures of brain activity to convey a user's intent without the need for muscle movement. Hybrid designs, which use multiple measures of brain activity, have been shown to increase the accuracy of BCIs, including those based on EEG signals reflecting covert attention. Our study examined whether incorporating a measure of the P3 response improved the performance of a previously reported attention-based BCI design that incorporates measures of steady-state visual evoked potentials (SSVEP) and alpha band modulations. Approach. Subjects viewed stimuli consisting of two bi-laterally located flashing white boxes on a black background. Streams of letters were presented sequentially within the boxes, in random order. Subjects were cued to attend to one of the boxes without moving their eyes, and they were tasked with counting the number of target-letters that appeared within. P3 components evoked by target appearance, SSVEPs evoked by the flashing boxes, and power in the alpha band are modulated by covert attention, and the modulations can be used to classify trials as left-attended or right-attended. Main Results. We showed that classification accuracy was improved by including a P3 feature along with the SSVEP and alpha features (the inclusion of a P3 feature led to a 9% increase in accuracy compared to the use of SSVEP and Alpha features alone). 
We also showed that the design improves the robustness of BCI performance to individual subject differences. Significance. These results demonstrate that incorporating multiple neurophysiological indices of covert attention can improve performance in a gaze-independent BCI. |
Ciara Egan; Filipe Cristino; Joshua S Payne; Guillaume Thierry; Manon W Jones How alliteration enhances conceptual–attentional interactions in reading Journal Article Cortex, 124 , pp. 111–118, 2020. @article{Egan2020, title = {How alliteration enhances conceptual–attentional interactions in reading}, author = {Ciara Egan and Filipe Cristino and Joshua S Payne and Guillaume Thierry and Manon W Jones}, doi = {10.1016/j.cortex.2019.11.005}, year = {2020}, date = {2020-01-01}, journal = {Cortex}, volume = {124}, pages = {111--118}, publisher = {Elsevier Ltd}, abstract = {In linguistics, the relationship between phonological word form and meaning is mostly considered arbitrary. Why, then, do literary authors traditionally craft sound relationships between words? We set out to characterise how dynamic interactions between word form and meaning may account for this literary practice. Here, we show that alliteration influences both meaning integration and attentional engagement during reading. We presented participants with adjective-noun phrases, having manipulated semantic relatedness (congruent, incongruent) and form repetition (alliterating, non-alliterating) orthogonally, as in “dazzling-diamond”; “sparkling-diamond”; “dangerous-diamond”; and “creepy-diamond”. Using simultaneous recording of event-related brain potentials and pupil dilation (PD), we establish that, whilst semantic incongruency increased N400 amplitude as expected, it reduced PD, an index of attentional engagement. Second, alliteration affected semantic evaluation of word pairs, since it reduced N400 amplitude even in the case of unrelated items (e.g., “dangerous-diamond”). Third, alliteration specifically boosted attentional engagement for related words (e.g., “dazzling-diamond”), as shown by a sustained negative correlation between N400 amplitudes and PD change after the window of lexical integration. 
Thus, alliteration strategically arouses attention during reading and when comprehension is challenged, phonological information helps readers link concepts beyond the level of literal semantics. Overall, our findings provide a tentative mechanism for the empowering effect of sound repetition in literary constructs.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In linguistics, the relationship between phonological word form and meaning is mostly considered arbitrary. Why, then, do literary authors traditionally craft sound relationships between words? We set out to characterise how dynamic interactions between word form and meaning may account for this literary practice. Here, we show that alliteration influences both meaning integration and attentional engagement during reading. We presented participants with adjective-noun phrases, having manipulated semantic relatedness (congruent, incongruent) and form repetition (alliterating, non-alliterating) orthogonally, as in “dazzling-diamond”; “sparkling-diamond”; “dangerous-diamond”; and “creepy-diamond”. Using simultaneous recording of event-related brain potentials and pupil dilation (PD), we establish that, whilst semantic incongruency increased N400 amplitude as expected, it reduced PD, an index of attentional engagement. Second, alliteration affected semantic evaluation of word pairs, since it reduced N400 amplitude even in the case of unrelated items (e.g., “dangerous-diamond”). Third, alliteration specifically boosted attentional engagement for related words (e.g., “dazzling-diamond”), as shown by a sustained negative correlation between N400 amplitudes and PD change after the window of lexical integration. Thus, alliteration strategically arouses attention during reading and when comprehension is challenged, phonological information helps readers link concepts beyond the level of literal semantics. 
Overall, our findings provide a tentative mechanism for the empowering effect of sound repetition in literary constructs. |
Benedikt V Ehinger; Peter König; José P Ossandón Predictions of visual content across eye movements and their modulation by inferred information Journal Article Journal of Neuroscience, 35 (19), pp. 7403–7413, 2015. @article{Ehinger2015, title = {Predictions of visual content across eye movements and their modulation by inferred information}, author = {Benedikt V Ehinger and Peter König and José P Ossandón}, doi = {10.1523/JNEUROSCI.5114-14.2015}, year = {2015}, date = {2015-01-01}, journal = {Journal of Neuroscience}, volume = {35}, number = {19}, pages = {7403--7413}, abstract = {The brain is proposed to operate through probabilistic inference, testing and refining predictions about the world. Here, we search for neural activity compatible with the violation of active predictions, learned from the contingencies between actions and the consequent changes in sensory input. We focused on vision, where eye movements produce stimuli shifts that could, in principle, be predicted. We compared, in humans, error signals to saccade-contingent changes of veridical and inferred inputs by contrasting the electroencephalographic activity after saccades to a stimulus presented inside or outside the blind spot. We observed early (<250 ms) and late (>250 ms) error signals after stimulus change, indicating the violation of sensory and associative predictions, respectively. Remarkably, the late response was diminished for blind-spot trials. These results indicate that predictive signals occur across multiple levels of the visual hierarchy, based on generative models that differentiate between signals that originate from the outside world and those that are inferred.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The brain is proposed to operate through probabilistic inference, testing and refining predictions about the world. 
Here, we search for neural activity compatible with the violation of active predictions, learned from the contingencies between actions and the consequent changes in sensory input. We focused on vision, where eye movements produce stimuli shifts that could, in principle, be predicted. We compared, in humans, error signals to saccade-contingent changes of veridical and inferred inputs by contrasting the electroencephalographic activity after saccades to a stimulus presented inside or outside the blind spot. We observed early (<250 ms) and late (>250 ms) error signals after stimulus change, indicating the violation of sensory and associative predictions, respectively. Remarkably, the late response was diminished for blind-spot trials. These results indicate that predictive signals occur across multiple levels of the visual hierarchy, based on generative models that differentiate between signals that originate from the outside world and those that are inferred. |
Susanne Eisenhauer; Christian J Fiebach; Benjamin Gagl Context-based facilitation in visual word recognition: Evidence for visual and lexical but not pre-lexical contributions Journal Article eNeuro, 6 (2), pp. 1–25, 2019. @article{Eisenhauer2019, title = {Context-based facilitation in visual word recognition: Evidence for visual and lexical but not pre-lexical contributions}, author = {Susanne Eisenhauer and Christian J Fiebach and Benjamin Gagl}, doi = {10.1523/ENEURO.0321-18.2019}, year = {2019}, date = {2019-01-01}, journal = {eNeuro}, volume = {6}, number = {2}, pages = {1--25}, abstract = {Word familiarity and predictive context facilitate visual word processing, leading to faster recognition times and reduced neuronal responses. Previously, models with and without top-down connections, including lexical-semantic, pre-lexical (e.g., orthographic/phonological), and visual processing levels were successful in accounting for these facilitation effects. Here we systematically assessed context-based facilitation with a repetition priming task and explicitly dissociated pre-lexical and lexical processing levels using a pseudoword (PW) familiarization procedure. Experiment 1 investigated the temporal dynamics of neuronal facilitation effects with magnetoencephalography (MEG; N = 38 human participants), while experiment 2 assessed behavioral facilitation effects (N = 24 human participants). Across all stimulus conditions, MEG demonstrated context-based facilitation across multiple time windows starting at 100 ms, in occipital brain areas. This finding indicates context-based facilitation at an early visual processing level. In both experiments, we furthermore found an interaction of context and lexical familiarity, such that stimuli with associated meaning showed the strongest context-dependent facilitation in brain activation and behavior. 
Using MEG, this facilitation effect could be localized to the left anterior temporal lobe at around 400 ms, indicating within-level (i.e., exclusively lexical-semantic) facilitation but no top-down effects on earlier processing stages. Increased pre-lexical familiarity (in PWs familiarized utilizing training) did not enhance or reduce context effects significantly. We conclude that context-based facilitation is achieved within visual and lexical processing levels. Finally, by testing alternative hypotheses derived from mechanistic accounts of repetition suppression, we suggest that the facilitatory context effects found here are implemented using a predictive coding mechanism.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Word familiarity and predictive context facilitate visual word processing, leading to faster recognition times and reduced neuronal responses. Previously, models with and without top-down connections, including lexical-semantic, pre-lexical (e.g., orthographic/phonological), and visual processing levels were successful in accounting for these facilitation effects. Here we systematically assessed context-based facilitation with a repetition priming task and explicitly dissociated pre-lexical and lexical processing levels using a pseudoword (PW) familiarization procedure. Experiment 1 investigated the temporal dynamics of neuronal facilitation effects with magnetoencephalography (MEG; N = 38 human participants), while experiment 2 assessed behavioral facilitation effects (N = 24 human participants). Across all stimulus conditions, MEG demonstrated context-based facilitation across multiple time windows starting at 100 ms, in occipital brain areas. This finding indicates context-based facilitation at an early visual processing level. 
In both experiments, we furthermore found an interaction of context and lexical familiarity, such that stimuli with associated meaning showed the strongest context-dependent facilitation in brain activation and behavior. Using MEG, this facilitation effect could be localized to the left anterior temporal lobe at around 400 ms, indicating within-level (i.e., exclusively lexical-semantic) facilitation but no top-down effects on earlier processing stages. Increased pre-lexical familiarity (in PWs familiarized utilizing training) did not enhance or reduce context effects significantly. We conclude that context-based facilitation is achieved within visual and lexical processing levels. Finally, by testing alternative hypotheses derived from mechanistic accounts of repetition suppression, we suggest that the facilitatory context effects found here are implemented using a predictive coding mechanism. |
Eran Eldar; Gyung Jin Bae; Zeb Kurth-Nelson; Peter Dayan; Raymond J Dolan Magnetoencephalography decoding reveals structural differences within integrative decision processes Journal Article Nature Human Behaviour, 2 (9), pp. 670–681, 2018. @article{Eldar2018, title = {Magnetoencephalography decoding reveals structural differences within integrative decision processes}, author = {Eran Eldar and Gyung Jin Bae and Zeb Kurth-Nelson and Peter Dayan and Raymond J Dolan}, doi = {10.1038/s41562-018-0423-3}, year = {2018}, date = {2018-01-01}, journal = {Nature Human Behaviour}, volume = {2}, number = {9}, pages = {670--681}, publisher = {Springer US}, abstract = {When confronted with complex inputs consisting of multiple elements, humans use various strategies to integrate the elements quickly and accurately. For instance, accuracy may be improved by processing elements one at a time [1–4] or over extended periods [5–8]; speed can increase if the internal representation of elements is accelerated [9,10]. However, little is known about how humans actually approach these challenges because behavioural findings can be accounted for by multiple alternative process models [11] and neuroimaging investigations typically rely on haemodynamic signals that change too slowly. Consequently, to uncover the fast neural dynamics that support information integration, we decoded magnetoencephalographic signals that were recorded as human subjects performed a complex decision task. Our findings reveal three sources of individual differences in the temporal structure of the integration process—sequential representation, partial reinstatement and early computation—each having a dissociable effect on how subjects handled problem complexity and temporal constraints. 
Our findings shed new light on the structure and influence of self-determined neural integration processes.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When confronted with complex inputs consisting of multiple elements, humans use various strategies to integrate the elements quickly and accurately. For instance, accuracy may be improved by processing elements one at a time [1–4] or over extended periods [5–8]; speed can increase if the internal representation of elements is accelerated [9,10]. However, little is known about how humans actually approach these challenges because behavioural findings can be accounted for by multiple alternative process models [11] and neuroimaging investigations typically rely on haemodynamic signals that change too slowly. Consequently, to uncover the fast neural dynamics that support information integration, we decoded magnetoencephalographic signals that were recorded as human subjects performed a complex decision task. Our findings reveal three sources of individual differences in the temporal structure of the integration process—sequential representation, partial reinstatement and early computation—each having a dissociable effect on how subjects handled problem complexity and temporal constraints. Our findings shed new light on the structure and influence of self-determined neural integration processes. |
Mats W J van Es; Jan-Mathijs Schoffelen Stimulus-induced gamma power predicts the amplitude of the subsequent visual evoked response Journal Article NeuroImage, 186 , pp. 703–712, 2019. @article{Es2019l, title = {Stimulus-induced gamma power predicts the amplitude of the subsequent visual evoked response}, author = {Mats W J van Es and Jan-Mathijs Schoffelen}, doi = {10.1016/j.neuroimage.2018.11.029}, year = {2019}, date = {2019-01-01}, journal = {NeuroImage}, volume = {186}, pages = {703--712}, publisher = {Elsevier Ltd}, abstract = {The efficiency of neuronal information transfer in activated brain networks may affect behavioral performance. Gamma-band synchronization has been proposed to be a mechanism that facilitates neuronal processing of behaviorally relevant stimuli. In line with this, it has been shown that strong gamma-band activity in visual cortical areas leads to faster responses to a visual go cue. We investigated whether there are directly observable consequences of trial-by-trial fluctuations in non-invasively observed gamma-band activity on the neuronal response. Specifically, we hypothesized that the amplitude of the visual evoked response to a go cue can be predicted by gamma power in the visual system, in the window preceding the evoked response. Thirty-three human subjects (22 female) performed a visual speeded response task while their magnetoencephalogram (MEG) was recorded. The participants had to respond to a pattern reversal of a concentric moving grating. We estimated single trial stimulus-induced visual cortical gamma power, and correlated this with the estimated single trial amplitude of the most prominent event-related field (ERF) peak within the first 100 ms after the pattern reversal. In parieto-occipital cortical areas, the amplitude of the ERF correlated positively with gamma power, and correlated negatively with reaction times. 
No effects were observed for the alpha and beta frequency bands, despite clear stimulus onset induced modulation at those frequencies. These results support a mechanistic model, in which gamma-band synchronization enhances the neuronal gain to relevant visual input, thus leading to more efficient downstream processing and to faster responses.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The efficiency of neuronal information transfer in activated brain networks may affect behavioral performance. Gamma-band synchronization has been proposed to be a mechanism that facilitates neuronal processing of behaviorally relevant stimuli. In line with this, it has been shown that strong gamma-band activity in visual cortical areas leads to faster responses to a visual go cue. We investigated whether there are directly observable consequences of trial-by-trial fluctuations in non-invasively observed gamma-band activity on the neuronal response. Specifically, we hypothesized that the amplitude of the visual evoked response to a go cue can be predicted by gamma power in the visual system, in the window preceding the evoked response. Thirty-three human subjects (22 female) performed a visual speeded response task while their magnetoencephalogram (MEG) was recorded. The participants had to respond to a pattern reversal of a concentric moving grating. We estimated single trial stimulus-induced visual cortical gamma power, and correlated this with the estimated single trial amplitude of the most prominent event-related field (ERF) peak within the first 100 ms after the pattern reversal. In parieto-occipital cortical areas, the amplitude of the ERF correlated positively with gamma power, and correlated negatively with reaction times. No effects were observed for the alpha and beta frequency bands, despite clear stimulus onset induced modulation at those frequencies. 
These results support a mechanistic model, in which gamma-band synchronization enhances the neuronal gain to relevant visual input, thus leading to more efficient downstream processing and to faster responses. |
Mats W J van Es; Tom R Marshall; Eelke Spaak; Ole Jensen; Jan-Mathijs Schoffelen Phasic modulation of visual representations during sustained attention Journal Article European Journal of Neuroscience, pp. 1–18, 2021. @article{Es2021, title = {Phasic modulation of visual representations during sustained attention}, author = {Mats W J van Es and Tom R Marshall and Eelke Spaak and Ole Jensen and Jan-Mathijs Schoffelen}, doi = {10.1111/ejn.15084}, year = {2021}, date = {2021-01-01}, journal = {European Journal of Neuroscience}, pages = {1--18}, abstract = {Sustained attention has long been thought to benefit perception in a continuous fashion, but recent evidence suggests that it affects perception in a discrete, rhythmic way. Periodic fluctuations in behavioral performance over time, and modulations of behavioral performance by the phase of spontaneous oscillatory brain activity point to an attentional sampling rate in the theta or alpha frequency range. We investigated whether such discrete sampling by attention is reflected in periodic fluctuations in the decodability of visual stimulus orientation from magnetoencephalographic (MEG) brain signals. In this exploratory study, human subjects attended one of two grating stimuli while MEG was being recorded. We assessed the strength of the visual representation of the attended stimulus using a support vector machine (SVM) to decode the orientation of the grating (clockwise vs. counterclockwise) from the MEG signal. We tested whether decoder performance depended on the theta/alpha phase of local brain activity. While the phase of ongoing activity in visual cortex did not modulate decoding performance, theta/alpha phase of activity in the FEF and parietal cortex, contralateral to the attended stimulus did modulate decoding performance. 
These findings suggest that phasic modulations of visual stimulus representations in the brain are caused by frequency-specific top-down activity in the fronto-parietal attention network.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Sustained attention has long been thought to benefit perception in a continuous fashion, but recent evidence suggests that it affects perception in a discrete, rhythmic way. Periodic fluctuations in behavioral performance over time, and modulations of behavioral performance by the phase of spontaneous oscillatory brain activity point to an attentional sampling rate in the theta or alpha frequency range. We investigated whether such discrete sampling by attention is reflected in periodic fluctuations in the decodability of visual stimulus orientation from magnetoencephalographic (MEG) brain signals. In this exploratory study, human subjects attended one of two grating stimuli while MEG was being recorded. We assessed the strength of the visual representation of the attended stimulus using a support vector machine (SVM) to decode the orientation of the grating (clockwise vs. counterclockwise) from the MEG signal. We tested whether decoder performance depended on the theta/alpha phase of local brain activity. While the phase of ongoing activity in visual cortex did not modulate decoding performance, theta/alpha phase of activity in the FEF and parietal cortex, contralateral to the attended stimulus did modulate decoding performance. These findings suggest that phasic modulations of visual stimulus representations in the brain are caused by frequency-specific top-down activity in the fronto-parietal attention network. |
Helene M van Ettinger-Veenstra; W Huijbers; Tjerk P Gutteling; M Vink; Leon J Kenemans; Sebastiaan F W Neggers fMRI-guided TMS on cortical eye fields: The frontal but not intraparietal eye fields regulate the coupling between visuospatial attention and eye movements Journal Article Journal of Neurophysiology, 102 (6), pp. 3469–3480, 2009. @article{EttingerVeenstra2009, title = {fMRI-guided TMS on cortical eye fields: The frontal but not intraparietal eye fields regulate the coupling between visuospatial attention and eye movements}, author = {Helene M van Ettinger-Veenstra and W Huijbers and Tjerk P Gutteling and M Vink and Leon J Kenemans and Sebastiaan F W Neggers}, doi = {10.1152/jn.00350.2009}, year = {2009}, date = {2009-01-01}, journal = {Journal of Neurophysiology}, volume = {102}, number = {6}, pages = {3469--3480}, abstract = {It is well known that parts of a visual scene are prioritized for visual processing, depending on the current situation. How the CNS moves this focus of attention across the visual image is largely unknown, although there is substantial evidence that preparation of an action is a key factor. Our results support the view that direct corticocortical feedback connections from frontal oculomotor areas to the visual cortex are responsible for the coupling between eye movements and shifts of visuospatial attention. Functional magnetic resonance imaging (fMRI)-guided transcranial magnetic stimulation (TMS) was applied to the frontal eye fields (FEFs) and intraparietal sulcus (IPS). A single pulse was delivered 60, 30, or 0 ms before a discrimination target was presented at, or next to, the target of a saccade in preparation. Results showed that the known enhancement of discrimination performance specific to locations to which eye movements are being prepared was enhanced by early TMS on the FEF contralateral to eye movement direction, whereas TMS on the IPS resulted in a general performance increase. 
The current findings indicate that the FEF affects selective visual processing within the visual cortex itself through direct feedback projections.}, keywords = {}, pubstate = {published}, tppubtype = {article} } It is well known that parts of a visual scene are prioritized for visual processing, depending on the current situation. How the CNS moves this focus of attention across the visual image is largely unknown, although there is substantial evidence that preparation of an action is a key factor. Our results support the view that direct corticocortical feedback connections from frontal oculomotor areas to the visual cortex are responsible for the coupling between eye movements and shifts of visuospatial attention. Functional magnetic resonance imaging (fMRI)-guided transcranial magnetic stimulation (TMS) was applied to the frontal eye fields (FEFs) and intraparietal sulcus (IPS). A single pulse was delivered 60, 30, or 0 ms before a discrimination target was presented at, or next to, the target of a saccade in preparation. Results showed that the known enhancement of discrimination performance specific to locations to which eye movements are being prepared was enhanced by early TMS on the FEF contralateral to eye movement direction, whereas TMS on the IPS resulted in a general performance increase. The current findings indicate that the FEF affects selective visual processing within the visual cortex itself through direct feedback projections. |
Tomer Fekete; Felix D C C Beacher; Jiook Cha; Denis Rubin; Lilianne R Mujica-Parodi Small-world network properties in prefrontal cortex correlate with predictors of psychopathology risk in young children: A NIRS study Journal Article NeuroImage, 85 , pp. 345–353, 2014. @article{Fekete2014, title = {Small-world network properties in prefrontal cortex correlate with predictors of psychopathology risk in young children: A NIRS study}, author = {Tomer Fekete and Felix D C C Beacher and Jiook Cha and Denis Rubin and Lilianne R Mujica-Parodi}, doi = {10.1016/j.neuroimage.2013.07.022}, year = {2014}, date = {2014-01-01}, journal = {NeuroImage}, volume = {85}, pages = {345--353}, abstract = {Near infrared spectroscopy (NIRS) is an emerging imaging technique that is relatively inexpensive, portable, and particularly well suited for collecting data in ecological settings. Therefore, it holds promise as a potential neurodiagnostic for young children. We set out to explore whether NIRS could be utilized in assessing the risk of developmental psychopathology in young children. A growing body of work indicates that temperament at young age is associated with vulnerability to psychopathology later on in life. In particular, it has been shown that low effortful control (EC), which includes the focusing and shifting of attention, inhibitory control, perceptual sensitivity, and a low threshold for pleasure, is linked to conditions such as anxiety, depression and attention deficit hyperactivity disorder (ADHD). Physiologically, EC has been linked to a control network spanning among other sites the prefrontal cortex. Several psychopathologies, such as depression and ADHD, have been shown to result in compromised small-world network properties. Therefore we set out to explore the relationship between EC and the small-world properties of PFC using NIRS. 
NIRS data were collected from 44 toddlers, ages 3-5, while watching naturalistic stimuli (movie clips). Derived complex network measures were then correlated to EC as derived from the Children's Behavior Questionnaire (CBQ). We found that reduced levels of EC were associated with compromised small-world properties of the prefrontal network. Our results suggest that the longitudinal NIRS studies of complex network properties in young children hold promise in furthering our understanding of developmental psychopathology.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Near infrared spectroscopy (NIRS) is an emerging imaging technique that is relatively inexpensive, portable, and particularly well suited for collecting data in ecological settings. Therefore, it holds promise as a potential neurodiagnostic for young children. We set out to explore whether NIRS could be utilized in assessing the risk of developmental psychopathology in young children. A growing body of work indicates that temperament at young age is associated with vulnerability to psychopathology later on in life. In particular, it has been shown that low effortful control (EC), which includes the focusing and shifting of attention, inhibitory control, perceptual sensitivity, and a low threshold for pleasure, is linked to conditions such as anxiety, depression and attention deficit hyperactivity disorder (ADHD). Physiologically, EC has been linked to a control network spanning among other sites the prefrontal cortex. Several psychopathologies, such as depression and ADHD, have been shown to result in compromised small-world network properties. Therefore we set out to explore the relationship between EC and the small-world properties of PFC using NIRS. NIRS data were collected from 44 toddlers, ages 3-5, while watching naturalistic stimuli (movie clips). Derived complex network measures were then correlated to EC as derived from the Children's Behavior Questionnaire (CBQ). 
We found that reduced levels of EC were associated with compromised small-world properties of the prefrontal network. Our results suggest that the longitudinal NIRS studies of complex network properties in young children hold promise in furthering our understanding of developmental psychopathology. |
Tobias Feldmann-Wüstefeld Neural measures of working memory in a bilateral change detection task Journal Article Psychophysiology, 58 , pp. 1–22, 2020. @article{FeldmannWuestefeld2020, title = {Neural measures of working memory in a bilateral change detection task}, author = {Tobias Feldmann-Wüstefeld}, doi = {10.1111/psyp.13683}, year = {2020}, date = {2020-01-01}, journal = {Psychophysiology}, volume = {58}, pages = {1--22}, abstract = {The change detection task is a widely used paradigm to examine visual working memory processes. Participants memorize a set of items and then, try to detect changes in the set after a retention period. The negative slow wave (NSW) and contralateral delay activity (CDA) are event-related potentials in the EEG signal that are commonly used in change detection tasks to track working memory load, as both increase with the number of items maintained in working memory (set size). While the CDA was argued to more purely reflect the memory-specific neural activity than the NSW, it also requires a lateralized design and attention shifts prior to memoranda onset, imposing more restrictions on the task than the NSW. The present study proposes a novel change detection task in which both CDA and NSW can be measured at the same time. Memory items were presented bilaterally, but their distribution in the left and right hemifield varied, inducing a target imbalance or “net load.” NSW increased with set size, whereas CDA increased with net load. In addition, a multivariate linear classifier was able to decode the set size and net load from the EEG signal. CDA, NSW, and decoding accuracy predicted an individual's working memory capacity. In line with the notion of a bilateral advantage in working memory, accuracy, and CDA data suggest that participants tended to encode items relatively balanced. 
In sum, this novel change detection task offers a basis to make use of converging neural measures of working memory in a comprehensive paradigm.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The change detection task is a widely used paradigm to examine visual working memory processes. Participants memorize a set of items and then, try to detect changes in the set after a retention period. The negative slow wave (NSW) and contralateral delay activity (CDA) are event-related potentials in the EEG signal that are commonly used in change detection tasks to track working memory load, as both increase with the number of items maintained in working memory (set size). While the CDA was argued to more purely reflect the memory-specific neural activity than the NSW, it also requires a lateralized design and attention shifts prior to memoranda onset, imposing more restrictions on the task than the NSW. The present study proposes a novel change detection task in which both CDA and NSW can be measured at the same time. Memory items were presented bilaterally, but their distribution in the left and right hemifield varied, inducing a target imbalance or “net load.” NSW increased with set size, whereas CDA increased with net load. In addition, a multivariate linear classifier was able to decode the set size and net load from the EEG signal. CDA, NSW, and decoding accuracy predicted an individual's working memory capacity. In line with the notion of a bilateral advantage in working memory, accuracy, and CDA data suggest that participants tended to encode items relatively balanced. In sum, this novel change detection task offers a basis to make use of converging neural measures of working memory in a comprehensive paradigm. |
Ian C Fiebelkorn; Adam C Snyder; Manuel R Mercier; John S Butler; S Molholm; John J Foxe Cortical cross-frequency coupling predicts perceptual outcomes Journal Article NeuroImage, 69 , pp. 126–137, 2013. @article{Fiebelkorn2013, title = {Cortical cross-frequency coupling predicts perceptual outcomes}, author = {Ian C Fiebelkorn and Adam C Snyder and Manuel R Mercier and John S Butler and S Molholm and John J Foxe}, doi = {10.1016/j.neuroimage.2012.11.021}, year = {2013}, date = {2013-01-01}, journal = {NeuroImage}, volume = {69}, pages = {126--137}, publisher = {Elsevier Inc.}, abstract = {Functional networks are comprised of neuronal ensembles bound through synchronization across multiple intrinsic oscillatory frequencies. Various coupled interactions between brain oscillators have been described (e.g., phase-amplitude coupling), but with little evidence that these interactions actually influence perceptual sensitivity. Here, electroencephalographic (EEG) recordings were made during a sustained-attention task to demonstrate that cross-frequency coupling has significant consequences for perceptual outcomes (i.e., whether participants detect a near-threshold visual target). The data reveal that phase-detection relationships at higher frequencies are dependent on the phase of lower frequencies, such that higher frequencies alternate between periods when their phase is either strongly or weakly predictive of visual-target detection. Moreover, the specific higher frequencies and scalp topographies linked to visual-target detection also alternate as a function of lower-frequency phase. Cross-frequency coupling between lower (i.e., delta and theta) and higher frequencies (e.g., low- and high-beta) thus results in dramatic fluctuations of visual-target detection.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Functional networks are comprised of neuronal ensembles bound through synchronization across multiple intrinsic oscillatory frequencies. 
Various coupled interactions between brain oscillators have been described (e.g., phase-amplitude coupling), but with little evidence that these interactions actually influence perceptual sensitivity. Here, electroencephalographic (EEG) recordings were made during a sustained-attention task to demonstrate that cross-frequency coupling has significant consequences for perceptual outcomes (i.e., whether participants detect a near-threshold visual target). The data reveal that phase-detection relationships at higher frequencies are dependent on the phase of lower frequencies, such that higher frequencies alternate between periods when their phase is either strongly or weakly predictive of visual-target detection. Moreover, the specific higher frequencies and scalp topographies linked to visual-target detection also alternate as a function of lower-frequency phase. Cross-frequency coupling between lower (i.e., delta and theta) and higher frequencies (e.g., low- and high-beta) thus results in dramatic fluctuations of visual-target detection. |
Ruth Filik; Hartmut Leuthold; Katie Wallington; Jemma Page Testing theories of irony processing using eye-tracking and ERPs Journal Article Journal of Experimental Psychology: Learning, Memory, and Cognition, 40 (3), pp. 811–828, 2014. @article{Filik2014, title = {Testing theories of irony processing using eye-tracking and ERPs}, author = {Ruth Filik and Hartmut Leuthold and Katie Wallington and Jemma Page}, doi = {10.1037/a0035658}, year = {2014}, date = {2014-01-01}, journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, volume = {40}, number = {3}, pages = {811--828}, abstract = {Not much is known about how people comprehend ironic utterances, and to date, most studies have simply compared processing of ironic versus non-ironic statements. A key aspect of the graded salience hypothesis, distinguishing it from other accounts (such as the standard pragmatic view and direct access view), is that it predicts differences between processing of familiar and unfamiliar ironies. Specifically, if an ironic utterance is familiar, then the ironic interpretation should be available without the need for extra inferential processes, whereas for unfamiliar ironies, the literal interpretation would be computed first, and a mismatch with context would lead to a re-interpretation of the statement as being ironic. We recorded participants' eye movements while they were reading (Experiment 1), and electrical brain activity while they were listening to (Experiment 2), familiar and unfamiliar ironies compared to non-ironic controls. Results show disruption to eye movements and an N400-like effect for unfamiliar ironies only, supporting the predictions of the graded salience hypothesis. In addition, in Experiment 2, a late positivity was found for both familiar and unfamiliar ironic materials, compared to non-ironic controls. 
We interpret this positivity as reflecting ongoing conflict between the literal and ironic interpretations of the utterance.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Not much is known about how people comprehend ironic utterances, and to date, most studies have simply compared processing of ironic versus non-ironic statements. A key aspect of the graded salience hypothesis, distinguishing it from other accounts (such as the standard pragmatic view and direct access view), is that it predicts differences between processing of familiar and unfamiliar ironies. Specifically, if an ironic utterance is familiar, then the ironic interpretation should be available without the need for extra inferential processes, whereas for unfamiliar ironies, the literal interpretation would be computed first, and a mismatch with context would lead to a re-interpretation of the statement as being ironic. We recorded participants' eye movements while they were reading (Experiment 1), and electrical brain activity while they were listening to (Experiment 2), familiar and unfamiliar ironies compared to non-ironic controls. Results show disruption to eye movements and an N400-like effect for unfamiliar ironies only, supporting the predictions of the graded salience hypothesis. In addition, in Experiment 2, a late positivity was found for both familiar and unfamiliar ironic materials, compared to non-ironic controls. We interpret this positivity as reflecting ongoing conflict between the literal and ironic interpretations of the utterance. |
Thomas Fischer; Sven-Thomas Graupner; Boris M Velichkovsky; Sebastian Pannasch Attentional dynamics during free picture viewing: Evidence from oculomotor behavior and electrocortical activity Journal Article Frontiers in Systems Neuroscience, 7 , pp. 1–9, 2013. @article{Fischer2013, title = {Attentional dynamics during free picture viewing: Evidence from oculomotor behavior and electrocortical activity}, author = {Thomas Fischer and Sven-Thomas Graupner and Boris M Velichkovsky and Sebastian Pannasch}, doi = {10.3389/fnsys.2013.00017}, year = {2013}, date = {2013-01-01}, journal = {Frontiers in Systems Neuroscience}, volume = {7}, pages = {1--9}, abstract = {Most empirical evidence on attentional control is based on brief presentations of rather abstract stimuli. Results revealed indications for a dynamic interplay between bottom-up and top-down attentional mechanisms. Here we used a more naturalistic task to examine temporal signatures of attentional mechanisms on fine and coarse time scales. Subjects had to inspect digitized copies of 60 paintings, each shown for 40 s. We simultaneously measured oculomotor behavior and electrophysiological correlates of brain activity to compare early and late intervals (1) of inspection time of each picture (picture viewing) and (2) of the full experiment (time on task). For picture viewing, we found an increase in fixation duration and a decrease of saccadic amplitude while these parameters did not change with time on task. Furthermore, early in picture viewing we observed higher spatial and temporal similarity of gaze behavior. Analyzing electrical brain activity revealed changes in three components (C1, N1 and P2) of the eye fixation-related potential (EFRP); during picture viewing; no variation was obtained for the power in the frontal beta- and in the theta activity. Time on task analyses demonstrated no effects on the EFRP amplitudes but an increase of power in the frontal theta and beta band activity. 
Thus, behavioral and electrophysiological measures similarly show characteristic changes during picture viewing, indicating a shifting balance of its underlying (bottom-up and top-down) attentional mechanisms. Time on task also modulated top-down attention but probably represents a different attentional mechanism.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Most empirical evidence on attentional control is based on brief presentations of rather abstract stimuli. Results revealed indications for a dynamic interplay between bottom-up and top-down attentional mechanisms. Here we used a more naturalistic task to examine temporal signatures of attentional mechanisms on fine and coarse time scales. Subjects had to inspect digitized copies of 60 paintings, each shown for 40 s. We simultaneously measured oculomotor behavior and electrophysiological correlates of brain activity to compare early and late intervals (1) of inspection time of each picture (picture viewing) and (2) of the full experiment (time on task). For picture viewing, we found an increase in fixation duration and a decrease of saccadic amplitude while these parameters did not change with time on task. Furthermore, early in picture viewing we observed higher spatial and temporal similarity of gaze behavior. Analyzing electrical brain activity revealed changes in three components (C1, N1 and P2) of the eye fixation-related potential (EFRP); during picture viewing; no variation was obtained for the power in the frontal beta- and in the theta activity. Time on task analyses demonstrated no effects on the EFRP amplitudes but an increase of power in the frontal theta and beta band activity. Thus, behavioral and electrophysiological measures similarly show characteristic changes during picture viewing, indicating a shifting balance of its underlying (bottom-up and top-down) attentional mechanisms. 
Time on task also modulated top-down attention but probably represents a different attentional mechanism. |
Matthew W Flounders; Carlos González-García; Richard Hardstone; Biyu J He Neural dynamics of visual ambiguity resolution by perceptual prior Journal Article eLife, 8 , pp. 1–25, 2019. @article{Flounders2019, title = {Neural dynamics of visual ambiguity resolution by perceptual prior}, author = {Matthew W Flounders and Carlos González-García and Richard Hardstone and Biyu J He}, doi = {10.7554/eLife.41861}, year = {2019}, date = {2019-01-01}, journal = {eLife}, volume = {8}, pages = {1--25}, abstract = {Past experiences have enormous power in shaping our daily perception. Currently, dynamical neural mechanisms underlying this process remain mysterious. Exploiting a dramatic visual phenomenon, where a single experience of viewing a clear image allows instant recognition of a related degraded image, we investigated this question using MEG and 7 Tesla fMRI in humans. We observed that following the acquisition of perceptual priors, different degraded images are represented much more distinctly in neural dynamics starting from ~500 ms after stimulus onset. Content-specific neural activity related to stimulus-feature processing dominated within 300 ms after stimulus onset, while content-specific neural activity related to recognition processing dominated from 500 ms onward. Model-driven MEG-fMRI data fusion revealed the spatiotemporal evolution of neural activities involved in stimulus, attentional, and recognition processing. Together, these findings shed light on how experience shapes perceptual processing across space and time in the brain.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Past experiences have enormous power in shaping our daily perception. Currently, dynamical neural mechanisms underlying this process remain mysterious. Exploiting a dramatic visual phenomenon, where a single experience of viewing a clear image allows instant recognition of a related degraded image, we investigated this question using MEG and 7 Tesla fMRI in humans. 
We observed that following the acquisition of perceptual priors, different degraded images are represented much more distinctly in neural dynamics starting from ~500 ms after stimulus onset. Content-specific neural activity related to stimulus-feature processing dominated within 300 ms after stimulus onset, while content-specific neural activity related to recognition processing dominated from 500 ms onward. Model-driven MEG-fMRI data fusion revealed the spatiotemporal evolution of neural activities involved in stimulus, attentional, and recognition processing. Together, these findings shed light on how experience shapes perceptual processing across space and time in the brain. |
Joshua J Foster; Emma M Bsales; Russell J Jaffe; Edward Awh Alpha-band activity reveals spontaneous representations of spatial position in visual working memory Journal Article Current Biology, 27 (20), pp. 3216–3223, 2017. @article{Foster2017, title = {Alpha-band activity reveals spontaneous representations of spatial position in visual working memory}, author = {Joshua J Foster and Emma M Bsales and Russell J Jaffe and Edward Awh}, doi = {10.1016/j.cub.2017.09.031}, year = {2017}, date = {2017-01-01}, journal = {Current Biology}, volume = {27}, number = {20}, pages = {3216--3223}, publisher = {Elsevier Ltd.}, abstract = {An emerging view suggests that spatial position is an integral component of working memory (WM), such that non-spatial features are bound to locations regardless of whether space is relevant [1, 2]. For instance, past work has shown that stimulus position is spontaneously remembered when non-spatial features are stored. Item recognition is enhanced when memoranda appear at the same location where they were encoded [3–5], and accessing non-spatial information elicits shifts of spatial attention to the original position of the stimulus [6, 7]. However, these findings do not establish that a persistent, active representation of stimulus position is maintained in WM because similar effects have also been documented following storage in long-term memory [8, 9]. Here we show that the spatial position of the memorandum is actively coded by persistent neural activity during a non-spatial WM task. We used a spatial encoding model in conjunction with electroencephalogram (EEG) measurements of oscillatory alpha-band (8–12 Hz) activity to track active representations of spatial position. The position of the stimulus varied trial to trial but was wholly irrelevant to the tasks. We nevertheless observed active neural representations of the original stimulus position that persisted throughout the retention interval. 
Further experiments established that these spatial representations are dependent on the volitional storage of non-spatial features rather than being a lingering effect of sensory energy or initial encoding demands. These findings provide strong evidence that online spatial representations are spontaneously maintained in WM—regardless of task relevance—during the storage of non-spatial features.}, keywords = {}, pubstate = {published}, tppubtype = {article} } An emerging view suggests that spatial position is an integral component of working memory (WM), such that non-spatial features are bound to locations regardless of whether space is relevant [1, 2]. For instance, past work has shown that stimulus position is spontaneously remembered when non-spatial features are stored. Item recognition is enhanced when memoranda appear at the same location where they were encoded [3–5], and accessing non-spatial information elicits shifts of spatial attention to the original position of the stimulus [6, 7]. However, these findings do not establish that a persistent, active representation of stimulus position is maintained in WM because similar effects have also been documented following storage in long-term memory [8, 9]. Here we show that the spatial position of the memorandum is actively coded by persistent neural activity during a non-spatial WM task. We used a spatial encoding model in conjunction with electroencephalogram (EEG) measurements of oscillatory alpha-band (8–12 Hz) activity to track active representations of spatial position. The position of the stimulus varied trial to trial but was wholly irrelevant to the tasks. We nevertheless observed active neural representations of the original stimulus position that persisted throughout the retention interval. 
Further experiments established that these spatial representations are dependent on the volitional storage of non-spatial features rather than being a lingering effect of sensory energy or initial encoding demands. These findings provide strong evidence that online spatial representations are spontaneously maintained in WM—regardless of task relevance—during the storage of non-spatial features. |
Steven L Franconeri; Jason M Scimeca; Jessica C Roth; Sarah A Helseth; Lauren E Kahn Flexible visual processing of spatial relationships Journal Article Cognition, 122 (2), pp. 210–227, 2012. @article{Franconeri2012, title = {Flexible visual processing of spatial relationships}, author = {Steven L Franconeri and Jason M Scimeca and Jessica C Roth and Sarah A Helseth and Lauren E Kahn}, doi = {10.1016/j.cognition.2011.11.002}, year = {2012}, date = {2012-01-01}, journal = {Cognition}, volume = {122}, number = {2}, pages = {210--227}, publisher = {Elsevier B.V.}, abstract = {Visual processing breaks the world into parts and objects, allowing us not only to examine the pieces individually, but also to perceive the relationships among them. There is work exploring how we perceive spatial relationships within structures with existing representations, such as faces, common objects, or prototypical scenes. But strikingly, there is little work on the perceptual mechanisms that allow us to flexibly represent arbitrary spatial relationships, e.g., between objects in a novel room, or the elements within a map, graph or diagram. We describe two classes of mechanism that might allow such judgments. In the simultaneous class, both objects are selected concurrently. In contrast, we propose a sequential class, where objects are selected individually over time. We argue that this latter mechanism is more plausible even though it violates our intuitions. We demonstrate that shifts of selection do occur during spatial relationship judgments that feel simultaneous, by tracking selection with an electrophysiological correlate. We speculate that static structure across space may be encoded as a dynamic sequence across time. 
Flexible visual spatial relationship processing may serve as a case study of more general visual relation processing beyond space, to other dimensions such as size or numerosity.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual processing breaks the world into parts and objects, allowing us not only to examine the pieces individually, but also to perceive the relationships among them. There is work exploring how we perceive spatial relationships within structures with existing representations, such as faces, common objects, or prototypical scenes. But strikingly, there is little work on the perceptual mechanisms that allow us to flexibly represent arbitrary spatial relationships, e.g., between objects in a novel room, or the elements within a map, graph or diagram. We describe two classes of mechanism that might allow such judgments. In the simultaneous class, both objects are selected concurrently. In contrast, we propose a sequential class, where objects are selected individually over time. We argue that this latter mechanism is more plausible even though it violates our intuitions. We demonstrate that shifts of selection do occur during spatial relationship judgments that feel simultaneous, by tracking selection with an electrophysiological correlate. We speculate that static structure across space may be encoded as a dynamic sequence across time. Flexible visual spatial relationship processing may serve as a case study of more general visual relation processing beyond space, to other dimensions such as size or numerosity. |
Hans Peter Frey; Shane P Kelly; Edmund C Lalor; John J Foxe Early spatial attentional modulation of inputs to the fovea Journal Article Journal of Neuroscience, 30 (13), pp. 4547–4551, 2010. @article{Frey2010, title = {Early spatial attentional modulation of inputs to the fovea}, author = {Hans Peter Frey and Shane P Kelly and Edmund C Lalor and John J Foxe}, doi = {10.1523/JNEUROSCI.5217-09.2010}, year = {2010}, date = {2010-01-01}, journal = {Journal of Neuroscience}, volume = {30}, number = {13}, pages = {4547--4551}, abstract = {Attending to a specific spatial location modulates responsivity of neurons with receptive fields processing that part of the environment. A major outstanding question is whether attentional modulation operates differently for the foveal (central) representation of the visual field than it does for the periphery. Indeed, recent animal electrophysiological recordings suggest that attention differentially affects spatial integration for central and peripheral receptive fields in primary visual cortex. In human electroencephalographic recordings, spatial attention to peripheral locations robustly modulates activity in early visual regions, but it has been claimed that this mechanism does not operate in foveal vision. Here, however, we show clear early attentional modulation of foveal stimulation with the same timing and cortical sources as seen for peripheral stimuli, demonstrating that attentional gain control operates similarly across the entire field of view. These results imply that covertly attending away from the center of gaze, which is a common paradigm in behavioral and electrophysiological studies of attention, results in a precisely timed push–pull mechanism. 
While the amplitude of the initial response to stimulation at attended peripheral locations is significantly increased beginning at 80 ms, the amplitude of the response to foveal stimulation begins to be attenuated.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Attending to a specific spatial location modulates responsivity of neurons with receptive fields processing that part of the environment. A major outstanding question is whether attentional modulation operates differently for the foveal (central) representation of the visual field than it does for the periphery. Indeed, recent animal electrophysiological recordings suggest that attention differentially affects spatial integration for central and peripheral receptive fields in primary visual cortex. In human electroencephalographic recordings, spatial attention to peripheral locations robustly modulates activity in early visual regions, but it has been claimed that this mechanism does not operate in foveal vision. Here, however, we show clear early attentional modulation of foveal stimulation with the same timing and cortical sources as seen for peripheral stimuli, demonstrating that attentional gain control operates similarly across the entire field of view. These results imply that covertly attending away from the center of gaze, which is a common paradigm in behavioral and electrophysiological studies of attention, results in a precisely timed push–pull mechanism. While the amplitude of the initial response to stimulation at attended peripheral locations is significantly increased beginning at 80 ms, the amplitude of the response to foveal stimulation begins to be attenuated. |
Aline Frey; Gelu Ionescu; Benoît Lemaire; Francisco López-Orozco; Thierry Baccino; Anne Guérin-Dugué Decision-making in information seeking on texts: an eye-fixation-related potentials investigation Journal Article Frontiers in Systems Neuroscience, 7 , pp. 1–22, 2013. @article{Frey2013, title = {Decision-making in information seeking on texts: an eye-fixation-related potentials investigation}, author = {Aline Frey and Gelu Ionescu and Benoît Lemaire and Francisco López-Orozco and Thierry Baccino and Anne Guérin-Dugué}, doi = {10.3389/fnsys.2013.00039}, year = {2013}, date = {2013-01-01}, journal = {Frontiers in Systems Neuroscience}, volume = {7}, pages = {1--22}, abstract = {Reading on a web page is known to be not linear and people need to make fast decisions about whether they have to stop or not reading. In such context, reading, and decision-making processes are intertwined and this experiment attempts to separate them through electrophysiological patterns provided by the Eye-Fixation-Related Potentials technique (EFRPs). We conducted an experiment in which EFRPs were recorded while participants read blocks of text that were semantically highly related, moderately related, and unrelated to a given goal. Participants had to decide as fast as possible whether the text was related or not to the semantic goal given at a prior stage. Decision making (stopping information search) may occur when the paragraph is highly related to the goal (positive decision) or when it is unrelated to the goal (negative decision). EFRPs were analyzed on and around typical eye fixations: either on words belonging to the goal (target), subjected to a high rate of positive decisions, or on low frequency unrelated words (incongruent), subjected to a high rate of negative decisions. 
In both cases, we found EFRPs specific patterns (amplitude peaking between 51 to 120 ms after fixation onset) spreading out on the next words following the goal word and the second fixation after an incongruent word, in parietal and occipital areas. We interpreted these results as delayed late components (P3b and N400), reflecting the decision to stop information searching. Indeed, we show a clear spill-over effect showing that the effect on word N spread out on word N + 1 and N + 2.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Reading on a web page is known to be not linear and people need to make fast decisions about whether they have to stop or not reading. In such context, reading, and decision-making processes are intertwined and this experiment attempts to separate them through electrophysiological patterns provided by the Eye-Fixation-Related Potentials technique (EFRPs). We conducted an experiment in which EFRPs were recorded while participants read blocks of text that were semantically highly related, moderately related, and unrelated to a given goal. Participants had to decide as fast as possible whether the text was related or not to the semantic goal given at a prior stage. Decision making (stopping information search) may occur when the paragraph is highly related to the goal (positive decision) or when it is unrelated to the goal (negative decision). EFRPs were analyzed on and around typical eye fixations: either on words belonging to the goal (target), subjected to a high rate of positive decisions, or on low frequency unrelated words (incongruent), subjected to a high rate of negative decisions. In both cases, we found EFRPs specific patterns (amplitude peaking between 51 to 120 ms after fixation onset) spreading out on the next words following the goal word and the second fixation after an incongruent word, in parietal and occipital areas. 
We interpreted these results as delayed late components (P3b and N400), reflecting the decision to stop information searching. Indeed, we show a clear spill-over effect showing that the effect on word N spread out on word N + 1 and N + 2. |
Hans Peter Frey; Sophie Molholm; Edmund C Lalor; Natalie N Russo; John J Foxe Atypical cortical representation of peripheral visual space in children with an autism spectrum disorder Journal Article European Journal of Neuroscience, 38 (1), pp. 2125–2138, 2013. @article{Frey2013a, title = {Atypical cortical representation of peripheral visual space in children with an autism spectrum disorder}, author = {Hans Peter Frey and Sophie Molholm and Edmund C Lalor and Natalie N Russo and John J Foxe}, doi = {10.1111/ejn.12243}, year = {2013}, date = {2013-01-01}, journal = {European Journal of Neuroscience}, volume = {38}, number = {1}, pages = {2125--2138}, abstract = {A key feature of early visual cortical regions is that they contain discretely organized retinotopic maps. Titration of these maps must occur through experience, and the fidelity of their spatial tuning will depend on the consistency and accuracy of the eye movement system. Anomalies in fixation patterns and the ballistics of eye movements are well documented in autism spectrum disorder (ASD), with off-center fixations a hallmark of the phenotype. We hypothesized that these atypicalities might affect the development of visuo-spatial maps and specifically that peripheral inputs might receive altered processing in ASD. Using high-density recordings of visual evoked potentials (VEPs) and a novel system-identification approach known as VESPA (visual evoked spread spectrum analysis), we assessed sensory responses to centrally and peripherally presented stimuli. Additionally, input luminance was varied to bias responsiveness to the magnocellular system, given previous suggestions of magnocellular-specific deficits in ASD. Participants were 22 ASD children (7-17 years of age) and 31 age- and performance-IQ-matched neurotypical controls. Both VEP and VESPA responses to central presentations were indistinguishable between groups. 
In contrast, peripheral presentations resulted in significantly greater early VEP and VESPA amplitudes in the ASD cohort. We found no evidence that anomalous enhancement was restricted to magnocellular-biased responses. The extent of peripheral response enhancement was related to the severity of stereotyped behaviors and restricted interests, cardinal symptoms of ASD. The current results point to differential visuo-spatial cortical mapping in ASD, shedding light on the consequences of peculiarities in gaze and stereotyped visual behaviors often reported by clinicians working with this population.}, keywords = {}, pubstate = {published}, tppubtype = {article} } A key feature of early visual cortical regions is that they contain discretely organized retinotopic maps. Titration of these maps must occur through experience, and the fidelity of their spatial tuning will depend on the consistency and accuracy of the eye movement system. Anomalies in fixation patterns and the ballistics of eye movements are well documented in autism spectrum disorder (ASD), with off-center fixations a hallmark of the phenotype. We hypothesized that these atypicalities might affect the development of visuo-spatial maps and specifically that peripheral inputs might receive altered processing in ASD. Using high-density recordings of visual evoked potentials (VEPs) and a novel system-identification approach known as VESPA (visual evoked spread spectrum analysis), we assessed sensory responses to centrally and peripherally presented stimuli. Additionally, input luminance was varied to bias responsiveness to the magnocellular system, given previous suggestions of magnocellular-specific deficits in ASD. Participants were 22 ASD children (7-17 years of age) and 31 age- and performance-IQ-matched neurotypical controls. Both VEP and VESPA responses to central presentations were indistinguishable between groups. 
In contrast, peripheral presentations resulted in significantly greater early VEP and VESPA amplitudes in the ASD cohort. We found no evidence that anomalous enhancement was restricted to magnocellular-biased responses. The extent of peripheral response enhancement was related to the severity of stereotyped behaviors and restricted interests, cardinal symptoms of ASD. The current results point to differential visuo-spatial cortical mapping in ASD, shedding light on the consequences of peculiarities in gaze and stereotyped visual behaviors often reported by clinicians working with this population. |
Hans Peter Frey; Anita M Schmid; Jeremy W Murphy; Sophie Molholm; Edmund C Lalor; John J Foxe Modulation of early cortical processing during divided attention to non-contiguous locations Journal Article European Journal of Neuroscience, 39 (9), pp. 1499–1507, 2014. @article{Frey2014, title = {Modulation of early cortical processing during divided attention to non-contiguous locations}, author = {Hans Peter Frey and Anita M Schmid and Jeremy W Murphy and Sophie Molholm and Edmund C Lalor and John J Foxe}, doi = {10.1111/ejn.12523}, year = {2014}, date = {2014-01-01}, journal = {European Journal of Neuroscience}, volume = {39}, number = {9}, pages = {1499--1507}, abstract = {We often face the challenge of simultaneously attending to multiple non-contiguous regions of space. There is ongoing debate as to how spatial attention is divided under these situations. Whereas, for several years, the predominant view was that humans could divide the attentional spotlight, several recent studies argue in favor of a unitary spotlight that rhythmically samples relevant locations. Here, this issue was addressed by the use of high-density electrophysiology in concert with the multifocal m-sequence technique to examine visual evoked responses to multiple simultaneous streams of stimulation. Concurrently, we assayed the topographic distribution of alpha-band oscillatory mechanisms, a measure of attentional suppression. Participants performed a difficult detection task that required simultaneous attention to two stimuli in contiguous (undivided) or non-contiguous parts of space. In the undivided condition, the classic pattern of attentional modulation was observed, with increased amplitude of the early visual evoked response and increased alpha amplitude ipsilateral to the attended hemifield. 
For the divided condition, early visual responses to attended stimuli were also enhanced, and the observed multifocal topographic distribution of alpha suppression was in line with the divided attention hypothesis. These results support the existence of divided attentional spotlights, providing evidence that the corresponding modulation occurs during initial sensory processing time-frames in hierarchically early visual regions, and that suppressive mechanisms of visual attention selectively target distracter locations during divided spatial attention.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We often face the challenge of simultaneously attending to multiple non-contiguous regions of space. There is ongoing debate as to how spatial attention is divided under these situations. Whereas, for several years, the predominant view was that humans could divide the attentional spotlight, several recent studies argue in favor of a unitary spotlight that rhythmically samples relevant locations. Here, this issue was addressed by the use of high-density electrophysiology in concert with the multifocal m-sequence technique to examine visual evoked responses to multiple simultaneous streams of stimulation. Concurrently, we assayed the topographic distribution of alpha-band oscillatory mechanisms, a measure of attentional suppression. Participants performed a difficult detection task that required simultaneous attention to two stimuli in contiguous (undivided) or non-contiguous parts of space. In the undivided condition, the classic pattern of attentional modulation was observed, with increased amplitude of the early visual evoked response and increased alpha amplitude ipsilateral to the attended hemifield. For the divided condition, early visual responses to attended stimuli were also enhanced, and the observed multifocal topographic distribution of alpha suppression was in line with the divided attention hypothesis. 
These results support the existence of divided attentional spotlights, providing evidence that the corresponding modulation occurs during initial sensory processing time-frames in hierarchically early visual regions, and that suppressive mechanisms of visual attention selectively target distracter locations during divided spatial attention. |
Galit Fuhrmann Alpert; Ran Manor; Assaf B Spanier; Leon Y Deouell; Amir B Geva Spatiotemporal representations of rapid visual target detection: A single-trial EEG classification algorithm Journal Article IEEE Transactions on Biomedical Engineering, 61 (8), pp. 2290–2303, 2014. @article{FuhrmannAlpert2014a, title = {Spatiotemporal representations of rapid visual target detection: A single-trial EEG classification algorithm}, author = {Galit {Fuhrmann Alpert} and Ran Manor and Assaf B Spanier and Leon Y Deouell and Amir B Geva}, doi = {10.1109/TBME.2013.2289898}, year = {2014}, date = {2014-01-01}, journal = {IEEE Transactions on Biomedical Engineering}, volume = {61}, number = {8}, pages = {2290--2303}, abstract = {Brain computer interface applications, developed for both healthy and clinical populations, critically depend on decoding brain activity in single trials. The goal of the present study was to detect distinctive spatiotemporal brain patterns within a set of event related responses. We introduce a novel classification algorithm, the spatially weighted FLD-PCA (SWFP), which is based on a two-step linear classification of event-related responses, using fisher linear discriminant (FLD) classifier and principal component analysis (PCA) for dimensionality reduction. As a benchmark algorithm, we consider the hierarchical discriminant component Analysis (HDCA), introduced by Parra, et al. 2007. We also consider a modified version of the HDCA, namely the hierarchical discriminant principal component analysis algorithm (HDPCA). We compare single-trial classification accuracies of all the three algorithms, each applied to detect target images within a rapid serial visual presentation (RSVP, 10 Hz) of images from five different object categories, based on single-trial brain responses. We find a systematic superiority of our classification algorithm in the tested paradigm. Additionally, HDPCA significantly increases classification accuracies compared to the HDCA. 
Finally, we show that presenting several repetitions of the same image exemplars improve accuracy, and thus may be important in cases where high accuracy is crucial.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Brain computer interface applications, developed for both healthy and clinical populations, critically depend on decoding brain activity in single trials. The goal of the present study was to detect distinctive spatiotemporal brain patterns within a set of event related responses. We introduce a novel classification algorithm, the spatially weighted FLD-PCA (SWFP), which is based on a two-step linear classification of event-related responses, using fisher linear discriminant (FLD) classifier and principal component analysis (PCA) for dimensionality reduction. As a benchmark algorithm, we consider the hierarchical discriminant component Analysis (HDCA), introduced by Parra, et al. 2007. We also consider a modified version of the HDCA, namely the hierarchical discriminant principal component analysis algorithm (HDPCA). We compare single-trial classification accuracies of all the three algorithms, each applied to detect target images within a rapid serial visual presentation (RSVP, 10 Hz) of images from five different object categories, based on single-trial brain responses. We find a systematic superiority of our classification algorithm in the tested paradigm. Additionally, HDPCA significantly increases classification accuracies compared to the HDCA. Finally, we show that presenting several repetitions of the same image exemplars improve accuracy, and thus may be important in cases where high accuracy is crucial. |
Galit Fuhrmann Alpert; Ran Manor; Assaf B Spanier; Leon Y Deouell; Amir B Geva Spatio-temporal representations of rapid visual target detection: A single trial EEG classification Journal Article IEEE Transactions on Biomedical Engineering, 61 (8), pp. 2290–2303, 2014. @article{FuhrmannAlpert2014b, title = {Spatio-temporal representations of rapid visual target detection: A single trial EEG classification}, author = {Galit {Fuhrmann Alpert} and Ran Manor and Assaf B Spanier and Leon Y Deouell and Amir B Geva}, year = {2014}, date = {2014-01-01}, journal = {IEEE Transactions on Biomedical Engineering}, volume = {61}, number = {8}, pages = {2290--2303}, abstract = {Brain computer interface applications, developed for both healthy and clinical populations, critically depend on decoding brain activity in single trials. The goal of the present study was to detect distinctive spatiotemporal brain patterns within a set of event related responses. We introduce a novel classification algorithm, the spatially weighted FLD-PCA (SWFP), which is based on a two-step linear classification of event-related responses, using fisher linear discriminant (FLD) classifier and principal component analysis (PCA) for dimensionality reduction. As a benchmark algorithm, we consider the hierarchical discriminant component Analysis (HDCA), introduced by Parra, et al. 2007. We also consider a modified version of the HDCA, namely the hierarchical discriminant principal component analysis algorithm (HDPCA). We compare single-trial classification accuracies of all the three algorithms, each applied to detect target images within a rapid serial visual presentation (RSVP, 10 Hz) of images from five different object categories, based on single-trial brain responses. We find a systematic superiority of our classification algorithm in the tested paradigm. Additionally, HDPCA significantly increases classification accuracies compared to the HDCA. 
Finally, we show that presenting several repetitions of the same image exemplars improve accuracy, and thus may be important in cases where high accuracy is crucial.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Brain computer interface applications, developed for both healthy and clinical populations, critically depend on decoding brain activity in single trials. The goal of the present study was to detect distinctive spatiotemporal brain patterns within a set of event related responses. We introduce a novel classification algorithm, the spatially weighted FLD-PCA (SWFP), which is based on a two-step linear classification of event-related responses, using fisher linear discriminant (FLD) classifier and principal component analysis (PCA) for dimensionality reduction. As a benchmark algorithm, we consider the hierarchical discriminant component Analysis (HDCA), introduced by Parra, et al. 2007. We also consider a modified version of the HDCA, namely the hierarchical discriminant principal component analysis algorithm (HDPCA). We compare single-trial classification accuracies of all the three algorithms, each applied to detect target images within a rapid serial visual presentation (RSVP, 10 Hz) of images from five different object categories, based on single-trial brain responses. We find a systematic superiority of our classification algorithm in the tested paradigm. Additionally, HDPCA significantly increases classification accuracies compared to the HDCA. Finally, we show that presenting several repetitions of the same image exemplars improve accuracy, and thus may be important in cases where high accuracy is crucial. |
Danny Gagnon Transcranial magnetic stimulation of frontal oculomotor regions during smooth pursuit Journal Article Journal of Neuroscience, 26 (2), pp. 458–466, 2006. @article{Gagnon2006, title = {Transcranial magnetic stimulation of frontal oculomotor regions during smooth pursuit}, author = {Danny Gagnon}, doi = {10.1523/JNEUROSCI.2789-05.2006}, year = {2006}, date = {2006-01-01}, journal = {Journal of Neuroscience}, volume = {26}, number = {2}, pages = {458--466}, abstract = {Both the frontal eye fields (FEFs) and supplementary eye fields (SEFs) are known to be involved in smooth pursuit eye movements. It has been shown recently that stimulation of the smooth-pursuit area of the FEF [frontal pursuit area (FPA)] in monkey increases the pursuit response to unexpected changes in target motion during pursuit. In the current study, we applied transcranial magnetic stimulation (TMS) to the FPA and SEF in humans during sinusoidal pursuit to assess its effects on the pursuit response to predictable, rather than unexpected, changes in target motion. For the FPA, we found that TMS applied immediately before the target reversed direction increased eye velocity in the new direction, whereas TMS applied in mid-cycle, immediately before the target began to slow, decreased eye velocity. For the SEF, TMS applied at target reversal increased eye velocity in the new direction but had no effect on eye velocity when applied at mid-cycle. TMS of the control region (leg region of the somatosensory cortex) did not affect eye velocity at either point. Previous stimulation studies of FPA during pursuit have suggested that this region is involved in controlling the gain of the transformation of visual signals into pursuit motor commands. The current results suggest that the gain of the transformation of predictive signals into motor commands is also controlled by the FPA. 
The effect of stimulation of the SEF is distinct from that of the FPA and suggests that its role in sinusoidal pursuit is primarily at the target direction reversal.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Both the frontal eye fields (FEFs) and supplementary eye fields (SEFs) are known to be involved in smooth pursuit eye movements. It has been shown recently that stimulation of the smooth-pursuit area of the FEF [frontal pursuit area (FPA)] in monkey increases the pursuit response to unexpected changes in target motion during pursuit. In the current study, we applied transcranial magnetic stimulation (TMS) to the FPA and SEF in humans during sinusoidal pursuit to assess its effects on the pursuit response to predictable, rather than unexpected, changes in target motion. For the FPA, we found that TMS applied immediately before the target reversed direction increased eye velocity in the new direction, whereas TMS applied in mid-cycle, immediately before the target began to slow, decreased eye velocity. For the SEF, TMS applied at target reversal increased eye velocity in the new direction but had no effect on eye velocity when applied at mid-cycle. TMS of the control region (leg region of the somatosensory cortex) did not affect eye velocity at either point. Previous stimulation studies of FPA during pursuit have suggested that this region is involved in controlling the gain of the transformation of visual signals into pursuit motor commands. The current results suggest that the gain of the transformation of predictive signals into motor commands is also controlled by the FPA. The effect of stimulation of the SEF is distinct from that of the FPA and suggests that its role in sinusoidal pursuit is primarily at the target direction reversal. |
Steffen Gais; Sabine Köster; Andreas Sprenger; Judith Bethke; Wolfgang Heide; Hubert Kimmig Sleep is required for improving reaction times after training on a procedural visuo-motor task Journal Article Neurobiology of Learning and Memory, 90 (4), pp. 610–615, 2008. @article{Gais2008, title = {Sleep is required for improving reaction times after training on a procedural visuo-motor task}, author = {Steffen Gais and Sabine Köster and Andreas Sprenger and Judith Bethke and Wolfgang Heide and Hubert Kimmig}, doi = {10.1016/j.nlm.2008.07.016}, year = {2008}, date = {2008-01-01}, journal = {Neurobiology of Learning and Memory}, volume = {90}, number = {4}, pages = {610--615}, abstract = {Sleep has been found to enhance consolidation of many different forms of memory. However in most procedural tasks, a sleep-independent, fast learning component interacts with slow, sleep-dependent improvements. Here, we show that in humans a visuo-motor saccade learning task shows no improvements during training, but only during a delayed recall testing after a period of sleep. Subjects were trained in a prosaccade task (saccade to a visual target). Performance was tested in the prosaccade and the antisaccade task (saccade to opposite direction of the target) before training, after a night of sleep or sleep deprivation, after a night of recovery sleep, and finally in a follow-up test 4 weeks later. We found no immediate improvement in saccadic reaction time (SRT) during training, but a delayed reduction in SRT, indicating a slow-learning process. This reduction occurred only after a period of sleep, i.e. after the first night in the sleep group and after recovery sleep in the sleep deprivation group. This improvement was stable during the 4-week follow-up. Saccadic training can thus induce covert changes in the saccade generation pathway. 
During the following sleep period, these changes in turn bring about overt performance improvements, presuming a learning effect based on synaptic tagging.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Sleep has been found to enhance consolidation of many different forms of memory. However in most procedural tasks, a sleep-independent, fast learning component interacts with slow, sleep-dependent improvements. Here, we show that in humans a visuo-motor saccade learning task shows no improvements during training, but only during a delayed recall testing after a period of sleep. Subjects were trained in a prosaccade task (saccade to a visual target). Performance was tested in the prosaccade and the antisaccade task (saccade to opposite direction of the target) before training, after a night of sleep or sleep deprivation, after a night of recovery sleep, and finally in a follow-up test 4 weeks later. We found no immediate improvement in saccadic reaction time (SRT) during training, but a delayed reduction in SRT, indicating a slow-learning process. This reduction occurred only after a period of sleep, i.e. after the first night in the sleep group and after recovery sleep in the sleep deprivation group. This improvement was stable during the 4-week follow-up. Saccadic training can thus induce covert changes in the saccade generation pathway. During the following sleep period, these changes in turn bring about overt performance improvements, presuming a learning effect based on synaptic tagging. |
Matthew A Gannon; Stephanie M Long; Nathan A Parks Homeostatic plasticity in human extrastriate cortex following a simulated peripheral scotoma Journal Article Experimental Brain Research, 235 (11), pp. 3391–3401, 2017. @article{Gannon2017, title = {Homeostatic plasticity in human extrastriate cortex following a simulated peripheral scotoma}, author = {Matthew A Gannon and Stephanie M Long and Nathan A Parks}, doi = {10.1007/s00221-017-5042-0}, year = {2017}, date = {2017-01-01}, journal = {Experimental Brain Research}, volume = {235}, number = {11}, pages = {3391--3401}, publisher = {Springer Berlin Heidelberg}, abstract = {Neuroimaging and patient work over the past decade have indicated that, following retinal deafferentation, the human visual cortex undergoes a large-scale and enduring reorganization of its topography such that the classical retinotopic organization of deafferented visual cortex remaps to represent non-classical regions of visual space. Such long-term visual reorganization is proposed to occur through changes in the functional balance of deafferented visual circuits that engage more lasting changes through activity-dependent neuroplasticity. Here, we investigated the short-term changes in functional balance (short-term plasticity; homeostatic plasticity) that occur within deafferented human visual cortices. We recorded electroencephalogram (EEG) while observers were conditioned for 6 s with a simulated retinal scotoma (artificial scotoma) positioned 8.0° in the periphery. Visual evoked potentials (VEPs) evoked by the onset of sinusoidal visual probes that varied in their tilt were used to examine changes in cortical excitability within and around cortical representations of the simulated scotoma. Psychophysical orientation functions obtained from discrimination of visual probe tilt were used to examine alterations in the stimulus selectivity within the scotoma representations. 
Consistent with a mechanism of homeostatic disinhibition, an early extrastriate component of the VEP (the early phase P1) exhibited increased amplitude following the condition with a simulated scotoma relative to a stimulus-matched control condition. This increased visual cortical response was associated with a reduction in the slope of the psychophysical orientation function, suggesting a broader tuning of neural populations within scotoma representations. Together, these findings support a mechanism of disinhibition in promoting visual plasticity and topographical reorganization.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Neuroimaging and patient work over the past decade have indicated that, following retinal deafferentation, the human visual cortex undergoes a large-scale and enduring reorganization of its topography such that the classical retinotopic organization of deafferented visual cortex remaps to represent non-classical regions of visual space. Such long-term visual reorganization is proposed to occur through changes in the functional balance of deafferented visual circuits that engage more lasting changes through activity-dependent neuroplasticity. Here, we investigated the short-term changes in functional balance (short-term plasticity; homeostatic plasticity) that occur within deafferented human visual cortices. We recorded electroencephalogram (EEG) while observers were conditioned for 6 s with a simulated retinal scotoma (artificial scotoma) positioned 8.0° in the periphery. Visual evoked potentials (VEPs) evoked by the onset of sinusoidal visual probes that varied in their tilt were used to examine changes in cortical excitability within and around cortical representations of the simulated scotoma. Psychophysical orientation functions obtained from discrimination of visual probe tilt were used to examine alterations in the stimulus selectivity within the scotoma representations. 
Consistent with a mechanism of homeostatic disinhibition, an early extrastriate component of the VEP (the early phase P1) exhibited increased amplitude following the condition with a simulated scotoma relative to a stimulus-matched control condition. This increased visual cortical response was associated with a reduction in the slope of the psychophysical orientation function, suggesting a broader tuning of neural populations within scotoma representations. Together, these findings support a mechanism of disinhibition in promoting visual plasticity and topographical reorganization. |
Nigel Gebodh; Isabel M Vanegas; Simon P Kelly Effects of stimulus size and contrast on the initial primary visual cortical response in humans Journal Article Brain Topography, 30 (4), pp. 450–460, 2017. @article{Gebodh2017, title = {Effects of stimulus size and contrast on the initial primary visual cortical response in humans}, author = {Nigel Gebodh and Isabel M Vanegas and Simon P Kelly}, doi = {10.1007/s10548-016-0530-2}, year = {2017}, date = {2017-01-01}, journal = {Brain Topography}, volume = {30}, number = {4}, pages = {450--460}, publisher = {Springer US}, abstract = {Decades of intracranial electrophysiological investigation into the primary visual cortex (V1) have produced many fundamental insights into the computations carried out in low-level visual circuits of the brain. Some of the most important work has been simply concerned with the precise measurement of neural response variations as a function of elementary stimulus attributes such as contrast and size. Surprisingly, such simple but fundamental characterization of V1 responses has not been carried out in human electrophysiology. Here we report such a detailed characterization for the initial “C1” component of the scalp-recorded visual evoked potential (VEP). The C1 is known to be dominantly generated by initial afferent activation in V1, but is difficult to record reliably due to interindividual anatomical variability. We used pattern-pulse multifocal VEP mapping to identify a stimulus position that activates the left lower calcarine bank in each individual, and afterwards measured robust negative C1s over posterior midline scalp to gratings presented sequentially at that location. We found clear and systematic increases in C1 peak amplitude and decreases in peak latency with increasing size as well as with increasing contrast. 
With a sample of 15 subjects and ~180 trials per condition, reliable C1 amplitudes of −0.46 µV were evoked at as low a contrast as 3.13% and as large as −4.82 µV at 100% contrast, using stimuli of 3.33° diameter. A practical implication is that by placing sufficiently-sized stimuli to target favorable calcarine cortical loci, robust V1 responses can be measured at contrasts close to perceptual thresholds, which could greatly facilitate principled studies of early visual perception and attention.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Decades of intracranial electrophysiological investigation into the primary visual cortex (V1) have produced many fundamental insights into the computations carried out in low-level visual circuits of the brain. Some of the most important work has been simply concerned with the precise measurement of neural response variations as a function of elementary stimulus attributes such as contrast and size. Surprisingly, such simple but fundamental characterization of V1 responses has not been carried out in human electrophysiology. Here we report such a detailed characterization for the initial “C1” component of the scalp-recorded visual evoked potential (VEP). The C1 is known to be dominantly generated by initial afferent activation in V1, but is difficult to record reliably due to interindividual anatomical variability. We used pattern-pulse multifocal VEP mapping to identify a stimulus position that activates the left lower calcarine bank in each individual, and afterwards measured robust negative C1s over posterior midline scalp to gratings presented sequentially at that location. We found clear and systematic increases in C1 peak amplitude and decreases in peak latency with increasing size as well as with increasing contrast. 
With a sample of 15 subjects and ~180 trials per condition, reliable C1 amplitudes of −0.46 µV were evoked at as low a contrast as 3.13% and as large as −4.82 µV at 100% contrast, using stimuli of 3.33° diameter. A practical implication is that by placing sufficiently-sized stimuli to target favorable calcarine cortical loci, robust V1 responses can be measured at contrasts close to perceptual thresholds, which could greatly facilitate principled studies of early visual perception and attention. |
Hagar Gelbard-Sagiv; Efrat Magidov; Haggai Sharon; Talma Hendler Noradrenaline modulates visual perception and late visually evoked activity Journal Article Current Biology, 28 , pp. 2239–2249, 2018. @article{GelbardSagiv2018, title = {Noradrenaline modulates visual perception and late visually evoked activity}, author = {Hagar Gelbard-Sagiv and Efrat Magidov and Haggai Sharon and Talma Hendler}, doi = {10.1016/j.cub.2018.05.051}, year = {2018}, date = {2018-01-01}, journal = {Current Biology}, volume = {28}, pages = {2239--2249}, abstract = {An identical sensory stimulus may or may not be incorporated into perceptual experience, depending on the behavioral and cognitive state of the organism. What determines whether a sensory stimulus will be perceived? While different behavioral and cognitive states may share a similar profile of electrophysiology, metabolism, and early sensory responses, neuromodulation is often different and therefore may constitute a key mechanism enabling perceptual awareness. Specifically, noradrenaline improves sensory responses, correlates with orienting toward behaviorally relevant stimuli, and is markedly reduced during sleep, while experience is largely ‘‘disconnected'' from external events. Despite correlative evidence hinting at a relationship between noradrenaline and perception, causal evidence remains absent. Here, we pharmacologically down- and upregulated noradrenaline signaling in healthy volunteers using clonidine and reboxetine in double-blind placebo-controlled experiments, testing the effects on perceptual abilities and visually evoked electroencephalography (EEG) and fMRI responses. We found that detection sensitivity, discrimination accuracy, and subjective visibility change in accordance with noradrenaline (NE) levels, whereas decision bias (criterion) is not affected. 
Similarly, noradrenaline increases the consistency of EEG visually evoked potentials, while lower noradrenaline levels delay response components around 200 ms. Furthermore, blood-oxygen-level-dependent (BOLD) fMRI activations in high-order visual cortex selectively vary along with noradrenaline signaling. Taken together, these results point to noradrenaline as a key factor causally linking visual awareness to external world events.}, keywords = {}, pubstate = {published}, tppubtype = {article} } An identical sensory stimulus may or may not be incorporated into perceptual experience, depending on the behavioral and cognitive state of the organism. What determines whether a sensory stimulus will be perceived? While different behavioral and cognitive states may share a similar profile of electrophysiology, metabolism, and early sensory responses, neuromodulation is often different and therefore may constitute a key mechanism enabling perceptual awareness. Specifically, noradrenaline improves sensory responses, correlates with orienting toward behaviorally relevant stimuli, and is markedly reduced during sleep, while experience is largely "disconnected" from external events. Despite correlative evidence hinting at a relationship between noradrenaline and perception, causal evidence remains absent. Here, we pharmacologically down- and upregulated noradrenaline signaling in healthy volunteers using clonidine and reboxetine in double-blind placebo-controlled experiments, testing the effects on perceptual abilities and visually evoked electroencephalography (EEG) and fMRI responses. We found that detection sensitivity, discrimination accuracy, and subjective visibility change in accordance with noradrenaline (NE) levels, whereas decision bias (criterion) is not affected. Similarly, noradrenaline increases the consistency of EEG visually evoked potentials, while lower noradrenaline levels delay response components around 200 ms. 
Furthermore, blood-oxygen-level-dependent (BOLD) fMRI activations in high-order visual cortex selectively vary along with noradrenaline signaling. Taken together, these results point to noradrenaline as a key factor causally linking visual awareness to external world events. |
Edden M Gerber; Tal Golan; Robert T Knight; Leon Y Deouell Cortical representation of persistent visual stimuli Journal Article NeuroImage, 161 , pp. 67–79, 2017. @article{Gerber2017, title = {Cortical representation of persistent visual stimuli}, author = {Edden M Gerber and Tal Golan and Robert T Knight and Leon Y Deouell}, doi = {10.1016/j.neuroimage.2017.08.028}, year = {2017}, date = {2017-01-01}, journal = {NeuroImage}, volume = {161}, pages = {67--79}, publisher = {Elsevier Ltd}, abstract = {Research into visual neural activity has focused almost exclusively on onset- or change-driven responses and little is known about how information is encoded in the brain during sustained periods of visual perception. We used intracranial recordings in humans to determine the degree to which the presence of a visual stimulus is persistently encoded by neural activity. The correspondence between stimulus duration and neural response duration was strongest in early visual cortex and gradually diminished along the visual hierarchy, such that it was weakest in inferior-temporal category-selective regions. A similar posterior-anterior gradient was found within inferior temporal face-selective regions, with posterior but not anterior sites showing persistent face-selective activity. The results suggest that regions that appear uniform in terms of their category selectivity are dissociated by how they temporally represent a stimulus in support of ongoing visual perception, and delineate a large-scale organizing principle of the ventral visual stream.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Research into visual neural activity has focused almost exclusively on onset- or change-driven responses and little is known about how information is encoded in the brain during sustained periods of visual perception. We used intracranial recordings in humans to determine the degree to which the presence of a visual stimulus is persistently encoded by neural activity. 
The correspondence between stimulus duration and neural response duration was strongest in early visual cortex and gradually diminished along the visual hierarchy, such that it was weakest in inferior-temporal category-selective regions. A similar posterior-anterior gradient was found within inferior temporal face-selective regions, with posterior but not anterior sites showing persistent face-selective activity. The results suggest that regions that appear uniform in terms of their category selectivity are dissociated by how they temporally represent a stimulus in support of ongoing visual perception, and delineate a large-scale organizing principle of the ventral visual stream. |
Mónika Gergelyfi; Benvenuto Jacob; Etienne Olivier; Alexandre Zénon Dissociation between mental fatigue and motivational state during prolonged mental activity Journal Article Frontiers in Behavioral Neuroscience, 9 , pp. 1–15, 2015. @article{Gergelyfi2015, title = {Dissociation between mental fatigue and motivational state during prolonged mental activity}, author = {Mónika Gergelyfi and Benvenuto Jacob and Etienne Olivier and Alexandre Zénon}, doi = {10.3389/fnbeh.2015.00176}, year = {2015}, date = {2015-01-01}, journal = {Frontiers in Behavioral Neuroscience}, volume = {9}, pages = {1--15}, abstract = {Mental fatigue (MF) is commonly observed following prolonged cognitive activity and can have major repercussions on the daily life of patients as well as healthy individuals. Despite its important impact, the cognitive processes involved in MF remain largely unknown. An influential hypothesis states that MF does not arise from a disruption of overused neural processes but, rather, is caused by a progressive decrease in motivation-related task engagement. Here, to test this hypothesis, we measured various neural, autonomic, psychometric and behavioral signatures of MF and motivation (EEG, ECG, pupil size, eye blinks, Skin conductance responses (SCRs), questionnaires and performance in a working memory (WM) task) in healthy volunteers, while MF was induced by Sudoku tasks performed for 120 min. Moreover extrinsic motivation was manipulated by using different levels of monetary reward. We found that, during the course of the experiment, the participants' subjective feeling of fatigue increased and their performance worsened while their blink rate and heart rate variability (HRV) increased. Conversely, reward-induced EEG, pupillometric and skin conductance signal changes, regarded as indicators of task engagement, remained constant during the experiment, and failed to correlate with the indices of MF. 
In addition, MF did not affect a simple reaction time task, despite the strong influence of extrinsic motivation on this task. Finally, alterations of the motivational state through monetary incentives failed to compensate the effects of MF. These findings indicate that MF in healthy subjects is not caused by an alteration of task engagement but is likely to be the consequence of a decrease in the efficiency, or availability, of cognitive resources.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Mental fatigue (MF) is commonly observed following prolonged cognitive activity and can have major repercussions on the daily life of patients as well as healthy individuals. Despite its important impact, the cognitive processes involved in MF remain largely unknown. An influential hypothesis states that MF does not arise from a disruption of overused neural processes but, rather, is caused by a progressive decrease in motivation-related task engagement. Here, to test this hypothesis, we measured various neural, autonomic, psychometric and behavioral signatures of MF and motivation (EEG, ECG, pupil size, eye blinks, Skin conductance responses (SCRs), questionnaires and performance in a working memory (WM) task) in healthy volunteers, while MF was induced by Sudoku tasks performed for 120 min. Moreover extrinsic motivation was manipulated by using different levels of monetary reward. We found that, during the course of the experiment, the participants' subjective feeling of fatigue increased and their performance worsened while their blink rate and heart rate variability (HRV) increased. Conversely, reward-induced EEG, pupillometric and skin conductance signal changes, regarded as indicators of task engagement, remained constant during the experiment, and failed to correlate with the indices of MF. In addition, MF did not affect a simple reaction time task, despite the strong influence of extrinsic motivation on this task. 
Finally, alterations of the motivational state through monetary incentives failed to compensate the effects of MF. These findings indicate that MF in healthy subjects is not caused by an alteration of task engagement but is likely to be the consequence of a decrease in the efficiency, or availability, of cognitive resources. |
Thomas Geyer; Franziska Günther; Hermann J Müller; Jim Kacian; Heinrich René Liesefeld; Stella Pierides Reading English-language haiku: An eye-movement study of the 'cut effect' Journal Article Journal of Eye Movement Research, 13 (2), pp. 1–29, 2020. @article{Geyer2020, title = {Reading English-language haiku: An eye-movement study of the 'cut effect'}, author = {Thomas Geyer and Franziska Günther and Hermann J Müller and Jim Kacian and Heinrich René Liesefeld and Stella Pierides}, doi = {10.16910/jemr.13.2.2}, year = {2020}, date = {2020-01-01}, journal = {Journal of Eye Movement Research}, volume = {13}, number = {2}, pages = {1--29}, abstract = {The current study, set within the larger enterprise of Neuro-Cognitive Poetics, was designed to examine how readers deal with the 'cut'-a more or less sharp semantic-conceptual break-in normative, three-line English-language haiku poems (ELH). Readers were presented with three-line haiku that consisted of two (seemingly) disparate parts, a (two-line) 'phrase' image and a one-line 'fragment' image, in order to determine how they process the conceptual gap between these images when constructing the poem's meaning-as reflected in their patterns of reading eye movements. In addition to replicating the basic 'cut effect', i.e., the extended fixation dwell time on the fragment line relative to the other lines, the present study examined (a) how this effect is influenced by whether the cut is purely implicit or explicitly marked by punctuation, and (b) whether the effect pattern could be delineated against a control condition of 'uncut', one-image haiku. For 'cut' vs. 'uncut' haiku, the results revealed the distribution of fixations across the poems to be modulated by the position of the cut (after line 1 vs. after line 2), the presence vs. absence of a cut marker, and the semanticconceptual distance between the two images (context-action vs. juxtaposition haiku). 
These formal-structural and conceptual-semantic properties were associated with systematic changes in how individual poem lines were scanned at first reading and then (selectively) re-sampled in second- and third-pass reading to construct and check global meaning. No such effects were found for one-image (control) haiku. We attribute this pattern to the operation of different meaning resolution processes during the comprehension of two-image haiku, which are invoked by both form- and meaning-related features of the poems.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The current study, set within the larger enterprise of Neuro-Cognitive Poetics, was designed to examine how readers deal with the 'cut'-a more or less sharp semantic-conceptual break-in normative, three-line English-language haiku poems (ELH). Readers were presented with three-line haiku that consisted of two (seemingly) disparate parts, a (two-line) 'phrase' image and a one-line 'fragment' image, in order to determine how they process the conceptual gap between these images when constructing the poem's meaning-as reflected in their patterns of reading eye movements. In addition to replicating the basic 'cut effect', i.e., the extended fixation dwell time on the fragment line relative to the other lines, the present study examined (a) how this effect is influenced by whether the cut is purely implicit or explicitly marked by punctuation, and (b) whether the effect pattern could be delineated against a control condition of 'uncut', one-image haiku. For 'cut' vs. 'uncut' haiku, the results revealed the distribution of fixations across the poems to be modulated by the position of the cut (after line 1 vs. after line 2), the presence vs. absence of a cut marker, and the semantic-conceptual distance between the two images (context-action vs. juxtaposition haiku). 
These formal-structural and conceptual-semantic properties were associated with systematic changes in how individual poem lines were scanned at first reading and then (selectively) re-sampled in second- and third-pass reading to construct and check global meaning. No such effects were found for one-image (control) haiku. We attribute this pattern to the operation of different meaning resolution processes during the comprehension of two-image haiku, which are invoked by both form- and meaning-related features of the poems. |
Marcello Giannini; David M Alexander; Andrey R Nikolaev; Cees van Leeuwen Large-scale traveling waves in EEG activity following eye movement Journal Article Brain Topography, 31 (4), pp. 608–622, 2018. @article{Giannini2018, title = {Large-scale traveling waves in EEG activity following eye movement}, author = {Marcello Giannini and David M Alexander and Andrey R Nikolaev and Cees van Leeuwen}, doi = {10.1007/s10548-018-0622-2}, year = {2018}, date = {2018-01-01}, journal = {Brain Topography}, volume = {31}, number = {4}, pages = {608--622}, publisher = {Springer US}, abstract = {In spontaneous, stimulus-evoked, and eye-movement evoked EEG, the oscillatory signal shows large scale, dynamically organized patterns of phase. We investigated eye-movement evoked patterns in free-viewing conditions. Participants viewed photographs of natural scenes in anticipation of a memory test. From 200 ms intervals following saccades, we estimated the EEG phase gradient over the entire scalp, and the wave activity, i.e. the goodness of fit of a wave model involving a phase gradient assumed to be smooth over the scalp. In frequencies centered at 6.5 Hz, large-scale phase organization occurred, peaking around 70 ms after fixation onset and taking the form of a traveling wave. According to the wave gradient, most of the times the wave spreads from the posterior-inferior to anterior–superior direction. In these directions, the gradients depended on the size and direction of the saccade. Wave propagation velocity decreased in the course of the fixation, particularly in the interval from 50 to 150 ms after fixation onset. This interval corresponds to the fixation-related lambda activity, which reflects early perceptual processes following fixation onset. We conclude that lambda activity has a prominent traveling wave component. 
This component consists of a short-term whole-head phase pattern of specific direction and velocity, which may reflect feedforward propagation of visual information at fixation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In spontaneous, stimulus-evoked, and eye-movement evoked EEG, the oscillatory signal shows large scale, dynamically organized patterns of phase. We investigated eye-movement evoked patterns in free-viewing conditions. Participants viewed photographs of natural scenes in anticipation of a memory test. From 200 ms intervals following saccades, we estimated the EEG phase gradient over the entire scalp, and the wave activity, i.e. the goodness of fit of a wave model involving a phase gradient assumed to be smooth over the scalp. In frequencies centered at 6.5 Hz, large-scale phase organization occurred, peaking around 70 ms after fixation onset and taking the form of a traveling wave. According to the wave gradient, most of the times the wave spreads from the posterior-inferior to anterior–superior direction. In these directions, the gradients depended on the size and direction of the saccade. Wave propagation velocity decreased in the course of the fixation, particularly in the interval from 50 to 150 ms after fixation onset. This interval corresponds to the fixation-related lambda activity, which reflects early perceptual processes following fixation onset. We conclude that lambda activity has a prominent traveling wave component. This component consists of a short-term whole-head phase pattern of specific direction and velocity, which may reflect feedforward propagation of visual information at fixation. |
Lauren R Godier; Jessica C Scaife; Sven Braeutigam; Rebecca J Park Enhanced early neuronal processing of food pictures in Anorexia Nervosa: A magnetoencephalography study Journal Article Psychiatry Journal, 2016 , pp. 1–13, 2016. @article{Godier2016, title = {Enhanced early neuronal processing of food pictures in Anorexia Nervosa: A magnetoencephalography study}, author = {Lauren R Godier and Jessica C Scaife and Sven Braeutigam and Rebecca J Park}, doi = {10.1155/2016/1795901}, year = {2016}, date = {2016-01-01}, journal = {Psychiatry Journal}, volume = {2016}, pages = {1--13}, abstract = {Neuroimaging studies in Anorexia Nervosa (AN) have shown increased activation in reward and cognitive control regions in response to food, and a behavioral attentional bias (AB) towards food stimuli is reported. This study aimed to further investigate the neural processing of food using magnetoencephalography (MEG). Participants were 13 females with restricting-type AN, 14 females recovered from restricting-type AN, and 15 female healthy controls. MEG data was acquired whilst participants viewed high- and low-calorie food pictures. Attention was assessed with a reaction time task and eye tracking. Time-series analysis suggested increased neural activity in response to both calorie conditions in the AN groups, consistent with an early AB. Increased activity was observed at 150 ms in the current AN group. Neuronal activity at this latency was at normal level in the recovered group; however, this group exhibited enhanced activity at 320 ms after stimulus. Consistent with previous studies, analysis in source space and behavioral data suggested enhanced attention and cognitive control processes in response to food stimuli in AN. This may enable avoidance of salient food stimuli and maintenance of dietary restraint in AN. 
A later latency of increased activity in the recovered group may reflect a reversal of this avoidance, with source space and behavioral data indicating increased visual and cognitive processing of food stimuli.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Neuroimaging studies in Anorexia Nervosa (AN) have shown increased activation in reward and cognitive control regions in response to food, and a behavioral attentional bias (AB) towards food stimuli is reported. This study aimed to further investigate the neural processing of food using magnetoencephalography (MEG). Participants were 13 females with restricting-type AN, 14 females recovered from restricting-type AN, and 15 female healthy controls. MEG data was acquired whilst participants viewed high- and low-calorie food pictures. Attention was assessed with a reaction time task and eye tracking. Time-series analysis suggested increased neural activity in response to both calorie conditions in the AN groups, consistent with an early AB. Increased activity was observed at 150 ms in the current AN group. Neuronal activity at this latency was at normal level in the recovered group; however, this group exhibited enhanced activity at 320 ms after stimulus. Consistent with previous studies, analysis in source space and behavioral data suggested enhanced attention and cognitive control processes in response to food stimuli in AN. This may enable avoidance of salient food stimuli and maintenance of dietary restraint in AN. A later latency of increased activity in the recovered group may reflect a reversal of this avoidance, with source space and behavioral data indicating increased visual and cognitive processing of food stimuli. |
Tal Golan; Ido Davidesco; Meir Meshulam; David M Groppe; Pierre Mégevand; Erin M Yeagle; Matthew S Goldfinger; Michal Harel; Lucia Melloni; Charles E Schroeder; D L Deouell; Ashesh D Mehta; Rafael Malach Human intracranial recordings link suppressed transients rather than 'filling-in' to perceptual continuity across blinks Journal Article eLife, 5 , pp. 1–28, 2016. @article{Golan2016, title = {Human intracranial recordings link suppressed transients rather than 'filling-in' to perceptual continuity across blinks}, author = {Tal Golan and Ido Davidesco and Meir Meshulam and David M Groppe and Pierre Mégevand and Erin M Yeagle and Matthew S Goldfinger and Michal Harel and Lucia Melloni and Charles E Schroeder and D L Deouell and Ashesh D Mehta and Rafael Malach}, doi = {10.7554/eLife.17243}, year = {2016}, date = {2016-01-01}, journal = {eLife}, volume = {5}, pages = {1--28}, abstract = {We hardly notice our eye blinks, yet an externally generated retinal interruption of a similar duration is perceptually salient. We examined the neural correlates of this perceptual distinction using intracranially measured ECoG signals from human visual cortex in 14 patients. In early visual areas (V1 and V2), the disappearance of the stimulus due to either invisible blinks or salient blank video frames ('gaps') led to a similar drop in activity level, followed by a positive overshoot beyond baseline, triggered by stimulus reappearance. Ascending the visual hierarchy, the reappearance-related overshoot gradually subsided for blinks but not for gaps. By contrast, the disappearance-related drop did not follow the perceptual distinction - it was actually slightly more pronounced for blinks than for gaps. 
These findings suggest that blinks' limited visibility compared with gaps is correlated with suppression of blink-related visual activity transients, rather than with 'filling-in' of the occluded content during blinks.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We hardly notice our eye blinks, yet an externally generated retinal interruption of a similar duration is perceptually salient. We examined the neural correlates of this perceptual distinction using intracranially measured ECoG signals from human visual cortex in 14 patients. In early visual areas (V1 and V2), the disappearance of the stimulus due to either invisible blinks or salient blank video frames ('gaps') led to a similar drop in activity level, followed by a positive overshoot beyond baseline, triggered by stimulus reappearance. Ascending the visual hierarchy, the reappearance-related overshoot gradually subsided for blinks but not for gaps. By contrast, the disappearance-related drop did not follow the perceptual distinction - it was actually slightly more pronounced for blinks than for gaps. These findings suggest that blinks' limited visibility compared with gaps is correlated with suppression of blink-related visual activity transients, rather than with 'filling-in' of the occluded content during blinks. |
Gil Gonen-Yaacovi; Ayelet Arazi; Nitzan Shahar; Anat Karmon; Shlomi Haar; Nachshon Meiran; Ilan Dinstein Increased ongoing neural variability in ADHD Journal Article Cortex, 81 , pp. 50–63, 2016. @article{GonenYaacovi2016, title = {Increased ongoing neural variability in ADHD}, author = {Gil Gonen-Yaacovi and Ayelet Arazi and Nitzan Shahar and Anat Karmon and Shlomi Haar and Nachshon Meiran and Ilan Dinstein}, doi = {10.1016/j.cortex.2016.04.010}, year = {2016}, date = {2016-01-01}, journal = {Cortex}, volume = {81}, pages = {50--63}, publisher = {Elsevier Ltd}, abstract = {Attention Deficit Hyperactivity Disorder (ADHD) has been described as a disorder where frequent lapses of attention impair the ability of an individual to focus/attend in a sustained manner, thereby generating abnormally large intra-individual behavioral variability across trials. Indeed, increased reaction time (RT) variability is a fundamental behavioral characteristic of individuals with ADHD found across a large number of cognitive tasks. But what is the underlying neurophysiology that might generate such behavioral instability? Here, we examined trial-by-trial EEG response variability to visual and auditory stimuli while subjects' attention was diverted to an unrelated task at the fixation cross. Comparisons between adult ADHD and control participants revealed that neural response variability was significantly larger in the ADHD group as compared with the control group in both sensory modalities. Importantly, larger trial-by-trial variability in ADHD was apparent before and after stimulus presentation as well as in trials where the stimulus was omitted, suggesting that ongoing (rather than stimulus-evoked) neural activity is continuously more variable (noisier) in ADHD. 
While the patho-physiological mechanisms causing this increased neural variability remain unknown, they appear to act continuously rather than being tied to a specific sensory or cognitive process.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Attention Deficit Hyperactivity Disorder (ADHD) has been described as a disorder where frequent lapses of attention impair the ability of an individual to focus/attend in a sustained manner, thereby generating abnormally large intra-individual behavioral variability across trials. Indeed, increased reaction time (RT) variability is a fundamental behavioral characteristic of individuals with ADHD found across a large number of cognitive tasks. But what is the underlying neurophysiology that might generate such behavioral instability? Here, we examined trial-by-trial EEG response variability to visual and auditory stimuli while subjects' attention was diverted to an unrelated task at the fixation cross. Comparisons between adult ADHD and control participants revealed that neural response variability was significantly larger in the ADHD group as compared with the control group in both sensory modalities. Importantly, larger trial-by-trial variability in ADHD was apparent before and after stimulus presentation as well as in trials where the stimulus was omitted, suggesting that ongoing (rather than stimulus-evoked) neural activity is continuously more variable (noisier) in ADHD. While the patho-physiological mechanisms causing this increased neural variability remain unknown, they appear to act continuously rather than being tied to a specific sensory or cognitive process. |
Praghajieeth Raajhen Santhana Gopalan; Otto Loberg; Jarmo A Hämäläinen; Paavo H T Leppänen Attentional processes in typically developing children as revealed using brain event-related potentials and their source localization in Attention Network Test Journal Article Scientific Reports, 9 , pp. 2940, 2019. @article{Gopalan2019, title = {Attentional processes in typically developing children as revealed using brain event-related potentials and their source localization in Attention Network Test}, author = {Praghajieeth Raajhen Santhana Gopalan and Otto Loberg and Jarmo A Hämäläinen and Paavo H T Leppänen}, doi = {10.1038/s41598-018-36947-3}, year = {2019}, date = {2019-12-01}, journal = {Scientific Reports}, volume = {9}, pages = {2940}, publisher = {Nature Publishing Group}, abstract = {Attention-related processes include three functional sub-components: alerting, orienting, and inhibition. We investigated these components using EEG-based, brain event-related potentials and their neuronal source activations during the Attention Network Test in typically developing school-aged children. Participants were asked to detect the swimming direction of the centre fish in a group of five fish. The target stimulus was either preceded by a cue (centre, double, or spatial) or no cue. An EEG using 128 electrodes was recorded for 83 children aged 12–13 years. RTs showed significant effects across all three sub-components of attention. Alerting and orienting (responses to double vs non-cued target stimulus and spatially vs centre-cued target stimulus, respectively) resulted in larger N1 amplitude, whereas inhibition (responses to incongruent vs congruent target stimulus) resulted in larger P3 amplitude. Neuronal source activation for the alerting effect was localized in the right anterior temporal and bilateral occipital lobes, for the orienting effect bilaterally in the occipital lobe, and for the inhibition effect in the medial prefrontal cortex and left anterior temporal lobe. 
Neuronal sources of ERPs revealed that sub-processes related to the attention network are different in children as compared to earlier adult fMRI studies, which was not evident from scalp ERPs.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Attention-related processes include three functional sub-components: alerting, orienting, and inhibition. We investigated these components using EEG-based, brain event-related potentials and their neuronal source activations during the Attention Network Test in typically developing school-aged children. Participants were asked to detect the swimming direction of the centre fish in a group of five fish. The target stimulus was either preceded by a cue (centre, double, or spatial) or no cue. An EEG using 128 electrodes was recorded for 83 children aged 12–13 years. RTs showed significant effects across all three sub-components of attention. Alerting and orienting (responses to double vs non-cued target stimulus and spatially vs centre-cued target stimulus, respectively) resulted in larger N1 amplitude, whereas inhibition (responses to incongruent vs congruent target stimulus) resulted in larger P3 amplitude. Neuronal source activation for the alerting effect was localized in the right anterior temporal and bilateral occipital lobes, for the orienting effect bilaterally in the occipital lobe, and for the inhibition effect in the medial prefrontal cortex and left anterior temporal lobe. Neuronal sources of ERPs revealed that sub-processes related to the attention network are different in children as compared to earlier adult fMRI studies, which was not evident from scalp ERPs. |
Tom A de Graaf; Felix Duecker; Martin H P Fernholz; Alexander T Sack Spatially specific vs. unspecific disruption of visual orientation perception using chronometric pre-stimulus TMS Journal Article Frontiers in Behavioral Neuroscience, 9 (5), pp. 1–11, 2015. @article{Graaf2015, title = {Spatially specific vs. unspecific disruption of visual orientation perception using chronometric pre-stimulus TMS}, author = {Tom A de Graaf and Felix Duecker and Martin H P Fernholz and Alexander T Sack}, doi = {10.3389/fnbeh.2015.00005}, year = {2015}, date = {2015-01-01}, journal = {Frontiers in Behavioral Neuroscience}, volume = {9}, number = {5}, pages = {1--11}, abstract = {Transcranial magnetic stimulation (TMS) over occipital cortex can impair visual processing. Such ‘TMS masking' has repeatedly been shown at several stimulus onset asynchronies (SOAs), with TMS pulses generally applied after the onset of a visual stimulus. Following increased interest in the neuronal state-dependency of visual processing, we recently explored the efficacy of TMS at ‘negative SOAs', when no visual processing can yet occur. We could reveal pre-stimulus TMS disruption, with results moreover hinting at two separate mechanisms in occipital cortex biasing subsequent orientation perception. Here we extended this work, including a chronometric design to map the temporal dynamics of spatially specific and unspecific mechanisms of state-dependent visual processing, while moreover controlling for TMS-induced pupil covering. TMS pulses applied 60-40 ms prior to a visual stimulus decreased orientation processing independent of stimulus location, while a local suppressive effect was found for TMS applied 30-10 ms pre-stimulus. 
These results contribute to our understanding of spatiotemporal mechanisms in occipital cortex underlying the state-dependency of visual processing, providing a basis for future work to link pre-stimulus TMS suppression effects to other known visual biasing mechanisms.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Transcranial magnetic stimulation (TMS) over occipital cortex can impair visual processing. Such ‘TMS masking' has repeatedly been shown at several stimulus onset asynchronies (SOAs), with TMS pulses generally applied after the onset of a visual stimulus. Following increased interest in the neuronal state-dependency of visual processing, we recently explored the efficacy of TMS at ‘negative SOAs', when no visual processing can yet occur. We could reveal pre-stimulus TMS disruption, with results moreover hinting at two separate mechanisms in occipital cortex biasing subsequent orientation perception. Here we extended this work, including a chronometric design to map the temporal dynamics of spatially specific and unspecific mechanisms of state-dependent visual processing, while moreover controlling for TMS-induced pupil covering. TMS pulses applied 60-40 ms prior to a visual stimulus decreased orientation processing independent of stimulus location, while a local suppressive effect was found for TMS applied 30-10 ms pre-stimulus. These results contribute to our understanding of spatiotemporal mechanisms in occipital cortex underlying the state-dependency of visual processing, providing a basis for future work to link pre-stimulus TMS suppression effects to other known visual biasing mechanisms. |
Sven-Thomas Graupner; Boris M Velichkovsky; Sebastian Pannasch; Johannes Marx Surprise, surprise: Two distinct components in the visually evoked distractor effect Journal Article Psychophysiology, 44 (2), pp. 251–261, 2007. @article{Graupner2007, title = {Surprise, surprise: Two distinct components in the visually evoked distractor effect}, author = {Sven-Thomas Graupner and Boris M Velichkovsky and Sebastian Pannasch and Johannes Marx}, doi = {10.1111/j.1469-8986.2007.00504.x}, year = {2007}, date = {2007-01-01}, journal = {Psychophysiology}, volume = {44}, number = {2}, pages = {251--261}, abstract = {The distractor effect is an inhibition of saccades shortly after a sudden visual event. It has been explained both as an oculomotor reflex and as a manifestation of the orienting response. To clarify which explanation is more appropriate, we investigated a possible habituation of this effect. Visual and auditory distractors were presented at gaze-contingent intervals during the perception of meaningful pictures. Both reflexlike and modifiable components were present in the visual distractor effect, with latencies of about 110 and 180 ms, respectively. The influence of visual and auditory distractors on saccades preceded the earliest changes in cortical ERPs. Only for long-term habituation in the visual modality was a correlation with ERPs (N1) found.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The distractor effect is an inhibition of saccades shortly after a sudden visual event. It has been explained both as an oculomotor reflex and as a manifestation of the orienting response. To clarify which explanation is more appropriate, we investigated a possible habituation of this effect. Visual and auditory distractors were presented at gaze-contingent intervals during the perception of meaningful pictures. Both reflexlike and modifiable components were present in the visual distractor effect, with latencies of about 110 and 180 ms, respectively. 
The influence of visual and auditory distractors on saccades preceded the earliest changes in cortical ERPs. Only for long-term habituation in the visual modality was a correlation with ERPs (N1) found. |
Sven-Thomas Graupner; Sebastian Pannasch; Boris M Velichkovsky Saccadic context indicates information processing within visual fixations: Evidence from event-related potentials and eye-movements analysis of the distractor effect Journal Article International Journal of Psychophysiology, 80 (1), pp. 54–62, 2011. @article{Graupner2011, title = {Saccadic context indicates information processing within visual fixations: Evidence from event-related potentials and eye-movements analysis of the distractor effect}, author = {Sven-Thomas Graupner and Sebastian Pannasch and Boris M Velichkovsky}, doi = {10.1016/j.ijpsycho.2011.01.013}, year = {2011}, date = {2011-01-01}, journal = {International Journal of Psychophysiology}, volume = {80}, number = {1}, pages = {54--62}, publisher = {Elsevier B.V.}, abstract = {Attention, visual information processing, and oculomotor control are integrated functions of closely related brain mechanisms. Recently, it was shown that the processing of visual distractors appearing during a fixation is modulated by the amplitude of its preceding saccade (Pannasch & Velichkovsky, 2009). So far, this was demonstrated only at the behavioral level in terms of saccadic inhibition. The present study investigated distractor-related brain activity with cortical eye fixation-related potentials (EFRPs). Moreover, the following saccade was included as an additional classification criterion. Eye movements and EFRPs were recorded during free visual exploration of paintings. During some of the fixations, a visual distractor was shown as an annulus around the fixation position, 100 ms after the fixation onset. The saccadic context of a fixation was classified by its preceding and following saccade amplitudes with the cut-off criterion set to 4° of visual angle. The prolongation of fixation duration induced by distractors was largest for fixations preceded and followed by short saccades. EFRP data revealed a difference in distractor-related P2 amplitude between the saccadic context conditions, following the same trend as in eye movements. 
Furthermore, influences of the following saccade amplitude on the latency of the saccadic inhibition and on the N1 amplitude were found. The EFRP results cannot be explained by the influence of saccades per se since this bias was removed by subtracting the baseline from the distractor EFRP. Rather, the data suggest that saccadic context indicates differences in how information is processed within single visual fixations.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Attention, visual information processing, and oculomotor control are integrated functions of closely related brain mechanisms. Recently, it was shown that the processing of visual distractors appearing during a fixation is modulated by the amplitude of its preceding saccade (Pannasch & Velichkovsky, 2009). So far, this was demonstrated only at the behavioral level in terms of saccadic inhibition. The present study investigated distractor-related brain activity with cortical eye fixation-related potentials (EFRPs). Moreover, the following saccade was included as an additional classification criterion. Eye movements and EFRPs were recorded during free visual exploration of paintings. During some of the fixations, a visual distractor was shown as an annulus around the fixation position, 100 ms after the fixation onset. The saccadic context of a fixation was classified by its preceding and following saccade amplitudes with the cut-off criterion set to 4° of visual angle. The prolongation of fixation duration induced by distractors was largest for fixations preceded and followed by short saccades. EFRP data revealed a difference in distractor-related P2 amplitude between the saccadic context conditions, following the same trend as in eye movements. Furthermore, influences of the following saccade amplitude on the latency of the saccadic inhibition and on the N1 amplitude were found. 
The EFRP results cannot be explained by the influence of saccades per se since this bias was removed by subtracting the baseline from the distractor EFRP. Rather, the data suggest that saccadic context indicates differences in how information is processed within single visual fixations. |
Sarah Gregory; Marco Fusca; Geraint Rees; Samuel D Schwarzkopf; Gareth Barnes Gamma frequency and the spatial tuning of primary visual cortex Journal Article PLoS ONE, 11 (6), pp. 1–12, 2016. @article{Gregory2016a, title = {Gamma frequency and the spatial tuning of primary visual cortex}, author = {Sarah Gregory and Marco Fusca and Geraint Rees and Samuel D Schwarzkopf and Gareth Barnes}, doi = {10.1371/journal.pone.0157374}, year = {2016}, date = {2016-01-01}, journal = {PLoS ONE}, volume = {11}, number = {6}, pages = {1--12}, abstract = {Visual stimulation produces oscillatory gamma responses in human primary visual cortex (V1) that also relate to visual perception. We have shown previously that peak gamma frequency positively correlates with central V1 cortical surface area. We hypothesized that people with larger V1 would have smaller receptive fields and that receptive field size, not V1 area, might explain this relationship. Here we set out to test this hypothesis directly by investigating the relationship between fMRI estimated population receptive field (pRF) size and gamma frequency in V1. We stimulated both the near-centre and periphery of the visual field using both large and small stimuli in each location and replicated our previous finding of a positive correlation between V1 surface area and peak gamma frequency. Counter to our expectation, we found that between participants V1 size (and not pRF size) accounted for most of the variability in gamma frequency. Within-participants we found that gamma frequency increased, rather than decreased, with stimulus eccentricity directly contradicting our initial hypothesis.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual stimulation produces oscillatory gamma responses in human primary visual cortex (V1) that also relate to visual perception. We have shown previously that peak gamma frequency positively correlates with central V1 cortical surface area. 
We hypothesized that people with larger V1 would have smaller receptive fields and that receptive field size, not V1 area, might explain this relationship. Here we set out to test this hypothesis directly by investigating the relationship between fMRI estimated population receptive field (pRF) size and gamma frequency in V1. We stimulated both the near-centre and periphery of the visual field using both large and small stimuli in each location and replicated our previous finding of a positive correlation between V1 surface area and peak gamma frequency. Counter to our expectation, we found that between participants V1 size (and not pRF size) accounted for most of the variability in gamma frequency. Within-participants we found that gamma frequency increased, rather than decreased, with stimulus eccentricity directly contradicting our initial hypothesis. |
M Guitart-Masip; G R Barnes; A Horner; Markus Bauer; Raymond J Dolan; E Duzel Synchronization of medial temporal lobe and prefrontal rhythms in human decision making Journal Article Journal of Neuroscience, 33 (2), pp. 442–451, 2013. @article{GuitartMasip2013, title = {Synchronization of medial temporal lobe and prefrontal rhythms in human decision making}, author = {M Guitart-Masip and G R Barnes and A Horner and Markus Bauer and Raymond J Dolan and E Duzel}, doi = {10.1523/JNEUROSCI.2573-12.2013}, year = {2013}, date = {2013-01-01}, journal = {Journal of Neuroscience}, volume = {33}, number = {2}, pages = {442--451}, abstract = {Optimal decision making requires that we integrate mnemonic information regarding previous decisions with value signals that entail likely rewards and punishments. The fact that memory and value signals appear to be coded by segregated brain regions, the hippocampus in the case of memory and sectors of prefrontal cortex in the case of value, raises the question as to how they are integrated during human decision making. Using magnetoencephalography to study healthy human participants, we show increased theta oscillations over frontal and temporal sensors during nonspatial decisions based on memories from previous trials. Using source reconstruction we found that the medial temporal lobe (MTL), in a location compatible with the anterior hippocampus, and the anterior cingulate cortex in the medial wall of the frontal lobe are the source of this increased theta power. Moreover, we observed a correlation between theta power in the MTL source and behavioral performance in decision making, supporting a role for MTL theta oscillations in decision-making performance. These MTL theta oscillations were synchronized with several prefrontal sources, including lateral superior frontal gyrus, dorsal anterior cingulate gyrus, and medial frontopolar cortex. 
There was no relationship between the strength of synchronization and the expected value of choices. Our results indicate a mnemonic guidance of human decision making, beyond anticipation of expected reward, is supported by hippocampal-prefrontal theta synchronization.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Optimal decision making requires that we integrate mnemonic information regarding previous decisions with value signals that entail likely rewards and punishments. The fact that memory and value signals appear to be coded by segregated brain regions, the hippocampus in the case of memory and sectors of prefrontal cortex in the case of value, raises the question as to how they are integrated during human decision making. Using magnetoencephalography to study healthy human participants, we show increased theta oscillations over frontal and temporal sensors during nonspatial decisions based on memories from previous trials. Using source reconstruction we found that the medial temporal lobe (MTL), in a location compatible with the anterior hippocampus, and the anterior cingulate cortex in the medial wall of the frontal lobe are the source of this increased theta power. Moreover, we observed a correlation between theta power in the MTL source and behavioral performance in decision making, supporting a role for MTL theta oscillations in decision-making performance. These MTL theta oscillations were synchronized with several prefrontal sources, including lateral superior frontal gyrus, dorsal anterior cingulate gyrus, and medial frontopolar cortex. There was no relationship between the strength of synchronization and the expected value of choices. Our results indicate a mnemonic guidance of human decision making, beyond anticipation of expected reward, is supported by hippocampal-prefrontal theta synchronization. |
Rasa Gulbinaite; Diane H M Roozendaal; Rufin VanRullen Attention differentially modulates the amplitude of resonance frequencies in the visual cortex Journal Article NeuroImage, 203 , pp. 1–17, 2019. @article{Gulbinaite2019, title = {Attention differentially modulates the amplitude of resonance frequencies in the visual cortex}, author = {Rasa Gulbinaite and Diane H M Roozendaal and Rufin VanRullen}, doi = {10.1016/j.neuroimage.2019.116146}, year = {2019}, date = {2019-01-01}, journal = {NeuroImage}, volume = {203}, pages = {1--17}, abstract = {Rhythmic visual stimuli (flicker) elicit rhythmic brain responses at the frequency of the stimulus, and attention generally enhances these oscillatory brain responses (steady state visual evoked potentials, SSVEPs). Although SSVEP responses have been tested for flicker frequencies up to 100 Hz [Herrmann, 2001], effects of attention on SSVEP amplitude have only been reported for lower frequencies (up to ~30 Hz), with no systematic comparison across a wide, finely sampled frequency range. Does attention modulate SSVEP amplitude at higher flicker frequencies (gamma band, 30–80 Hz), and is attentional modulation constant across frequencies? By isolating SSVEP responses from the broadband EEG signal using a multivariate spatiotemporal source separation method, we demonstrate that flicker in the alpha and gamma bands elicit strongest and maximally phase stable brain responses (resonance), on which the effect of attention is opposite: positive for gamma and negative for alpha. 
Finding subject-specific gamma resonance frequency and a positive attentional modulation of gamma-band SSVEPs points to the untapped potential of flicker as a non-invasive tool for studying the causal effects of interactions between visual gamma-band rhythmic stimuli and endogenous gamma oscillations on perception and attention.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Rhythmic visual stimuli (flicker) elicit rhythmic brain responses at the frequency of the stimulus, and attention generally enhances these oscillatory brain responses (steady state visual evoked potentials, SSVEPs). Although SSVEP responses have been tested for flicker frequencies up to 100 Hz [Herrmann, 2001], effects of attention on SSVEP amplitude have only been reported for lower frequencies (up to ~30 Hz), with no systematic comparison across a wide, finely sampled frequency range. Does attention modulate SSVEP amplitude at higher flicker frequencies (gamma band, 30–80 Hz), and is attentional modulation constant across frequencies? By isolating SSVEP responses from the broadband EEG signal using a multivariate spatiotemporal source separation method, we demonstrate that flicker in the alpha and gamma bands elicit strongest and maximally phase stable brain responses (resonance), on which the effect of attention is opposite: positive for gamma and negative for alpha. Finding subject-specific gamma resonance frequency and a positive attentional modulation of gamma-band SSVEPs points to the untapped potential of flicker as a non-invasive tool for studying the causal effects of interactions between visual gamma-band rhythmic stimuli and endogenous gamma oscillations on perception and attention. |
Seref Can Gurel; Miguel Castelo-Branco; Alexander T Sack; Felix Duecker Assessing the functional role of frontal eye fields in voluntary and reflexive saccades using continuous theta burst stimulation Journal Article Frontiers in Neuroscience, 12 , pp. 1–11, 2018. @article{Gurel2018, title = {Assessing the functional role of frontal eye fields in voluntary and reflexive saccades using continuous theta burst stimulation}, author = {Seref Can Gurel and Miguel Castelo-Branco and Alexander T Sack and Felix Duecker}, doi = {10.3389/fnins.2018.00944}, year = {2018}, date = {2018-01-01}, journal = {Frontiers in Neuroscience}, volume = {12}, pages = {1--11}, abstract = {The frontal eye fields (FEFs) are core nodes of the oculomotor system contributing to saccade planning, control, and execution. Here, we aimed to reveal hemispheric asymmetries between left and right FEF in both voluntary and reflexive saccades toward horizontal and vertical targets. To this end, we applied fMRI-guided continuous theta burst stimulation (cTBS) over either left or right FEF and assessed the consequences of this disruption on saccade latencies. Using a fully counterbalanced within-subject design, we measured saccade latencies before and after the application of cTBS in eighteen healthy volunteers. In general, saccade latencies on both tasks were susceptible to our experimental manipulations, that is, voluntary saccades were slower than reflexive saccades, and downward saccades were slower than upward saccades. Contrary to our expectations, we failed to reveal any TMS-related effects on saccade latencies, and Bayesian analyses provided strong support in favor of a TMS null result for both tasks. Keeping in mind the interpretative challenges of null results, we discuss possible explanations for this absence of behavioral TMS effects, focusing on methodological differences compared to previous studies (task parameters and online vs. offline TMS interventions). 
We also speculate about what our results might reveal about the functional role of FEF.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The frontal eye fields (FEFs) are core nodes of the oculomotor system contributing to saccade planning, control, and execution. Here, we aimed to reveal hemispheric asymmetries between left and right FEF in both voluntary and reflexive saccades toward horizontal and vertical targets. To this end, we applied fMRI-guided continuous theta burst stimulation (cTBS) over either left or right FEF and assessed the consequences of this disruption on saccade latencies. Using a fully counterbalanced within-subject design, we measured saccade latencies before and after the application of cTBS in eighteen healthy volunteers. In general, saccade latencies on both tasks were susceptible to our experimental manipulations, that is, voluntary saccades were slower than reflexive saccades, and downward saccades were slower than upward saccades. Contrary to our expectations, we failed to reveal any TMS-related effects on saccade latencies, and Bayesian analyses provided strong support in favor of a TMS null result for both tasks. Keeping in mind the interpretative challenges of null results, we discuss possible explanations for this absence of behavioral TMS effects, focusing on methodological differences compared to previous studies (task parameters and online vs. offline TMS interventions). We also speculate about what our results might reveal about the functional role of FEF. |
Tjerk P Gutteling; Helene M van Ettinger-Veenstra; Leon J Kenemans; Sebastiaan F W Neggers Lateralized frontal eye field activity precedes occipital activity shortly before saccades: Evidence for cortico-cortical feedback as a mechanism underlying covert attention shifts Journal Article Journal of Cognitive Neuroscience, 22 (9), pp. 1931–1943, 2010. @article{Gutteling2010, title = {Lateralized frontal eye field activity precedes occipital activity shortly before saccades: Evidence for cortico-cortical feedback as a mechanism underlying covert attention shifts}, author = {Tjerk P Gutteling and Helene M van Ettinger-Veenstra and Leon J Kenemans and Sebastiaan F W Neggers}, doi = {10.1162/jocn.2009.21342}, year = {2010}, date = {2010-01-01}, journal = {Journal of Cognitive Neuroscience}, volume = {22}, number = {9}, pages = {1931--1943}, abstract = {When an eye movement is prepared, attention is shifted toward the saccade end-goal. This coupling of eye movements and spatial attention is thought to be mediated by cortical connections between the FEFs and the visual cortex. Here, we present evidence for the existence of these connections. A visual discrimination task was performed while recording the EEG. Discrimination performance was significantly improved when the discrimination target and the saccade target matched. EEG results show that frontal activity precedes occipital activity contralateral to saccade direction when the saccade is prepared but not yet executed; these effects were absent in fixation conditions. This is consistent with the idea that the FEF exerts a direct modulatory influence on the visual cortex and enhances perception at the saccade end-goal.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When an eye movement is prepared, attention is shifted toward the saccade end-goal. This coupling of eye movements and spatial attention is thought to be mediated by cortical connections between the FEFs and the visual cortex. Here, we present evidence for the existence of these connections. A visual discrimination task was performed while recording the EEG. 
Discrimination performance was significantly improved when the discrimination target and the saccade target matched. EEG results show that frontal activity precedes occipital activity contralateral to saccade direction when the saccade is prepared but not yet executed; these effects were absent in fixation conditions. This is consistent with the idea that the FEF exerts a direct modulatory influence on the visual cortex and enhances perception at the saccade end-goal. |
Tjerk P Gutteling; Luc P J Selen; Pieter W Medendorp Parallax-sensitive remapping of visual space in occipito-parietal alpha-band activity during whole-body motion Journal Article Journal of Neurophysiology, 113 , pp. 1574–1584, 2015. @article{Gutteling2015, title = {Parallax-sensitive remapping of visual space in occipito-parietal alpha-band activity during whole-body motion}, author = {Tjerk P Gutteling and Luc P J Selen and Pieter W Medendorp}, year = {2015}, date = {2015-01-01}, journal = {Journal of Neurophysiology}, volume = {113}, pages = {1574--1584}, abstract = {Despite the constantly changing retinal image due to eye, head, and body movements, we are able to maintain a stable representation of the visual environment. Various studies on retinal image shifts caused by saccades have suggested that occipital and parietal areas correct for these perturbations by a gaze-centered remapping of the neural image. However, such a uniform, rotational, remapping mechanism cannot work during translations when objects shift on the retina in a more complex, depth-dependent fashion due to motion parallax. Here we tested whether the brain's activity patterns show parallax-sensitive remapping of remembered visual space during whole-body motion. Under continuous recording of electroencephalography (EEG), we passively translated human subjects while they had to remember the location of a world-fixed visual target, briefly presented in front of or behind the eyes' fixation point prior to the motion. Using a psychometric approach we assessed the quality of the memory update, which had to be made based on vestibular feedback and other extraretinal motion cues. All subjects showed a variable amount of parallax-sensitive updating errors, i.e., the direction of the errors depended on the depth of the target relative to fixation. The EEG recordings show a neural correlate of this parallax-sensitive remapping in the alpha-band power at occipito-parietal electrodes. 
At parietal electrodes, the strength of these alpha-band modulations correlated significantly with updating performance. These results suggest that alpha-band oscillatory activity reflects the time-varying updating of gaze-centered spatial information during parallax-sensitive remapping during whole-body motion.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Despite the constantly changing retinal image due to eye, head, and body movements, we are able to maintain a stable representation of the visual environment. Various studies on retinal image shifts caused by saccades have suggested that occipital and parietal areas correct for these perturbations by a gaze-centered remapping of the neural image. However, such a uniform, rotational, remapping mechanism cannot work during translations when objects shift on the retina in a more complex, depth-dependent fashion due to motion parallax. Here we tested whether the brain's activity patterns show parallax-sensitive remapping of remembered visual space during whole-body motion. Under continuous recording of electroencephalography (EEG), we passively translated human subjects while they had to remember the location of a world-fixed visual target, briefly presented in front of or behind the eyes' fixation point prior to the motion. Using a psychometric approach we assessed the quality of the memory update, which had to be made based on vestibular feedback and other extraretinal motion cues. All subjects showed a variable amount of parallax-sensitive updating errors, i.e., the direction of the errors depended on the depth of the target relative to fixation. The EEG recordings show a neural correlate of this parallax-sensitive remapping in the alpha-band power at occipito-parietal electrodes. At parietal electrodes, the strength of these alpha-band modulations correlated significantly with updating performance. 
These results suggest that alpha-band oscillatory activity reflects the time-varying updating of gaze-centered spatial information during parallax-sensitive remapping during whole-body motion. |
Tjerk P Gutteling; Pieter W Medendorp Role of alpha-band oscillations in spatial updating across whole body motion Journal Article Frontiers in Psychology, 7 , pp. 1–12, 2016. @article{Gutteling2016, title = {Role of alpha-band oscillations in spatial updating across whole body motion}, author = {Tjerk P Gutteling and Pieter W Medendorp}, doi = {10.3389/fpsyg.2016.00671}, year = {2016}, date = {2016-01-01}, journal = {Frontiers in Psychology}, volume = {7}, pages = {1--12}, abstract = {When moving around in the world, we have to keep track of important locations in our surroundings. In this process, called spatial updating, we must estimate our body motion and correct representations of memorized spatial locations in accordance with this motion. While the behavioral characteristics of spatial updating across whole body motion have been studied in detail, its neural implementation lacks detailed study. Here we use electro-encephalography (EEG) to distinguish various spectral components of this process. Subjects gazed at a central body-fixed point in otherwise complete darkness, while a target was briefly flashed, either left or right from this point. Subjects had to remember the location of this target as either moving along with the body or remaining fixed in the world while being translated sideways on a passive motion platform. After the motion, subjects had to indicate the remembered target location in the instructed reference frame using a mouse response. While the body motion, as detected by the vestibular system, should not affect the representation of body-fixed targets, it should interact with the representation of a world-centered target to update its location relative to the body. We show that the initial presentation of the visual target induced a reduction of alpha band power in contralateral parieto-occipital areas, which evolved to a sustained increase during the subsequent memory period. 
Motion of the body led to a reduction of alpha band power in central parietal areas extending to lateral parieto-temporal areas, irrespective of whether the targets had to be memorized relative to world or body. When updating a world-fixed target, its internal representation shifts hemispheres, only when subjects' behavioral responses suggested an update across the body midline. Our results suggest that parietal cortex is involved in both self-motion estimation and the selective application of this motion information to maintaining target locations as fixed in the world or fixed to the body.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When moving around in the world, we have to keep track of important locations in our surroundings. In this process, called spatial updating, we must estimate our body motion and correct representations of memorized spatial locations in accordance with this motion. While the behavioral characteristics of spatial updating across whole body motion have been studied in detail, its neural implementation lacks detailed study. Here we use electro-encephalography (EEG) to distinguish various spectral components of this process. Subjects gazed at a central body-fixed point in otherwise complete darkness, while a target was briefly flashed, either left or right from this point. Subjects had to remember the location of this target as either moving along with the body or remaining fixed in the world while being translated sideways on a passive motion platform. After the motion, subjects had to indicate the remembered target location in the instructed reference frame using a mouse response. While the body motion, as detected by the vestibular system, should not affect the representation of body-fixed targets, it should interact with the representation of a world-centered target to update its location relative to the body. 
We show that the initial presentation of the visual target induced a reduction of alpha band power in contralateral parieto-occipital areas, which evolved to a sustained increase during the subsequent memory period. Motion of the body led to a reduction of alpha band power in central parietal areas extending to lateral parieto-temporal areas, irrespective of whether the targets had to be memorized relative to world or body. When updating a world-fixed target, its internal representation shifts hemispheres, only when subjects' behavioral responses suggested an update across the body midline. Our results suggest that parietal cortex is involved in both self-motion estimation and the selective application of this motion information to maintaining target locations as fixed in the world or fixed to the body. |
Julia Habicht; Mareike Finke; Tobias Neher Auditory acclimatization to bilateral hearing aids: Effects on sentence-in-noise processing times and speech-evoked potentials Journal Article Ear and Hearing, 39 (1), pp. 161–171, 2018. @article{Habicht2018, title = {Auditory acclimatization to bilateral hearing aids: Effects on sentence-in-noise processing times and speech-evoked potentials}, author = {Julia Habicht and Mareike Finke and Tobias Neher}, doi = {10.1097/AUD.0000000000000476}, year = {2018}, date = {2018-01-01}, journal = {Ear and Hearing}, volume = {39}, number = {1}, pages = {161--171}, abstract = {Objectives: Using a longitudinal design, the present study sought to substantiate indications from two previous cross-sectional studies that hearing aid (HA) experience leads to improved speech processing abilities as quantified using eye-gaze measurements. Another aim was to explore potential concomitant changes in event-related potentials (ERPs) to speech stimuli. Design: Groups of elderly novice (novHA) and experienced (expHA) HA users matched in terms of age and working memory capacity participated. The novHA users were acclimatized to bilateral HA fittings for up to 24 weeks. The expHA users continued to use their own HAs during the same period. The participants' speech processing abilities were assessed after 0 weeks (novHA: N = 16; expHA: N = 14), 12 weeks (novHA: N = 16; expHA: N = 14), and 24 weeks (N = 10 each). To that end, an eye-tracking paradigm was used for estimating how quickly the participants could grasp the meaning of sentences presented against background noise together with two similar pictures that either correctly or incorrectly depicted the meaning conveyed by the sentences (the “processing time”). Additionally, ERPs were measured with an active oddball paradigm requiring the participants to categorize word stimuli as living (targets) or nonliving (nontargets) entities. 
For all measurements, the stimuli were spectrally shaped according to individual real-ear insertion gains and presented via earphones. Results: Concerning the processing times, no changes across time were found for the expHA group. After 0 weeks of HA use, the novHA group had significantly longer (poorer) processing times than the expHA group, consistent with previous findings. After 24 weeks, a significant mean improvement of ~30% was observed for the novHA users, leading to a performance comparable with that of the expHA group. Concerning the ERPs, no changes across time were found. Conclusions: The results from this exploratory study are consistent with the view that auditory acclimatization to HAs positively impacts speech comprehension in noise. Further research is needed to substantiate them.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objectives: Using a longitudinal design, the present study sought to substantiate indications from two previous cross-sectional studies that hearing aid (HA) experience leads to improved speech processing abilities as quantified using eye-gaze measurements. Another aim was to explore potential concomitant changes in event-related potentials (ERPs) to speech stimuli. Design: Groups of elderly novice (novHA) and experienced (expHA) HA users matched in terms of age and working memory capacity participated. The novHA users were acclimatized to bilateral HA fittings for up to 24 weeks. The expHA users continued to use their own HAs during the same period. The participants' speech processing abilities were assessed after 0 weeks (novHA: N = 16; expHA: N = 14), 12 weeks (novHA: N = 16; expHA: N = 14), and 24 weeks (N = 10 each). 
To that end, an eye-tracking paradigm was used for estimating how quickly the participants could grasp the meaning of sentences presented against background noise together with two similar pictures that either correctly or incorrectly depicted the meaning conveyed by the sentences (the “processing time”). Additionally, ERPs were measured with an active oddball paradigm requiring the participants to categorize word stimuli as living (targets) or nonliving (nontargets) entities. For all measurements, the stimuli were spectrally shaped according to individual real-ear insertion gains and presented via earphones. Results: Concerning the processing times, no changes across time were found for the expHA group. After 0 weeks of HA use, the novHA group had significantly longer (poorer) processing times than the expHA group, consistent with previous findings. After 24 weeks, a significant mean improvement of ~30% was observed for the novHA users, leading to a performance comparable with that of the expHA group. Concerning the ERPs, no changes across time were found. Conclusions: The results from this exploratory study are consistent with the view that auditory acclimatization to HAs positively impacts speech comprehension in noise. Further research is needed to substantiate them. |
Nicole Hakim; Kirsten C S Adam; Eren Gunseli; Edward Awh; Edward K Vogel Dissecting the neural focus of attention reveals distinct processes for spatial attention and object-based storage in visual working memory Journal Article Psychological Science, 30 (4), pp. 526–540, 2019. @article{Hakim2019, title = {Dissecting the neural focus of attention reveals distinct processes for spatial attention and object-based storage in visual working memory}, author = {Nicole Hakim and Kirsten C S Adam and Eren Gunseli and Edward Awh and Edward K Vogel}, doi = {10.1177/0956797619830384}, year = {2019}, date = {2019-01-01}, journal = {Psychological Science}, volume = {30}, number = {4}, pages = {526--540}, abstract = {Complex cognition relies on both on-line representations in working memory (WM), said to reside in the focus of attention, and passive off-line representations of related information. Here, we dissected the focus of attention by showing that distinct neural signals index the on-line storage of objects and sustained spatial attention. We recorded electroencephalogram (EEG) activity during two tasks that employed identical stimulus displays but varied the relative demands for object storage and spatial attention. We found distinct delay-period signatures for an attention task (which required only spatial attention) and a WM task (which invoked both spatial attention and object storage). Although both tasks required active maintenance of spatial information, only the WM task elicited robust contralateral delay activity that was sensitive to mnemonic load. Thus, we argue that the focus of attention is maintained via a collaboration between distinct processes for covert spatial orienting and object-based storage.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Complex cognition relies on both on-line representations in working memory (WM), said to reside in the focus of attention, and passive off-line representations of related information. 
Here, we dissected the focus of attention by showing that distinct neural signals index the on-line storage of objects and sustained spatial attention. We recorded electroencephalogram (EEG) activity during two tasks that employed identical stimulus displays but varied the relative demands for object storage and spatial attention. We found distinct delay-period signatures for an attention task (which required only spatial attention) and a WM task (which invoked both spatial attention and object storage). Although both tasks required active maintenance of spatial information, only the WM task elicited robust contralateral delay activity that was sensitive to mnemonic load. Thus, we argue that the focus of attention is maintained via a collaboration between distinct processes for covert spatial orienting and object-based storage. |
Nicole Hakim; Tobias Feldmann-Wüstefeld; Edward Awh; Edward K Vogel Perturbing neural representations of working memory with task-irrelevant interruption Journal Article Journal of Cognitive Neuroscience, 32 (3), pp. 558–569, 2019. @article{Hakim2019a, title = {Perturbing neural representations of working memory with task-irrelevant interruption}, author = {Nicole Hakim and Tobias Feldmann-Wüstefeld and Edward Awh and Edward K Vogel}, doi = {10.1101/716613}, year = {2019}, date = {2019-01-01}, journal = {Journal of Cognitive Neuroscience}, volume = {32}, number = {3}, pages = {558--569}, abstract = {Working memory maintains information so that it can be used in complex cognitive tasks. A key challenge for this system is to maintain relevant information in the face of task-irrelevant perturbations. Across two experiments, we investigated the impact of task-irrelevant interruptions on neural representations of working memory. We recorded EEG activity in humans while they performed a working memory task. On a subset of trials, we interrupted participants with salient but task-irrelevant objects. To track the impact of these task-irrelevant interruptions on neural representations of working memory, we measured two well-characterized, temporally sensitive EEG markers that reflect active, prioritized working memory representations: the contralateral delay activity and lateralized alpha power (8–12 Hz). After interruption, we found that contralateral delay activity amplitude momentarily sustained but was gone by the end of the trial. Lateralized alpha power was immediately influenced by the interrupters but recovered by the end of the trial. This suggests that dissociable neural processes contribute to the maintenance of working memory information and that brief irrelevant onsets disrupt two distinct online aspects of working memory. 
In addition, we found that task expectancy modulated the timing and magnitude of how these two neural signals responded to task-irrelevant interruptions, suggesting that the brain's response to task-irrelevant interruption is shaped by task context.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Working memory maintains information so that it can be used in complex cognitive tasks. A key challenge for this system is to maintain relevant information in the face of task-irrelevant perturbations. Across two experiments, we investigated the impact of task-irrelevant interruptions on neural representations of working memory. We recorded EEG activity in humans while they performed a working memory task. On a subset of trials, we interrupted participants with salient but task-irrelevant objects. To track the impact of these task-irrelevant interruptions on neural representations of working memory, we measured two well-characterized, temporally sensitive EEG markers that reflect active, prioritized working memory representations: the contralateral delay activity and lateralized alpha power (8–12 Hz). After interruption, we found that contralateral delay activity amplitude momentarily sustained but was gone by the end of the trial. Lateralized alpha power was immediately influenced by the interrupters but recovered by the end of the trial. This suggests that dissociable neural processes contribute to the maintenance of working memory information and that brief irrelevant onsets disrupt two distinct online aspects of working memory. In addition, we found that task expectancy modulated the timing and magnitude of how these two neural signals responded to task-irrelevant interruptions, suggesting that the brain's response to task-irrelevant interruption is shaped by task context. |
Carlos M Hamamé; Juan R Vidal; Marcela Perrone-Bertolotti; Tomás Ossandón; Karim Jerbi; Philippe Kahane; Olivier Bertrand; Jean Philippe Lachaux Functional selectivity in the human occipitotemporal cortex during natural vision: Evidence from combined intracranial EEG and eye-tracking Journal Article NeuroImage, 95 , pp. 276–286, 2014. @article{Hamame2014, title = {Functional selectivity in the human occipitotemporal cortex during natural vision: Evidence from combined intracranial EEG and eye-tracking}, author = {Carlos M Hamamé and Juan R Vidal and Marcela Perrone-Bertolotti and Tomás Ossandón and Karim Jerbi and Philippe Kahane and Olivier Bertrand and Jean Philippe Lachaux}, doi = {10.1016/j.neuroimage.2014.03.025}, year = {2014}, date = {2014-01-01}, journal = {NeuroImage}, volume = {95}, pages = {276--286}, publisher = {Elsevier Inc.}, abstract = {Eye movements are a constant and essential component of natural vision, yet, most of our knowledge about the human visual system comes from experiments that restrict them. This experimental constraint is mostly in place to control visual stimuli presentation and to avoid artifacts in non-invasive measures of brain activity, however, this limitation can be overcome with intracranial EEG (iEEG) recorded from epilepsy patients. Moreover, the high-frequency components of the iEEG signal (between about 50 and 150 Hz) can provide a proxy of population-level spiking activity in any cortical area during free-viewing. We combined iEEG with high precision eye-tracking to study fine temporal dynamics and functional specificity in the fusiform face (FFA) and visual word form area (VWFA) while patients inspected natural pictures containing faces and text. We defined the first local measure of visual (electrophysiological) responsiveness adapted to free-viewing in humans: amplitude modulations in the high-frequency activity range (50-150 Hz) following fixations (fixation-related high-frequency response). 
We showed that despite the large size of receptive fields in the ventral occipito-temporal cortex, neural activity during natural vision of realistic cluttered scenes is mostly dependent upon the category of the foveated stimulus - suggesting that category-specificity is preserved during free-viewing and that attention mechanisms might filter out the influence of objects surrounding the fovea.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Eye movements are a constant and essential component of natural vision, yet, most of our knowledge about the human visual system comes from experiments that restrict them. This experimental constraint is mostly in place to control visual stimuli presentation and to avoid artifacts in non-invasive measures of brain activity, however, this limitation can be overcome with intracranial EEG (iEEG) recorded from epilepsy patients. Moreover, the high-frequency components of the iEEG signal (between about 50 and 150 Hz) can provide a proxy of population-level spiking activity in any cortical area during free-viewing. We combined iEEG with high precision eye-tracking to study fine temporal dynamics and functional specificity in the fusiform face (FFA) and visual word form area (VWFA) while patients inspected natural pictures containing faces and text. We defined the first local measure of visual (electrophysiological) responsiveness adapted to free-viewing in humans: amplitude modulations in the high-frequency activity range (50-150 Hz) following fixations (fixation-related high-frequency response). We showed that despite the large size of receptive fields in the ventral occipito-temporal cortex, neural activity during natural vision of realistic cluttered scenes is mostly dependent upon the category of the foveated stimulus - suggesting that category-specificity is preserved during free-viewing and that attention mechanisms might filter out the influence of objects surrounding the fovea. |
Qiming Han; Huan Luo Visual crowding involves delayed frontoparietal response and enhanced top-down modulation Journal Article European Journal of Neuroscience, 50 (6), pp. 2931–2941, 2019. @article{Han2019a, title = {Visual crowding involves delayed frontoparietal response and enhanced top-down modulation}, author = {Qiming Han and Huan Luo}, doi = {10.1111/ejn.14401}, year = {2019}, date = {2019-01-01}, journal = {European Journal of Neuroscience}, volume = {50}, number = {6}, pages = {2931--2941}, abstract = {Crowding, the disrupted recognition of a peripheral target in the presence of nearby flankers, sets a fundamental limit on peripheral vision perception. Debates persist on whether the limit occurs at early visual cortices or is induced by top-down modulation, leaving the neural mechanism for visual crowding largely unclear. To resolve the debate, it is crucial to extract the neural signals elicited by the target from that by the target-flanker clutter, with high temporal resolution. To achieve this purpose, here we employed a temporal response function (TRF) approach to dissociate target-specific response from the overall electroencephalograph (EEG) recordings when the target was presented with (crowded) or without flankers (uncrowded) while subjects were performing a discrimination task on the peripherally presented target. Our results demonstrated two components in the target-specific contrast-tracking TRF response—an early component (100–170 ms) in occipital channels and a late component (210–450 ms) in frontoparietal channels. The late frontoparietal component, which was delayed in time under the crowded condition, was correlated with target discrimination performance, suggesting its involvement in visual crowding. Granger causality analysis further revealed stronger top-down modulation on the target stimulus under the crowded condition. 
Taken together, our findings support that crowding is associated with a top-down process which modulates the low-level sensory processing and delays the behavioral-relevant response in the high-level region.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Crowding, the disrupted recognition of a peripheral target in the presence of nearby flankers, sets a fundamental limit on peripheral vision perception. Debates persist on whether the limit occurs at early visual cortices or is induced by top-down modulation, leaving the neural mechanism for visual crowding largely unclear. To resolve the debate, it is crucial to extract the neural signals elicited by the target from that by the target-flanker clutter, with high temporal resolution. To achieve this purpose, here we employed a temporal response function (TRF) approach to dissociate target-specific response from the overall electroencephalograph (EEG) recordings when the target was presented with (crowded) or without flankers (uncrowded) while subjects were performing a discrimination task on the peripherally presented target. Our results demonstrated two components in the target-specific contrast-tracking TRF response—an early component (100–170 ms) in occipital channels and a late component (210–450 ms) in frontoparietal channels. The late frontoparietal component, which was delayed in time under the crowded condition, was correlated with target discrimination performance, suggesting its involvement in visual crowding. Granger causality analysis further revealed stronger top-down modulation on the target stimulus under the crowded condition. Taken together, our findings support that crowding is associated with a top-down process which modulates the low-level sensory processing and delays the behavioral-relevant response in the high-level region. |
Siobhán Harty; Peter R Murphy; Ian H Robertson; Redmond G O'Connell Parsing the neural signatures of reduced error detection in older age Journal Article NeuroImage, 161 , pp. 43–55, 2017. @article{Harty2017, title = {Parsing the neural signatures of reduced error detection in older age}, author = {Siobhán Harty and Peter R Murphy and Ian H Robertson and Redmond G O'Connell}, doi = {10.1016/j.neuroimage.2017.08.032}, year = {2017}, date = {2017-01-01}, journal = {NeuroImage}, volume = {161}, pages = {43--55}, abstract = {Recent work has demonstrated that explicit error detection relies on a neural evidence accumulation process that can be traced in the human electroencephalogram (EEG). Here, we sought to establish the impact of natural aging on this process by recording EEG from young (18–35 years) and older adults (65–88 years) during the performance of a Go/No-Go paradigm in which participants were required to overtly signal their errors. Despite performing the task with equivalent accuracy, older adults reported substantially fewer errors, and the timing of their reports were both slower and more variable. These behavioral differences were linked to three key neurophysiological changes reflecting distinct parameters of the error detection decision process: a reduction in medial frontal delta/theta (2–7 Hz) activity, indicating diminished top-down input to the decision process; a slower rate of evidence accumulation as indexed by the rate of rise of a centro-parietal signal, known as the error positivity; and a higher motor execution threshold as indexed by lateralized beta-band (16–30 Hz) activity. Our data provide novel insight into how the natural aging process affects the neural underpinnings of error detection.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recent work has demonstrated that explicit error detection relies on a neural evidence accumulation process that can be traced in the human electroencephalogram (EEG). 
Here, we sought to establish the impact of natural aging on this process by recording EEG from young (18–35 years) and older adults (65–88 years) during the performance of a Go/No-Go paradigm in which participants were required to overtly signal their errors. Despite performing the task with equivalent accuracy, older adults reported substantially fewer errors, and the timing of their reports were both slower and more variable. These behavioral differences were linked to three key neurophysiological changes reflecting distinct parameters of the error detection decision process: a reduction in medial frontal delta/theta (2–7 Hz) activity, indicating diminished top-down input to the decision process; a slower rate of evidence accumulation as indexed by the rate of rise of a centro-parietal signal, known as the error positivity; and a higher motor execution threshold as indexed by lateralized beta-band (16–30 Hz) activity. Our data provide novel insight into how the natural aging process affects the neural underpinnings of error detection. |
Ben M Harvey; O J Braddick; A Cowey Similar effects of repetitive transcranial magnetic stimulation of MT+ and a dorsomedial extrastriate site including V3A on pattern detection and position discrimination of rotating and radial motion patterns Journal Article Journal of Vision, 10 (5), pp. 1–15, 2010. @article{Harvey2010, title = {Similar effects of repetitive transcranial magnetic stimulation of MT+ and a dorsomedial extrastriate site including V3A on pattern detection and position discrimination of rotating and radial motion patterns}, author = {Ben M Harvey and O J Braddick and A Cowey}, doi = {10.1167/10.5.21}, year = {2010}, date = {2010-01-01}, journal = {Journal of Vision}, volume = {10}, number = {5}, pages = {1--15}, abstract = {Our recent psychophysical experiments have identified differences in the spatial summation characteristics of pattern detection and position discrimination tasks performed with rotating, expanding, and contracting stimuli. Areas MT and MST are well established to be involved in processing these stimuli. fMRI results have shown retinotopic activation of area V3A depending on the location of the center of radial motion in vision. This suggests the possibility that V3A may be involved in position discrimination tasks with these motion patterns. Here we use repetitive transcranial magnetic stimulation (rTMS) over MT+ and a dorsomedial extrastriate region including V3A to try to distinguish between TMS effects on pattern detection and position discrimination tasks. If V3A were involved in position discrimination, we would expect to see effects on position discrimination tasks, but not pattern detection tasks, with rTMS over this dorsomedial extrastriate region. In fact, we could not dissociate TMS effects on the two tasks, suggesting that they are performed by the same extrastriate area, in MT+.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Our recent psychophysical experiments have identified differences in the spatial summation characteristics of pattern detection and position discrimination tasks performed with rotating, expanding, and contracting stimuli. 
Areas MT and MST are well established to be involved in processing these stimuli. fMRI results have shown retinotopic activation of area V3A depending on the location of the center of radial motion in vision. This suggests the possibility that V3A may be involved in position discrimination tasks with these motion patterns. Here we use repetitive transcranial magnetic stimulation (rTMS) over MT+ and a dorsomedial extrastriate region including V3A to try to distinguish between TMS effects on pattern detection and position discrimination tasks. If V3A were involved in position discrimination, we would expect to see effects on position discrimination tasks, but not pattern detection tasks, with rTMS over this dorsomedial extrastriate region. In fact, we could not dissociate TMS effects on the two tasks, suggesting that they are performed by the same extrastriate area, in MT+. |
Uwe Hassler; Uwe Friese; Ulla Martens; Nelson Trujillo-Barreto; Thomas Gruber Repetition priming effects dissociate between miniature eye movements and induced gamma-band responses in the human electroencephalogram Journal Article European Journal of Neuroscience, 38 (3), pp. 2425–2433, 2013. @article{Hassler2013, title = {Repetition priming effects dissociate between miniature eye movements and induced gamma-band responses in the human electroencephalogram}, author = {Uwe Hassler and Uwe Friese and Ulla Martens and Nelson Trujillo-Barreto and Thomas Gruber}, doi = {10.1111/ejn.12244}, year = {2013}, date = {2013-01-01}, journal = {European Journal of Neuroscience}, volume = {38}, number = {3}, pages = {2425--2433}, abstract = {The role of induced gamma-band responses (iGBRs) in the human electroencephalogram (EEG) is a controversial topic. On the one hand, iGBRs have been associated with neuronal activity reflecting the (re-)activation of cortical object representations. On the other hand, it was shown that miniature saccades (MSs) lead to high-frequency artifacts in the EEG that can mimic cortical iGBRs. We recorded EEG and eye movements simultaneously while participants were engaged in a combined repetition priming and object recognition experiment. MS rates were mainly modulated by object familiarity in a time window from 100 to 300 ms after stimulus onset. In contrast, artifact-corrected iGBRs were sensitive to object repetition and object familiarity in a prolonged time window. EEG source analyses revealed that stimulus repetitions modulated iGBRs in temporal and occipital cortex regions while familiarity was associated with activity in parieto-occipital regions. These results are in line with neuroimaging studies employing functional magnetic resonance imaging or magnetoencephalography. 
We conclude that MSs reflect early mechanisms of visual perception while iGBRs mirror the activation of cortical networks representing a perceived object.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The role of induced gamma-band responses (iGBRs) in the human electroencephalogram (EEG) is a controversial topic. On the one hand, iGBRs have been associated with neuronal activity reflecting the (re-)activation of cortical object representations. On the other hand, it was shown that miniature saccades (MSs) lead to high-frequency artifacts in the EEG that can mimic cortical iGBRs. We recorded EEG and eye movements simultaneously while participants were engaged in a combined repetition priming and object recognition experiment. MS rates were mainly modulated by object familiarity in a time window from 100 to 300 ms after stimulus onset. In contrast, artifact-corrected iGBRs were sensitive to object repetition and object familiarity in a prolonged time window. EEG source analyses revealed that stimulus repetitions modulated iGBRs in temporal and occipital cortex regions while familiarity was associated with activity in parieto-occipital regions. These results are in line with neuroimaging studies employing functional magnetic resonance imaging or magnetoencephalography. We conclude that MSs reflect early mechanisms of visual perception while iGBRs mirror the activation of cortical networks representing a perceived object. |
Maximilian F A Hauser; Stefanie Heba; Tobias Schmidt-Wilcke; Martin Tegenthoff; Denise Manahan-Vaughan Cerebellar-hippocampal processing in passive perception of visuospatial change: An ego- and allocentric axis? Journal Article Human Brain Mapping, 41 (5), pp. 1153–1166, 2020. @article{Hauser2020, title = {Cerebellar-hippocampal processing in passive perception of visuospatial change: An ego- and allocentric axis?}, author = {Maximilian F A Hauser and Stefanie Heba and Tobias Schmidt-Wilcke and Martin Tegenthoff and Denise Manahan-Vaughan}, doi = {10.1002/hbm.24865}, year = {2020}, date = {2020-01-01}, journal = {Human Brain Mapping}, volume = {41}, number = {5}, pages = {1153--1166}, abstract = {In addition to its role in visuospatial navigation and the generation of spatial representations, in recent years, the hippocampus has been proposed to support perceptual processes. This is especially the case where high-resolution details, in the form of fine-grained relationships between features such as angles between components of a visual scene, are involved. An unresolved question is how, in the visual domain, perspective-changes are differentiated from allocentric changes to these perceived feature relationships, both of which may be argued to involve the hippocampus. We conducted functional magnetic resonance imaging of the brain response (corroborated through separate event-related potential source-localization) in a passive visuospatial oddball-paradigm to examine to what extent the hippocampus and other brain regions process changes in perspective, or configuration of abstract, three-dimensional structures. We observed activation of the left superior parietal cortex during perspective shifts, and right anterior hippocampus in configuration-changes. Strikingly, we also found the cerebellum to differentiate between the two, in a way that appeared tightly coupled to hippocampal processing. 
These results point toward a relationship between the cerebellum and the hippocampus that occurs during perception of changes in visuospatial information that has previously only been reported with regard to visuospatial navigation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In addition to its role in visuospatial navigation and the generation of spatial representations, in recent years, the hippocampus has been proposed to support perceptual processes. This is especially the case where high-resolution details, in the form of fine-grained relationships between features such as angles between components of a visual scene, are involved. An unresolved question is how, in the visual domain, perspective-changes are differentiated from allocentric changes to these perceived feature relationships, both of which may be argued to involve the hippocampus. We conducted functional magnetic resonance imaging of the brain response (corroborated through separate event-related potential source-localization) in a passive visuospatial oddball-paradigm to examine to what extent the hippocampus and other brain regions process changes in perspective, or configuration of abstract, three-dimensional structures. We observed activation of the left superior parietal cortex during perspective shifts, and right anterior hippocampus in configuration-changes. Strikingly, we also found the cerebellum to differentiate between the two, in a way that appeared tightly coupled to hippocampal processing. These results point toward a relationship between the cerebellum and the hippocampus that occurs during perception of changes in visuospatial information that has previously only been reported with regard to visuospatial navigation. |
Wei He; Jon Brock; Blake W Johnson Face-sensitive brain responses measured from a four-year-old child with a custom-sized child MEG system Journal Article Journal of Neuroscience Methods, 222 , pp. 213–217, 2014. @article{He2014c, title = {Face-sensitive brain responses measured from a four-year-old child with a custom-sized child MEG system}, author = {Wei He and Jon Brock and Blake W Johnson}, doi = {10.1016/j.jneumeth.2013.11.020}, year = {2014}, date = {2014-01-01}, journal = {Journal of Neuroscience Methods}, volume = {222}, pages = {213--217}, publisher = {Elsevier}, abstract = {Background: Previous magnetoencephalography (MEG) studies have failed to find a face-sensitive brain response (M170) in children. If this is the case, this suggests that the developmental trajectory of the M170 is different from that of its electrical equivalent, the N170. We investigated the alternative possibility that the child M170 may not be detectable in conventional adult-sized MEG systems. New method: Brain responses to pictures of faces and well controlled stimuli were measured from the same four-year-old child with a custom child MEG system and an adult-sized MEG system. Results: The goodness of fit of the child's head was about the same over the occipital head surface in both systems, but was much worse over all other parts of the head surface in the adult MEG system compared to the child MEG system. The face-sensitive M170 was measured from the child in both MEG systems, but was larger in amplitude, clearer in morphology, and had a more accurate source localization when measured in the child MEG system. Comparison with existing method: The custom-sized child MEG system is superior for measuring the face-sensitive M170 brain response in children than the conventional adult MEG system. Conclusions: The present results show that the face-sensitive M170 brain response can be elicited in a four-year-old child. 
This provides new evidence for early maturation of face processing brain mechanisms in humans, and offers new opportunities for the study of neurodevelopmental disorders that show atypical face processing capabilities, such as autism spectrum disorder.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Previous magnetoencephalography (MEG) studies have failed to find a face-sensitive brain response (M170) in children. If this is the case, this suggests that the developmental trajectory of the M170 is different from that of its electrical equivalent, the N170. We investigated the alternative possibility that the child M170 may not be detectable in conventional adult-sized MEG systems. New method: Brain responses to pictures of faces and well controlled stimuli were measured from the same four-year-old child with a custom child MEG system and an adult-sized MEG system. Results: The goodness of fit of the child's head was about the same over the occipital head surface in both systems, but was much worse over all other parts of the head surface in the adult MEG system compared to the child MEG system. The face-sensitive M170 was measured from the child in both MEG systems, but was larger in amplitude, clearer in morphology, and had a more accurate source localization when measured in the child MEG system. Comparison with existing method: The custom-sized child MEG system is superior for measuring the face-sensitive M170 brain response in children than the conventional adult MEG system. Conclusions: The present results show that the face-sensitive M170 brain response can be elicited in a four-year-old child. This provides new evidence for early maturation of face processing brain mechanisms in humans, and offers new opportunities for the study of neurodevelopmental disorders that show atypical face processing capabilities, such as autism spectrum disorder. |
Wei He; Jon Brock; Blake W Johnson Face processing in the brains of pre-school aged children measured with MEG Journal Article NeuroImage, 106 , pp. 317–327, 2015. @article{He2015a, title = {Face processing in the brains of pre-school aged children measured with MEG}, author = {Wei He and Jon Brock and Blake W Johnson}, doi = {10.1016/j.neuroimage.2014.11.029}, year = {2015}, date = {2015-01-01}, journal = {NeuroImage}, volume = {106}, pages = {317--327}, publisher = {Elsevier Inc.}, abstract = {There are two competing theories concerning the development of face perception: a late maturation account and an early maturation account. Magnetoencephalography (MEG) neuroimaging holds promise for adjudicating between the two opposing accounts by providing objective neurophysiological measures of face processing, with sufficient temporal resolution to isolate face-specific brain responses from those associated with other sensory, cognitive and motor processes. The current study used a customized child MEG system to measure M100 and M170 brain responses in 15 children aged three to six years while they viewed faces, cars and their phase-scrambled counterparts. Compared to adults tested using the same stimuli in a conventional MEG system, children showed significantly larger and later M100 responses. Children's M170 responses, derived by subtracting the responses to phase-scrambled images from the corresponding images (faces or cars) were delayed in latency but otherwise resembled the adult M170. This component has not been obtained in previous studies of young children tested using conventional adult MEG systems. However children did show a markedly reduced M170 response to cars in comparison to adults. This may reflect children's lack of expertise with cars relative to faces. 
Taken together, these data are in accord with recent behavioural and neuroimaging data that support early maturation of the basic face processing functions.}, keywords = {}, pubstate = {published}, tppubtype = {article} } There are two competing theories concerning the development of face perception: a late maturation account and an early maturation account. Magnetoencephalography (MEG) neuroimaging holds promise for adjudicating between the two opposing accounts by providing objective neurophysiological measures of face processing, with sufficient temporal resolution to isolate face-specific brain responses from those associated with other sensory, cognitive and motor processes. The current study used a customized child MEG system to measure M100 and M170 brain responses in 15 children aged three to six years while they viewed faces, cars and their phase-scrambled counterparts. Compared to adults tested using the same stimuli in a conventional MEG system, children showed significantly larger and later M100 responses. Children's M170 responses, derived by subtracting the responses to phase-scrambled images from the corresponding images (faces or cars) were delayed in latency but otherwise resembled the adult M170. This component has not been obtained in previous studies of young children tested using conventional adult MEG systems. However children did show a markedly reduced M170 response to cars in comparison to adults. This may reflect children's lack of expertise with cars relative to faces. Taken together, these data are in accord with recent behavioural and neuroimaging data that support early maturation of the basic face processing functions. |
Wei He; Marta I Garrido; Paul F Sowman; Jon Brock; Blake W Johnson Development of effective connectivity in the core network for face perception Journal Article Human Brain Mapping, 36 (6), pp. 2161–2173, 2015. @article{He2015b, title = {Development of effective connectivity in the core network for face perception}, author = {Wei He and Marta I Garrido and Paul F Sowman and Jon Brock and Blake W Johnson}, doi = {10.1002/hbm.22762}, year = {2015}, date = {2015-01-01}, journal = {Human Brain Mapping}, volume = {36}, number = {6}, pages = {2161--2173}, abstract = {This study measured effective connectivity within the core face network in young children using a paediatric magnetoencephalograph (MEG). Dynamic causal modeling (DCM) of brain responses was performed in a group of adults (N = 14) and a group of young children aged from 3 to 6 years (N = 15). Three candidate DCM models were tested, and the fits of the MEG data to the three models were compared at both individual and group levels. The results show that the connectivity structure of the core face network differs significantly between adults and children. Further, the relative strengths of face network connections were differentially modulated by experimental conditions in the two groups. These results support the interpretation that the core face network undergoes significant structural configuration and functional specialization between four years of age and adulthood.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study measured effective connectivity within the core face network in young children using a paediatric magnetoencephalograph (MEG). Dynamic causal modeling (DCM) of brain responses was performed in a group of adults (N = 14) and a group of young children aged from 3 to 6 years (N = 15). Three candidate DCM models were tested, and the fits of the MEG data to the three models were compared at both individual and group levels. 
The results show that the connectivity structure of the core face network differs significantly between adults and children. Further, the relative strengths of face network connections were differentially modulated by experimental conditions in the two groups. These results support the interpretation that the core face network undergoes significant structural configuration and functional specialization between four years of age and adulthood. |
Wei He; Blake W Johnson Development of face recognition: Dynamic causal modelling of MEG data Journal Article Developmental Cognitive Neuroscience, 30 , pp. 13–22, 2018. @article{He2018f, title = {Development of face recognition: Dynamic causal modelling of MEG data}, author = {Wei He and Blake W Johnson}, doi = {10.1016/j.dcn.2017.11.010}, year = {2018}, date = {2018-01-01}, journal = {Developmental Cognitive Neuroscience}, volume = {30}, pages = {13--22}, publisher = {Elsevier}, abstract = {Electrophysiological studies of adults indicate that brain activity is enhanced during viewing of repeated faces, at a latency of about 250 ms after the onset of the face (M250/N250). The present study aimed to determine if this effect was also present in preschool-aged children, whose brain activity was measured in a custom-sized pediatric MEG system. The results showed that, unlike adults, face repetition did not show any significant modulation of M250 amplitude in children; however children's M250 latencies were significantly faster for repeated than non-repeated faces. Dynamic causal modelling (DCM) of the M250 in both age groups tested the effects of face repetition within the core face network including the occipital face area (OFA), the fusiform face area (FFA), and the superior temporal sulcus (STS). DCM revealed that repetition of identical faces altered both forward and backward connections in children and adults; however the modulations involved inputs to both FFA and OFA in adults but only to OFA in children. These findings suggest that the amplitude-insensitivity of the immature M250 may be due to a weaker connection between the FFA and lower visual areas.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Electrophysiological studies of adults indicate that brain activity is enhanced during viewing of repeated faces, at a latency of about 250 ms after the onset of the face (M250/N250). 
The present study aimed to determine if this effect was also present in preschool-aged children, whose brain activity was measured in a custom-sized pediatric MEG system. The results showed that, unlike adults, face repetition did not show any significant modulation of M250 amplitude in children; however children's M250 latencies were significantly faster for repeated than non-repeated faces. Dynamic causal modelling (DCM) of the M250 in both age groups tested the effects of face repetition within the core face network including the occipital face area (OFA), the fusiform face area (FFA), and the superior temporal sulcus (STS). DCM revealed that repetition of identical faces altered both forward and backward connections in children and adults; however the modulations involved inputs to both FFA and OFA in adults but only to OFA in children. These findings suggest that the amplitude-insensitivity of the immature M250 may be due to a weaker connection between the FFA and lower visual areas. |
Simone G Heideman; Gustavo Rohenkohl; Joshua J Chauvin; Clare E Palmer; Freek van Ede; Anna C Nobre Anticipatory neural dynamics of spatial-temporal orienting of attention in younger and older adults Journal Article NeuroImage, 178 , pp. 46–56, 2018. @article{Heideman2018a, title = {Anticipatory neural dynamics of spatial-temporal orienting of attention in younger and older adults}, author = {Simone G Heideman and Gustavo Rohenkohl and Joshua J Chauvin and Clare E Palmer and Freek van Ede and Anna C Nobre}, doi = {10.1016/j.neuroimage.2018.05.002}, year = {2018}, date = {2018-01-01}, journal = {NeuroImage}, volume = {178}, pages = {46--56}, publisher = {Elsevier Ltd}, abstract = {Spatial and temporal expectations act synergistically to facilitate visual perception. In the current study, we sought to investigate the anticipatory oscillatory markers of combined spatial-temporal orienting and to test whether these decline with ageing. We examined anticipatory neural dynamics associated with joint spatial-temporal orienting of attention using magnetoencephalography (MEG) in both younger and older adults. Participants performed a cued covert spatial-temporal orienting task requiring the discrimination of a visual target. Cues indicated both where and when targets would appear. In both age groups, valid spatial-temporal cues significantly enhanced perceptual sensitivity and reduced reaction times. In the MEG data, the main effect of spatial orienting was the lateralised anticipatory modulation of posterior alpha and beta oscillations. In contrast to previous reports, this modulation was not attenuated in older adults; instead it was even more pronounced. The main effect of temporal orienting was a bilateral suppression of posterior alpha and beta oscillations. This effect was restricted to younger adults. Our results also revealed a striking interaction between anticipatory spatial and temporal orienting in the gamma-band (60–75 Hz). 
When considering both age groups separately, this effect was only clearly evident and only survived statistical evaluation in the older adults. Together, these observations provide several new insights into the neural dynamics supporting separate as well as combined effects of spatial and temporal orienting of attention, and suggest that different neural dynamics associated with attentional orienting appear differentially sensitive to ageing.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Spatial and temporal expectations act synergistically to facilitate visual perception. In the current study, we sought to investigate the anticipatory oscillatory markers of combined spatial-temporal orienting and to test whether these decline with ageing. We examined anticipatory neural dynamics associated with joint spatial-temporal orienting of attention using magnetoencephalography (MEG) in both younger and older adults. Participants performed a cued covert spatial-temporal orienting task requiring the discrimination of a visual target. Cues indicated both where and when targets would appear. In both age groups, valid spatial-temporal cues significantly enhanced perceptual sensitivity and reduced reaction times. In the MEG data, the main effect of spatial orienting was the lateralised anticipatory modulation of posterior alpha and beta oscillations. In contrast to previous reports, this modulation was not attenuated in older adults; instead it was even more pronounced. The main effect of temporal orienting was a bilateral suppression of posterior alpha and beta oscillations. This effect was restricted to younger adults. Our results also revealed a striking interaction between anticipatory spatial and temporal orienting in the gamma-band (60–75 Hz). When considering both age groups separately, this effect was only clearly evident and only survived statistical evaluation in the older adults. 
Together, these observations provide several new insights into the neural dynamics supporting separate as well as combined effects of spatial and temporal orienting of attention, and suggest that different neural dynamics associated with attentional orienting appear differentially sensitive to ageing. |
Simone G Heideman; Freek van Ede; Anna C Nobre Temporal alignment of anticipatory motor cortical beta lateralisation in hidden visual-motor sequences Journal Article European Journal of Neuroscience, 48 (8), pp. 2684–2695, 2018. @article{Heideman2018b, title = {Temporal alignment of anticipatory motor cortical beta lateralisation in hidden visual-motor sequences}, author = {Simone G Heideman and Freek van Ede and Anna C Nobre}, doi = {10.1111/ejn.13700}, year = {2018}, date = {2018-01-01}, journal = {European Journal of Neuroscience}, volume = {48}, number = {8}, pages = {2684--2695}, abstract = {Performance improves when participants respond to events that are structured in repeating sequences, suggesting that learning can lead to proactive anticipatory preparation. Whereas most sequence-learning studies have emphasised spatial structure, most sequences also contain a prominent temporal structure. We used MEG to investigate spatial and temporal anticipatory neural dynamics in a modified serial reaction time (SRT) task. Performance and brain activity were compared between blocks with learned spatial-temporal sequences and blocks with new sequences. After confirming a strong behavioural benefit of spatial-temporal predictability, we show lateralisation of beta oscillations in anticipation of the response associated with the upcoming target location and show that this also aligns to the expected timing of these forthcoming events. This effect was found both when comparing between repeated (learned) and new (unlearned) sequences, as well as when comparing targets that were expected after short vs. long intervals within the repeated (learned) sequence. 
Our findings suggest that learning of spatial-temporal structure leads to proactive and dynamic modulation of motor cortical excitability in anticipation of both the location and timing of events that are relevant to guide action.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Performance improves when participants respond to events that are structured in repeating sequences, suggesting that learning can lead to proactive anticipatory preparation. Whereas most sequence-learning studies have emphasised spatial structure, most sequences also contain a prominent temporal structure. We used MEG to investigate spatial and temporal anticipatory neural dynamics in a modified serial reaction time (SRT) task. Performance and brain activity were compared between blocks with learned spatial-temporal sequences and blocks with new sequences. After confirming a strong behavioural benefit of spatial-temporal predictability, we show lateralisation of beta oscillations in anticipation of the response associated with the upcoming target location and show that this also aligns to the expected timing of these forthcoming events. This effect was found both when comparing between repeated (learned) and new (unlearned) sequences, as well as when comparing targets that were expected after short vs. long intervals within the repeated (learned) sequence. Our findings suggest that learning of spatial-temporal structure leads to proactive and dynamic modulation of motor cortical excitability in anticipation of both the location and timing of events that are relevant to guide action. |
Simone G Heideman; Andrew J Quinn; Mark W Woolrich; Freek van Ede; Anna C Nobre Dissecting beta-state changes during timed movement preparation in Parkinson's disease Journal Article Progress in Neurobiology, 184 , pp. 1–11, 2020. @article{Heideman2020, title = {Dissecting beta-state changes during timed movement preparation in Parkinson's disease}, author = {Simone G Heideman and Andrew J Quinn and Mark W Woolrich and Freek van Ede and Anna C Nobre}, doi = {10.1016/j.pneurobio.2019.101731}, year = {2020}, date = {2020-01-01}, journal = {Progress in Neurobiology}, volume = {184}, pages = {1--11}, publisher = {Elsevier Ltd}, abstract = {An emerging perspective describes beta-band (15–28 Hz) activity as consisting of short-lived high-amplitude events that only appear sustained in conventional measures of trial-average power. This has important implications for characterising abnormalities observed in beta-band activity in disorders like Parkinson's disease. Measuring parameters associated with beta-event dynamics may yield more sensitive measures, provide more selective diagnostic neural markers, and provide greater mechanistic insight into the breakdown of brain dynamics in this disease. Here, we used magnetoencephalography in eighteen Parkinson's disease participants off dopaminergic medication and eighteen healthy control participants to investigate beta-event dynamics during timed movement preparation. We used the Hidden Markov Model to classify event dynamics in a data-driven manner and derived three parameters of beta events: (1) beta-state amplitude, (2) beta-state lifetime, and (3) beta-state interval time. Of these, changes in beta-state interval time explained the overall decreases in beta power during timed movement preparation and uniquely captured the impairment in such preparation in patients with Parkinson's disease. 
Thus, the increased granularity of the Hidden Markov Model analysis (compared with conventional analysis of power) provides increased sensitivity and suggests a possible reason for impairments of timed movement preparation in Parkinson's disease.}, keywords = {}, pubstate = {published}, tppubtype = {article} } An emerging perspective describes beta-band (15–28 Hz) activity as consisting of short-lived high-amplitude events that only appear sustained in conventional measures of trial-average power. This has important implications for characterising abnormalities observed in beta-band activity in disorders like Parkinson's disease. Measuring parameters associated with beta-event dynamics may yield more sensitive measures, provide more selective diagnostic neural markers, and provide greater mechanistic insight into the breakdown of brain dynamics in this disease. Here, we used magnetoencephalography in eighteen Parkinson's disease participants off dopaminergic medication and eighteen healthy control participants to investigate beta-event dynamics during timed movement preparation. We used the Hidden Markov Model to classify event dynamics in a data-driven manner and derived three parameters of beta events: (1) beta-state amplitude, (2) beta-state lifetime, and (3) beta-state interval time. Of these, changes in beta-state interval time explained the overall decreases in beta power during timed movement preparation and uniquely captured the impairment in such preparation in patients with Parkinson's disease. Thus, the increased granularity of the Hidden Markov Model analysis (compared with conventional analysis of power) provides increased sensitivity and suggests a possible reason for impairments of timed movement preparation in Parkinson's disease. |
Karin Heidlmayr; Karine Dore-Mazars; Xavier Aparicio; Frederic Isel Multiple language use influences oculomotor task performance: Neurophysiological evidence of a shared substrate between language and motor control Journal Article PLoS ONE, 11 (11), pp. e0165029, 2016. @article{Heidlmayr2016, title = {Multiple language use influences oculomotor task performance: Neurophysiological evidence of a shared substrate between language and motor control}, author = {Karin Heidlmayr and Karine Dore-Mazars and Xavier Aparicio and Frederic Isel}, doi = {10.1371/journal.pone.0165029}, year = {2016}, date = {2016-01-01}, journal = {PLoS ONE}, volume = {11}, number = {11}, pages = {e0165029}, abstract = {In the present electroencephalographical study, we asked to which extent executive control processes are shared by both the language and motor domain. The rationale was to examine whether executive control processes whose efficiency is reinforced by the frequent use of a second language can lead to a benefit in the control of eye movements, i.e. a non-linguistic activity. For this purpose, we administered to 19 highly proficient late French-German bilingual participants and to a control group of 20 French monolingual participants an antisaccade task, i.e. a specific motor task involving control. In this task, an automatic saccade has to be suppressed while a voluntary eye movement in the opposite direction has to be carried out. Here, our main hypothesis is that an advantage in the antisaccade task should be observed in the bilinguals if some properties of the control processes are shared between linguistic and motor domains. ERP data revealed clear differences between bilinguals and monolinguals. Critically, we showed an increased N2 effect size in bilinguals, thought to reflect better efficiency to monitor conflict, combined with reduced effect sizes on markers reflecting inhibitory control, i.e. cue-locked positivity, the target-locked P3 and the saccade-locked presaccadic positivity (PSP). 
Moreover, effective connectivity analyses (dynamic causal modelling; DCM) on the neuronal source level indicated that bilinguals rely more strongly on ACC-driven control while monolinguals rely on PFC-driven control. Taken together, our combined ERP and effective connectivity findings may reflect a dynamic interplay between strengthened conflict monitoring, associated with subsequently more efficient inhibition in bilinguals. Finally, L2 proficiency and immersion experience constitute relevant factors of the language background that predict efficiency of inhibition. To conclude, the present study provided ERP and effective connectivity evidence for domain-general executive control involvement in handling multiple language use, leading to a control advantage in bilingualism.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In the present electroencephalographical study, we asked to which extent executive control processes are shared by both the language and motor domain. The rationale was to examine whether executive control processes whose efficiency is reinforced by the frequent use of a second language can lead to a benefit in the control of eye movements, i.e. a non-linguistic activity. For this purpose, we administrated to 19 highly proficient late French-German bilingual participants and to a control group of 20 French monolingual participants an antisaccade task, i.e. a specific motor task involving control. In this task, an automatic saccade has to be suppressed while a voluntary eye movement in the opposite direction has to be carried out. Here, our main hypothesis is that an advantage in the antisaccade task should be observed in the bilinguals if some properties of the control processes are shared between linguistic and motor domains. ERP data revealed clear differences between bilinguals and monolinguals. 
Critically, we showed an increased N2 effect size in bilinguals, thought to reflect better efficiency to monitor conflict, combined with reduced effect sizes on markers reflecting inhibitory control, i.e. cue-locked positivity, the target-locked P3 and the saccade-locked presaccadic positivity (PSP). Moreover, effective connectivity analyses (dynamic causal modelling; DCM) on the neuronal source level indicated that bilinguals rely more strongly on ACC-driven control while monolinguals rely on PFC-driven control. Taken together, our combined ERP and effective connectivity findings may reflect a dynamic interplay between strengthened conflict monitoring, associated with subsequently more efficient inhibition in bilinguals. Finally, L2 proficiency and immersion experience constitute relevant factors of the language background that predict efficiency of inhibition. To conclude, the present study provided ERP and effective connectivity evidence for domain-general executive control involvement in handling multiple language use, leading to a control advantage in bilingualism. |
Jenni Heikkilä; Kaisa Tiippana; Otto Loberg; Paavo H T Leppänen Neural processing of congruent and incongruent audiovisual speech in school-age children and adults Journal Article Language Learning, 68 , pp. 58–79, 2018. @article{Heikkilae2018, title = {Neural processing of congruent and incongruent audiovisual speech in school-age children and adults}, author = {Jenni Heikkilä and Kaisa Tiippana and Otto Loberg and Paavo H T Leppänen}, doi = {10.1111/lang.12266}, year = {2018}, date = {2018-01-01}, journal = {Language Learning}, volume = {68}, pages = {58--79}, abstract = {Seeing articulatory gestures enhances speech perception. Perception of auditory speech can even be changed by incongruent visual gestures, which is known as the McGurk effect (e.g., dubbing a voice saying /mi/ onto a face articulating /ni/, observers often hear /ni/). In children, the McGurk effect is weaker than in adults, but no previous knowledge exists about the neural-level correlates of the McGurk effect in school-age children. Using brain event-related potentials, we investigated change detection responses to congruent and incongruent audiovisual speech in school-age children and adults. We used an oddball paradigm with a congruent audiovisual /mi/ as the standard stimulus and a congruent audiovisual /ni/ or McGurk A/mi/V/ni/ as the deviant stimulus. In adults, a similar change detection response was elicited by both deviant stimuli. In children, change detection responses differed between the congruent and the McGurk stimulus. This reflects a maturational difference in the influence of visual stimuli on auditory processing.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Seeing articulatory gestures enhances speech perception. Perception of auditory speech can even be changed by incongruent visual gestures, which is known as the McGurk effect (e.g., dubbing a voice saying /mi/ onto a face articulating /ni/, observers often hear /ni/). 
In children, the McGurk effect is weaker than in adults, but no previous knowledge exists about the neural-level correlates of the McGurk effect in school-age children. Using brain event-related potentials, we investigated change detection responses to congruent and incongruent audiovisual speech in school-age children and adults. We used an oddball paradigm with a congruent audiovisual /mi/ as the standard stimulus and a congruent audiovisual /ni/ or McGurk A/mi/V/ni/ as the deviant stimulus. In adults, a similar change detection response was elicited by both deviant stimuli. In children, change detection responses differed between the congruent and the McGurk stimulus. This reflects a maturational difference in the influence of visual stimuli on auditory processing. |
Randolph F Helfrich; Hannah Knepper; Guido Nolte; Daniel Strüber; Stefan Rach; Christoph S Herrmann; Till R Schneider; Andreas K Engel Selective modulation of interhemispheric functional connectivity by HD-tACS shapes perception Journal Article PLoS Biology, 12 (12), pp. 1–15, 2014. @article{Helfrich2014, title = {Selective modulation of interhemispheric functional connectivity by HD-tACS shapes perception}, author = {Randolph F Helfrich and Hannah Knepper and Guido Nolte and Daniel Strüber and Stefan Rach and Christoph S Herrmann and Till R Schneider and Andreas K Engel}, doi = {10.1371/journal.pbio.1002031}, year = {2014}, date = {2014-01-01}, journal = {PLoS Biology}, volume = {12}, number = {12}, pages = {1--15}, abstract = {Oscillatory neuronal synchronization between cortical areas has been suggested to constitute a flexible mechanism to coordinate information flow in the human cerebral cortex. However, it remains unclear whether synchronized neuronal activity merely represents an epiphenomenon or whether it is causally involved in the selective gating of information. Here, we combined bilateral high-density transcranial alternating current stimulation (HD-tACS) at 40 Hz with simultaneous electroencephalographic (EEG) recordings to study immediate electrophysiological effects during the selective entrainment of oscillatory gamma-band signatures. We found that interhemispheric functional connectivity was modulated in a predictable, phase-specific way: In-phase stimulation enhanced synchronization, anti-phase stimulation impaired functional coupling. Perceptual correlates of these connectivity changes were found in an ambiguous motion task, which strongly support the functional relevance of long-range neuronal coupling. Additionally, our results revealed a decrease in oscillatory alpha power in response to the entrainment of gamma band signatures. 
This finding provides causal evidence for the antagonistic role of alpha and gamma oscillations in the parieto-occipital cortex and confirms that the observed gamma band modulations were physiological in nature. Our results demonstrate that synchronized cortical network activity across several spatiotemporal scales is essential for conscious perception and cognition.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Oscillatory neuronal synchronization between cortical areas has been suggested to constitute a flexible mechanism to coordinate information flow in the human cerebral cortex. However, it remains unclear whether synchronized neuronal activity merely represents an epiphenomenon or whether it is causally involved in the selective gating of information. Here, we combined bilateral high-density transcranial alternating current stimulation (HD-tACS) at 40 Hz with simultaneous electroencephalographic (EEG) recordings to study immediate electrophysiological effects during the selective entrainment of oscillatory gamma-band signatures. We found that interhemispheric functional connectivity was modulated in a predictable, phase-specific way: In-phase stimulation enhanced synchronization, anti-phase stimulation impaired functional coupling. Perceptual correlates of these connectivity changes were found in an ambiguous motion task, which strongly support the functional relevance of long-range neuronal coupling. Additionally, our results revealed a decrease in oscillatory alpha power in response to the entrainment of gamma band signatures. This finding provides causal evidence for the antagonistic role of alpha and gamma oscillations in the parieto-occipital cortex and confirms that the observed gamma band modulations were physiological in nature. Our results demonstrate that synchronized cortical network activity across several spatiotemporal scales is essential for conscious perception and cognition. |
Kasey S Hemington; James N Reynolds Electroencephalographic correlates of working memory deficits in children with Fetal Alcohol Spectrum Disorder using a single-electrode pair recording device Journal Article Clinical Neurophysiology, 125 (12), pp. 2364–2371, 2014. @article{Hemington2014, title = {Electroencephalographic correlates of working memory deficits in children with Fetal Alcohol Spectrum Disorder using a single-electrode pair recording device}, author = {Kasey S Hemington and James N Reynolds}, doi = {10.1016/j.clinph.2014.03.025}, year = {2014}, date = {2014-01-01}, journal = {Clinical Neurophysiology}, volume = {125}, number = {12}, pages = {2364--2371}, publisher = {International Federation of Clinical Neurophysiology}, abstract = {Objective: Children with Fetal Alcohol Spectrum Disorder (FASD) exhibit cognitive deficits that can be probed using eye movement tasks. We employed a recently developed, single-sensor electroencephalographic (EEG) recording device in measuring EEG activity during the performance of an eye movement task probing working memory in this population. Methods: Children with FASD (n= 18) and typically developing children (n= 19) performed a memory-guided saccade task requiring the participant to remember the spatial location of one, two or three stimuli. We hypothesized that children with FASD would (i) exhibit performance deficits, particularly at greater mnemonic loads; and (ii) display differences in theta (4-8 Hz) and alpha (8-12 Hz) frequency band power compared with controls. Results: Children with FASD failed to perform the task correctly more often than controls when presented with two or three stimuli, and demonstrated related reductions in alpha and theta power. Conclusion: These data suggest that the memory-guided task is sensitive to working memory deficits in children with FASD. 
Significance: Simultaneous recording of EEG activity suggest differing patterns of underlying neural recruitment in the clinical group, consistent with previous literature indicating more cognitive resources are required by children with FASD in order to complete complex tasks correctly.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objective: Children with Fetal Alcohol Spectrum Disorder (FASD) exhibit cognitive deficits that can be probed using eye movement tasks. We employed a recently developed, single-sensor electroencephalographic (EEG) recording device in measuring EEG activity during the performance of an eye movement task probing working memory in this population. Methods: Children with FASD (n= 18) and typically developing children (n= 19) performed a memory-guided saccade task requiring the participant to remember the spatial location of one, two or three stimuli. We hypothesized that children with FASD would (i) exhibit performance deficits, particularly at greater mnemonic loads; and (ii) display differences in theta (4-8 Hz) and alpha (8-12 Hz) frequency band power compared with controls. Results: Children with FASD failed to perform the task correctly more often than controls when presented with two or three stimuli, and demonstrated related reductions in alpha and theta power. Conclusion: These data suggest that the memory-guided task is sensitive to working memory deficits in children with FASD. Significance: Simultaneous recording of EEG activity suggest differing patterns of underlying neural recruitment in the clinical group, consistent with previous literature indicating more cognitive resources are required by children with FASD in order to complete complex tasks correctly. |
John M Henderson; Steven G Luke; Joseph Schmidt; John E Richards Co-registration of eye movements and event-related potentials in connected-text paragraph reading Journal Article Frontiers in Systems Neuroscience, 7 , pp. 1–13, 2013. @article{Henderson2013, title = {Co-registration of eye movements and event-related potentials in connected-text paragraph reading}, author = {John M Henderson and Steven G Luke and Joseph Schmidt and John E Richards}, doi = {10.3389/fnsys.2013.00028}, year = {2013}, date = {2013-01-01}, journal = {Frontiers in Systems Neuroscience}, volume = {7}, pages = {1--13}, abstract = {Eyetracking during reading has provided a critical source of on-line behavioral data informing basic theory in language processing. Similarly, event-related potentials (ERPs) have provided an important on-line measure of the neural correlates of language processing. Recently there has been strong interest in co-registering eyetracking and ERPs from simultaneous recording to capitalize on the strengths of both techniques, but a challenge has been devising approaches for controlling artifacts produced by eye movements in the EEG waveform. In this paper we describe our approach to correcting for eye movements in EEG and demonstrate its applicability to reading. The method is based on independent components analysis, and uses three criteria for identifying components tied to saccades: (1) component loadings on the surface of the head are consistent with eye movements; (2) source analysis localizes component activity to the eyes, and (3) the temporal activation of the component occurred at the time of the eye movement and differed for right and left eye movements. We demonstrate this method's applicability to reading by comparing ERPs time-locked to fixation onset in two reading conditions. In the text-reading condition, participants read paragraphs of text. 
In the pseudo-reading control condition, participants moved their eyes through spatially similar pseudo-text that preserved word locations, word shapes, and paragraph spatial structure, but eliminated meaning. The corrected EEG, time-locked to fixation onsets, showed effects of reading condition in early ERP components. The results indicate that co-registration of eyetracking and EEG in connected-text paragraph reading is possible, and has the potential to become an important tool for investigating the cognitive and neural bases of on-line language processing in reading.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Eyetracking during reading has provided a critical source of on-line behavioral data informing basic theory in language processing. Similarly, event-related potentials (ERPs) have provided an important on-line measure of the neural correlates of language processing. Recently there has been strong interest in co-registering eyetracking and ERPs from simultaneous recording to capitalize on the strengths of both techniques, but a challenge has been devising approaches for controlling artifacts produced by eye movements in the EEG waveform. In this paper we describe our approach to correcting for eye movements in EEG and demonstrate its applicability to reading. The method is based on independent components analysis, and uses three criteria for identifying components tied to saccades: (1) component loadings on the surface of the head are consistent with eye movements; (2) source analysis localizes component activity to the eyes, and (3) the temporal activation of the component occurred at the time of the eye movement and differed for right and left eye movements. We demonstrate this method's applicability to reading by comparing ERPs time-locked to fixation onset in two reading conditions. In the text-reading condition, participants read paragraphs of text. 
In the pseudo-reading control condition, participants moved their eyes through spatially similar pseudo-text that preserved word locations, word shapes, and paragraph spatial structure, but eliminated meaning. The corrected EEG, time-locked to fixation onsets, showed effects of reading condition in early ERP components. The results indicate that co-registration of eyetracking and EEG in connected-text paragraph reading is possible, and has the potential to become an important tool for investigating the cognitive and neural bases of on-line language processing in reading. |
Linda Henriksson; Marieke Mur; Nikolaus Kriegeskorte Rapid invariant encoding of scene layout in human OPA Journal Article Neuron, 103 , pp. 161–171, 2019. @article{Henriksson2019, title = {Rapid invariant encoding of scene layout in human OPA}, author = {Linda Henriksson and Marieke Mur and Nikolaus Kriegeskorte}, doi = {10.1016/j.neuron.2019.04.014}, year = {2019}, date = {2019-01-01}, journal = {Neuron}, volume = {103}, pages = {161--171}, publisher = {Elsevier Inc.}, abstract = {Successful visual navigation requires a sense of the geometry of the local environment. How do our brains extract this information from retinal images? Here we visually presented scenes with all possible combinations of five scene-bounding elements (left, right, and back walls; ceiling; floor) to human subjects during functional magnetic resonance imaging (fMRI) and magnetoencephalography (MEG). The fMRI response patterns in the scene-responsive occipital place area (OPA) reflected scene layout with invariance to changes in surface texture. This result contrasted sharply with the primary visual cortex (V1), which reflected low-level image features of the stimuli, and the parahippocampal place area (PPA), which showed better texture than layout decoding. MEG indicated that the texture-invariant scene layout representation is computed from visual input within ∼100 ms, suggesting a rapid computational mechanism. Taken together, these results suggest that the cortical representation underlying our instant sense of the environmental geometry is located in the OPA.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Successful visual navigation requires a sense of the geometry of the local environment. How do our brains extract this information from retinal images? 
Here we visually presented scenes with all possible combinations of five scene-bounding elements (left, right, and back walls; ceiling; floor) to human subjects during functional magnetic resonance imaging (fMRI) and magnetoencephalography (MEG). The fMRI response patterns in the scene-responsive occipital place area (OPA) reflected scene layout with invariance to changes in surface texture. This result contrasted sharply with the primary visual cortex (V1), which reflected low-level image features of the stimuli, and the parahippocampal place area (PPA), which showed better texture than layout decoding. MEG indicated that the texture-invariant scene layout representation is computed from visual input within ∼100 ms, suggesting a rapid computational mechanism. Taken together, these results suggest that the cortical representation underlying our instant sense of the environmental geometry is located in the OPA. |
Piril Hepsomali; Julie A Hadwin; Simon P Liversedge; Federica Degno; Matthew Garner The impact of cognitive load on processing efficiency and performance effectiveness in anxiety: evidence from event-related potentials and pupillary responses Journal Article Experimental Brain Research, 237 (4), pp. 897–909, 2019. @article{Hepsomali2019, title = {The impact of cognitive load on processing efficiency and performance effectiveness in anxiety: evidence from event-related potentials and pupillary responses}, author = {Piril Hepsomali and Julie A Hadwin and Simon P Liversedge and Federica Degno and Matthew Garner}, doi = {10.1007/s00221-018-05466-y}, year = {2019}, date = {2019-01-01}, journal = {Experimental Brain Research}, volume = {237}, number = {4}, pages = {897--909}, publisher = {Springer Berlin Heidelberg}, abstract = {Anxiety has been associated with poor attentional control, as reflected in lowered performance on experimental measures of executive attention and inhibitory control. Recent conceptualisations of anxiety propose that individuals who report elevated anxiety symptoms worry about performance and will exert greater cognitive effort to complete tasks well, particularly when cognitive demands are high. Across two experiments, we examined the effect of anxiety on task performance and across two load conditions using (1) measures of inhibitory control (behavioural reaction times and eye-movement responses) and (2) task effort with pupillary and electrocortical markers of effort (CNV) and inhibitory control (N2). Experiment 1 used an oculomotor-delayed-response task that manipulated load by increasing delay duration to create a high load, relative to a low load, condition. Experiment 2 used a Go/No-Go task and load was manipulated by decreasing the No-Go probabilities (i.e., 20% No-Go in the high load condition and 50% No-Go in the low load condition). Experiment 1 showed individuals with high (vs. low) anxiety made more antisaccade errors across load conditions, and made more effort during the high load condition, as evidenced by greater frontal CNV and increased pupillary responses. 
In Experiment 2, individuals with high anxiety showed increased effort (irrespective of cognitive load), as characterised by larger pupillary responses. In addition, N2 amplitudes were sensitive to load only in individuals with low anxiety. Evidence of reduced performance effectiveness and efficiency across electrophysiological, pupillary, and oculomotor systems in anxiety provides some support for neurocognitive models of frontocortical attentional dysfunction in anxiety.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Anxiety has been associated with poor attentional control, as reflected in lowered performance on experimental measures of executive attention and inhibitory control. Recent conceptualisations of anxiety propose that individuals who report elevated anxiety symptoms worry about performance and will exert greater cognitive effort to complete tasks well, particularly when cognitive demands are high. Across two experiments, we examined the effect of anxiety on task performance and across two load conditions using (1) measures of inhibitory control (behavioural reaction times and eye-movement responses) and (2) task effort with pupillary and electrocortical markers of effort (CNV) and inhibitory control (N2). Experiment 1 used an oculomotor-delayed-response task that manipulated load by increasing delay duration to create a high load, relative to a low load, condition. Experiment 2 used a Go/No-Go task and load was manipulated by decreasing the No-Go probabilities (i.e., 20% No-Go in the high load condition and 50% No-Go in the low load condition). Experiment 1 showed individuals with high (vs. low) anxiety made more antisaccade errors across load conditions, and made more effort during the high load condition, as evidenced by greater frontal CNV and increased pupillary responses. In Experiment 2, individuals with high anxiety showed increased effort (irrespective of cognitive load), as characterised by larger pupillary responses. 
In addition, N2 amplitudes were sensitive to load only in individuals with low anxiety. Evidence of reduced performance effectiveness and efficiency across electrophysiological, pupillary, and oculomotor systems in anxiety provides some support for neurocognitive models of frontocortical attentional dysfunction in anxiety. |
Jan Herding; Simon Ludwig; Alexander von Lautz; Bernhard Spitzer; Felix Blankenburg Centro-parietal EEG potentials index subjective evidence and confidence during perceptual decision making Journal Article NeuroImage, 201 , pp. 1–11, 2019. @article{Herding2019, title = {Centro-parietal EEG potentials index subjective evidence and confidence during perceptual decision making}, author = {Jan Herding and Simon Ludwig and Alexander von Lautz and Bernhard Spitzer and Felix Blankenburg}, doi = {10.1016/j.neuroimage.2019.116011}, year = {2019}, date = {2019-01-01}, journal = {NeuroImage}, volume = {201}, pages = {1--11}, abstract = {Recent studies suggest that a centro-parietal positivity (CPP) in the EEG signal tracks the absolute (unsigned) strength of accumulated evidence for choices that require the integration of noisy sensory input. Here, we investigated whether the CPP might also reflect the evidence for decisions based on a quantitative comparison between two sequentially presented stimuli (a signed quantity). We recorded EEG while participants decided whether the latter of two vibrotactile frequencies was higher or lower than the former in six variants of this task (n = 116). To account for biases in sequential comparisons, we applied a behavioral model based on Bayesian inference that estimated subjectively perceived frequency differences. Immediately after the second stimulus, parietal ERPs reflected the signed value of subjectively perceived differences and afterwards their absolute value. Strikingly, the modulation by signed difference was evident in trials without any objective evidence for either choice and correlated with choice-selective premotor beta band amplitudes. Modulations by the absolute strength of subjectively perceived evidence-a direct indicator of task difficulty-exhibited all features of statistical decision confidence. 
Together, our data suggest that parietal EEG signals first index subjective evidence, and later include a measure of confidence in the context of perceptual decision making.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recent studies suggest that a centro-parietal positivity (CPP) in the EEG signal tracks the absolute (unsigned) strength of accumulated evidence for choices that require the integration of noisy sensory input. Here, we investigated whether the CPP might also reflect the evidence for decisions based on a quantitative comparison between two sequentially presented stimuli (a signed quantity). We recorded EEG while participants decided whether the latter of two vibrotactile frequencies was higher or lower than the former in six variants of this task (n = 116). To account for biases in sequential comparisons, we applied a behavioral model based on Bayesian inference that estimated subjectively perceived frequency differences. Immediately after the second stimulus, parietal ERPs reflected the signed value of subjectively perceived differences and afterwards their absolute value. Strikingly, the modulation by signed difference was evident in trials without any objective evidence for either choice and correlated with choice-selective premotor beta band amplitudes. Modulations by the absolute strength of subjectively perceived evidence-a direct indicator of task difficulty-exhibited all features of statistical decision confidence. Together, our data suggest that parietal EEG signals first index subjective evidence, and later include a measure of confidence in the context of perceptual decision making. |
Jim D Herring; Sophie Esterer; Tom R Marshall; Ole Jensen; Til O Bergmann Low-frequency alternating current stimulation rhythmically suppresses gamma-band oscillations and impairs perceptual performance Journal Article NeuroImage, 184 , pp. 440–449, 2019. @article{Herring2019, title = {Low-frequency alternating current stimulation rhythmically suppresses gamma-band oscillations and impairs perceptual performance}, author = {Jim D Herring and Sophie Esterer and Tom R Marshall and Ole Jensen and Til O Bergmann}, doi = {10.1016/j.neuroimage.2018.09.047}, year = {2019}, date = {2019-01-01}, journal = {NeuroImage}, volume = {184}, pages = {440--449}, publisher = {Elsevier Ltd}, abstract = {Low frequency oscillations such as alpha (8–12 Hz) are hypothesized to rhythmically gate sensory processing, reflected by 40–100 Hz gamma band activity, via the mechanism of pulsed inhibition. We applied transcranial alternating current stimulation (TACS) at individual alpha frequency (IAF) and flanking frequencies (IAF-4 Hz, IAF+4 Hz) to the occipital cortex of healthy human volunteers during concurrent magnetoencephalography (MEG), while participants performed a visual detection task inducing strong gamma-band responses. Occipital (but not retinal) TACS phasically suppressed stimulus-induced gamma oscillations in the visual cortex and impaired target detection, with stronger phase-to-amplitude coupling predicting behavioral impairments. Retinal control TACS ruled out retino-thalamo-cortical entrainment resulting from (subthreshold) retinal stimulation. All TACS frequencies tested were effective, suggesting that visual gamma-band responses can be modulated by a range of low frequency oscillations. 
We propose that TACS-induced membrane potential modulations mimic the rhythmic change in cortical excitability by which spontaneous low frequency oscillations may eventually exert their impact when gating sensory processing via pulsed inhibition.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Low frequency oscillations such as alpha (8–12 Hz) are hypothesized to rhythmically gate sensory processing, reflected by 40–100 Hz gamma band activity, via the mechanism of pulsed inhibition. We applied transcranial alternating current stimulation (TACS) at individual alpha frequency (IAF) and flanking frequencies (IAF-4 Hz, IAF+4 Hz) to the occipital cortex of healthy human volunteers during concurrent magnetoencephalography (MEG), while participants performed a visual detection task inducing strong gamma-band responses. Occipital (but not retinal) TACS phasically suppressed stimulus-induced gamma oscillations in the visual cortex and impaired target detection, with stronger phase-to-amplitude coupling predicting behavioral impairments. Retinal control TACS ruled out retino-thalamo-cortical entrainment resulting from (subthreshold) retinal stimulation. All TACS frequencies tested were effective, suggesting that visual gamma-band responses can be modulated by a range of low frequency oscillations. We propose that TACS-induced membrane potential modulations mimic the rhythmic change in cortical excitability by which spontaneous low frequency oscillations may eventually exert their impact when gating sensory processing via pulsed inhibition. |
Philipp N Hesse; Constanze Schmitt; Steffen Klingenhoefer; Frank Bremmer Preattentive processing of numerical visual information Journal Article Frontiers in Human Neuroscience, 11 , pp. 1–14, 2017. @article{Hesse2017b, title = {Preattentive processing of numerical visual information}, author = {Philipp N Hesse and Constanze Schmitt and Steffen Klingenhoefer and Frank Bremmer}, doi = {10.3389/fnhum.2017.00070}, year = {2017}, date = {2017-01-01}, journal = {Frontiers in Human Neuroscience}, volume = {11}, pages = {1--14}, abstract = {Humans can perceive and estimate approximate numerical information, even when accurate counting is impossible e.g. due to short presentation time. If the number of objects to be estimated is small, typically around one to four items, observers are able to give very fast and precise judgments with high confidence – an effect that is called subitizing. Due to its speed and effortless nature subitizing has usually been assumed to be preattentive, putting it into the same category as other low level visual features like color or orientation. More recently, however, a number of studies have suggested that subitizing might be dependent on attentional resources. In our current study we investigated the potentially preattentive nature of visual numerical perception in the subitizing range by means of EEG. We presented peripheral, task irrelevant sequences of stimuli consisting of a certain number of circular patches while participants were engaged in a demanding, non-numerical detection task at the fixation point drawing attention away from the number stimuli. Within a sequence of stimuli of a given number of patches (called ‘standards') we interspersed some stimuli of different numerosity (‘oddballs'). We compared the evoked responses to visually identical stimuli that had been presented in two different conditions, serving as standard in one condition and as oddball in the other. 
We found significant visual mismatch negativity (vMMN) responses over parieto-occipital electrodes. In addition to the ERP analysis, we performed a time-frequency analysis to investigate whether the vMMN was accompanied by additional oscillatory processes. We found a concurrent increase in evoked theta power of similar strength over both hemispheres. Our results provide clear evidence for a preattentive processing of numerical visual information in the subitizing range.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Humans can perceive and estimate approximate numerical information, even when accurate counting is impossible e.g. due to short presentation time. If the number of objects to be estimated is small, typically around one to four items, observers are able to give very fast and precise judgments with high confidence – an effect that is called subitizing. Due to its speed and effortless nature subitizing has usually been assumed to be preattentive, putting it into the same category as other low level visual features like color or orientation. More recently, however, a number of studies have suggested that subitizing might be dependent on attentional resources. In our current study we investigated the potentially preattentive nature of visual numerical perception in the subitizing range by means of EEG. We presented peripheral, task irrelevant sequences of stimuli consisting of a certain number of circular patches while participants were engaged in a demanding, non-numerical detection task at the fixation point drawing attention away from the number stimuli. Within a sequence of stimuli of a given number of patches (called ‘standards') we interspersed some stimuli of different numerosity (‘oddballs'). We compared the evoked responses to visually identical stimuli that had been presented in two different conditions, serving as standard in one condition and as oddball in the other. 
We found significant visual mismatch negativity (vMMN) responses over parieto-occipital electrodes. In addition to the ERP analysis, we performed a time-frequency analysis to investigate whether the vMMN was accompanied by additional oscillatory processes. We found a concurrent increase in evoked theta power of similar strength over both hemispheres. Our results provide clear evidence for a preattentive processing of numerical visual information in the subitizing range. |
Hannah Hiebel; Anja Ischebeck; Clemens Brunner; Andrey R Nikolaev; Margit Höfler; Christof Körner Target probability modulates fixation-related potentials in visual search Journal Article Biological Psychology, 138 , pp. 199–210, 2018. @article{Hiebel2018, title = {Target probability modulates fixation-related potentials in visual search}, author = {Hannah Hiebel and Anja Ischebeck and Clemens Brunner and Andrey R Nikolaev and Margit Höfler and Christof Körner}, doi = {10.1016/j.biopsycho.2018.09.007}, year = {2018}, date = {2018-01-01}, journal = {Biological Psychology}, volume = {138}, pages = {199--210}, publisher = {Elsevier}, abstract = {This study investigated the influence of target probability on the neural response to target detection in free viewing visual search. Participants were asked to indicate the number of targets (one or two) among distractors in a visual search task while EEG and eye movements were co-registered. Target probability was manipulated by varying the set size of the displays between 10, 22, and 30 items. Fixation-related potentials time-locked to first target fixations revealed a pronounced P300 at the centro-parietal cortex with larger amplitudes for set sizes 22 and 30 than for set size 10. With increasing set size, more distractor fixations preceded the detection of the target, resulting in a decreased target probability and, consequently, a larger P300. For distractors, no increase of P300 amplitude with set size was observed. The findings suggest that set size specifically affects target but not distractor processing in overt serial visual search.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study investigated the influence of target probability on the neural response to target detection in free viewing visual search. Participants were asked to indicate the number of targets (one or two) among distractors in a visual search task while EEG and eye movements were co-registered. 
Target probability was manipulated by varying the set size of the displays between 10, 22, and 30 items. Fixation-related potentials time-locked to first target fixations revealed a pronounced P300 at the centro-parietal cortex with larger amplitudes for set sizes 22 and 30 than for set size 10. With increasing set size, more distractor fixations preceded the detection of the target, resulting in a decreased target probability and, consequently, a larger P300. For distractors, no increase of P300 amplitude with set size was observed. The findings suggest that set size specifically affects target but not distractor processing in overt serial visual search. |
Rinat Hilo-Merkovich; Marisa Carrasco; Shlomit Yuval-Greenberg Task performance in covert, but not overt, attention correlates with early laterality of visual evoked potentials Journal Article Neuropsychologia, 119 , pp. 330–339, 2018. @article{HiloMerkovich2018, title = {Task performance in covert, but not overt, attention correlates with early laterality of visual evoked potentials}, author = {Rinat Hilo-Merkovich and Marisa Carrasco and Shlomit Yuval-Greenberg}, doi = {10.1016/j.neuropsychologia.2018.08.012}, year = {2018}, date = {2018-01-01}, journal = {Neuropsychologia}, volume = {119}, pages = {330--339}, publisher = {Elsevier Ltd}, abstract = {Attention affects visual perception at target locations via the amplification of stimuli signal strength, perceptual performance and perceived contrast. Behavioral and neural correlates of attention can be observed when attention is both covertly and overtly oriented (with or without accompanying eye movements). Previous studies have demonstrated that at the grand-average level, lateralization of Event Related Potentials (ERP) is associated with attentional facilitation at cued, relative to un-cued locations. Yet, the correspondence between ERP lateralization and behavior has not been established at the single-subject level. Specifically, it is an open question whether inter-individual differences in the neural manifestation of attentional orienting can predict differences in perception. Here, we addressed this question by examining the correlation between ERP lateralization and visual sensitivity at attended locations. Participants were presented with a cue indicating where a low-contrast grating patch target will appear, following a delay of varying durations. During this delay, while participants were waiting for the target to appear, a task-irrelevant checkerboard probe was presented briefly and bilaterally. ERP was measured relative to the onset of this probe. 
In separate blocks, participants were requested to report detection of a low-contrast target either by making a fast eye-movement toward the target (overt orienting), or by pressing a button (covert orienting). Results show that in the covert orienting condition, ERP lateralization of individual participants was positively correlated with their mean visual sensitivity for the target. But, no such correlation was found in the overt orienting condition. We conclude that ERP lateralization of individual participants can predict their performance on a covert, but not an overt, target detection task.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Attention affects visual perception at target locations via the amplification of stimuli signal strength, perceptual performance and perceived contrast. Behavioral and neural correlates of attention can be observed when attention is both covertly and overtly oriented (with or without accompanying eye movements). Previous studies have demonstrated that at the grand-average level, lateralization of Event Related Potentials (ERP) is associated with attentional facilitation at cued, relative to un-cued locations. Yet, the correspondence between ERP lateralization and behavior has not been established at the single-subject level. Specifically, it is an open question whether inter-individual differences in the neural manifestation of attentional orienting can predict differences in perception. Here, we addressed this question by examining the correlation between ERP lateralization and visual sensitivity at attended locations. Participants were presented with a cue indicating where a low-contrast grating patch target will appear, following a delay of varying durations. During this delay, while participants were waiting for the target to appear, a task-irrelevant checkerboard probe was presented briefly and bilaterally. ERP was measured relative to the onset of this probe. 
In separate blocks, participants were requested to report detection of a low-contrast target either by making a fast eye-movement toward the target (overt orienting), or by pressing a button (covert orienting). Results show that in the covert orienting condition, ERP lateralization of individual participants was positively correlated with their mean visual sensitivity for the target. But, no such correlation was found in the overt orienting condition. We conclude that ERP lateralization of individual participants can predict their performance on a covert, but not an overt, target detection task. |
James E Hoffman; Minwoo Kim; Matt Taylor; Kelsey Holiday Emotional capture during emotion-induced blindness is not automatic Journal Article Cortex, 122 , pp. 140–158, 2020. @article{Hoffman2020, title = {Emotional capture during emotion-induced blindness is not automatic}, author = {James E Hoffman and Minwoo Kim and Matt Taylor and Kelsey Holiday}, doi = {10.1016/j.cortex.2019.03.013}, year = {2020}, date = {2020-01-01}, journal = {Cortex}, volume = {122}, pages = {140--158}, publisher = {Elsevier Ltd}, abstract = {The present research used behavioral and event-related brain potentials (ERP) measures to determine whether emotional capture is automatic in the emotion-induced blindness (EIB) paradigm. The first experiment varied the priority of performing two concurrent tasks: identifying a negative or neutral picture appearing in a rapid serial visual presentation (RSVP) stream of pictures and multiple object tracking (MOT). Results showed that increased attention to the MOT task resulted in decreased accuracy for identifying both negative and neutral target pictures accompanied by decreases in the amplitude of the P3b component. In contrast, the early posterior negativity (EPN) component elicited by negative pictures was unaffected by variations in attention. Similarly, there was a decrement in MOT performance for dual-task versus single task conditions but no effect of picture type (negative vs neutral) on MOT accuracy which isn't consistent with automatic emotional capture of attention. However, the MOT task might simply be insensitive to brief interruptions of attention. The second experiment used a more sensitive reaction time (RT) measure to examine this possibility. Results showed that RT to discriminate a gap appearing in a tracked object was delayed by the simultaneous appearance of to-be-ignored distractor pictures even though MOT performance was once again unaffected by the distractor. 
Importantly, the RT delay was the same for both negative and neutral distractors suggesting that capture was driven by physical salience rather than emotional salience of the distractors. Despite this lack of emotional capture, the EPN component, which is thought to reflect emotional capture, was still present. We suggest that the EPN doesn't reflect capture but rather downstream effects of attention, including object recognition. These results show that capture by emotional pictures in EIB can be suppressed when attention is engaged in another difficult task. The results have important implications for understanding capture effects in EIB.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The present research used behavioral and event-related brain potentials (ERP) measures to determine whether emotional capture is automatic in the emotion-induced blindness (EIB) paradigm. The first experiment varied the priority of performing two concurrent tasks: identifying a negative or neutral picture appearing in a rapid serial visual presentation (RSVP) stream of pictures and multiple object tracking (MOT). Results showed that increased attention to the MOT task resulted in decreased accuracy for identifying both negative and neutral target pictures accompanied by decreases in the amplitude of the P3b component. In contrast, the early posterior negativity (EPN) component elicited by negative pictures was unaffected by variations in attention. Similarly, there was a decrement in MOT performance for dual-task versus single task conditions but no effect of picture type (negative vs neutral) on MOT accuracy which isn't consistent with automatic emotional capture of attention. However, the MOT task might simply be insensitive to brief interruptions of attention. The second experiment used a more sensitive reaction time (RT) measure to examine this possibility. 
Results showed that RT to discriminate a gap appearing in a tracked object was delayed by the simultaneous appearance of to-be-ignored distractor pictures even though MOT performance was once again unaffected by the distractor. Importantly, the RT delay was the same for both negative and neutral distractors suggesting that capture was driven by physical salience rather than emotional salience of the distractors. Despite this lack of emotional capture, the EPN component, which is thought to reflect emotional capture, was still present. We suggest that the EPN doesn't reflect capture but rather downstream effects of attention, including object recognition. These results show that capture by emotional pictures in EIB can be suppressed when attention is engaged in another difficult task. The results have important implications for understanding capture effects in EIB. |
Nora Hollenstein; Jonathan Rotsztejn; Marius Troendle; Andreas Pedroni; Ce Zhang; Nicolas Langer Data descriptor: ZuCo, a simultaneous EEG and eye-tracking resource for natural sentence reading Journal Article Scientific Data, 5 , pp. 1–13, 2018. @article{Hollenstein2018, title = {Data descriptor: ZuCo, a simultaneous EEG and eye-tracking resource for natural sentence reading}, author = {Nora Hollenstein and Jonathan Rotsztejn and Marius Troendle and Andreas Pedroni and Ce Zhang and Nicolas Langer}, doi = {10.1038/sdata.2018.291}, year = {2018}, date = {2018-01-01}, journal = {Scientific Data}, volume = {5}, pages = {1--13}, publisher = {The Author(s)}, abstract = {We present the Zurich Cognitive Language Processing Corpus (ZuCo), a dataset combining electroencephalography (EEG) and eye-tracking recordings from subjects reading natural sentences. ZuCo includes high-density EEG and eye-tracking data of 12 healthy adult native English speakers, each reading natural English text for 4–6 hours. The recordings span two normal reading tasks and one task-specific reading task, resulting in a dataset that encompasses EEG and eye-tracking data of 21,629 words in 1107 sentences and 154,173 fixations. We believe that this dataset represents a valuable resource for natural language processing (NLP). The EEG and eye-tracking signals lend themselves to train improved machine-learning models for various tasks, in particular for information extraction tasks such as entity and relation extraction and sentiment analysis. Moreover, this dataset is useful for advancing research into the human reading and language understanding process at the level of brain activity and eye-movement.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We present the Zurich Cognitive Language Processing Corpus (ZuCo), a dataset combining electroencephalography (EEG) and eye-tracking recordings from subjects reading natural sentences. 
ZuCo includes high-density EEG and eye-tracking data of 12 healthy adult native English speakers, each reading natural English text for 4–6 hours. The recordings span two normal reading tasks and one task-specific reading task, resulting in a dataset that encompasses EEG and eye-tracking data of 21,629 words in 1107 sentences and 154,173 fixations. We believe that this dataset represents a valuable resource for natural language processing (NLP). The EEG and eye-tracking signals lend themselves to train improved machine-learning models for various tasks, in particular for information extraction tasks such as entity and relation extraction and sentiment analysis. Moreover, this dataset is useful for advancing research into the human reading and language understanding process at the level of brain activity and eye-movement. |
Linbi Hong; Jennifer M Walz; Paul Sajda Your eyes give you away: Prestimulus changes in pupil diameter correlate with poststimulus task-related EEG dynamics Journal Article PLoS ONE, 9 (3), pp. e91321, 2014. @article{Hong2014, title = {Your eyes give you away: Prestimulus changes in pupil diameter correlate with poststimulus task-related EEG dynamics}, author = {Linbi Hong and Jennifer M Walz and Paul Sajda}, doi = {10.1371/journal.pone.0091321}, year = {2014}, date = {2014-01-01}, journal = {PLoS ONE}, volume = {9}, number = {3}, pages = {e91321}, abstract = {Pupillary measures have been linked to arousal and attention as well as activity in the brainstem's locus coeruleus norepinephrine (LC-NE) system. Similarly, there is evidence that evoked EEG responses, such as the P3, might have LC-NE activity as their basis. Since it is not feasible to record electrophysiological data directly from the LC in humans due to its location in the brainstem, an open question has been whether pupillary measures and EEG variability can be linked in a meaningful way to shed light on the nature of the LC-NE role in attention and arousal. We used an auditory oddball task with a data-driven approach to learn task-relevant projections of the EEG, for windows of data spanning the entire trial. We investigated linear and quadratic relationships between the evoked EEG along these projections and both prestimulus (baseline) and poststimulus (evoked dilation) pupil diameter measurements. We found that baseline pupil diameter correlates with early (175-200 ms) and late (350-400 ms) EEG component variability, suggesting a linear relationship between baseline (tonic) LC-NE activity and evoked EEG. We found no relationships between evoked EEG and evoked pupil dilation, which is often associated with evoked (phasic) LC activity. 
After regressing out reaction time (RT), the correlation between EEG variability and baseline pupil diameter remained, suggesting that such correlation is not explainable by RT variability. We also investigated the relationship between these pupil measures and prestimulus EEG alpha activity, which has been reported as a marker of attentional state, and found a negative linear relationship with evoked pupil dilation. In summary, our results demonstrate significant relationships between prestimulus and poststimulus neural and pupillary measures, and they provide further evidence for tight coupling between attentional state and evoked neural activity and for the role of cortical and subcortical networks underlying the process of target detection.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Pupillary measures have been linked to arousal and attention as well as activity in the brainstem's locus coeruleus norepinephrine (LC-NE) system. Similarly, there is evidence that evoked EEG responses, such as the P3, might have LC-NE activity as their basis. Since it is not feasible to record electrophysiological data directly from the LC in humans due to its location in the brainstem, an open question has been whether pupillary measures and EEG variability can be linked in a meaningful way to shed light on the nature of the LC-NE role in attention and arousal. We used an auditory oddball task with a data-driven approach to learn task-relevant projections of the EEG, for windows of data spanning the entire trial. We investigated linear and quadratic relationships between the evoked EEG along these projections and both prestimulus (baseline) and poststimulus (evoked dilation) pupil diameter measurements. We found that baseline pupil diameter correlates with early (175-200 ms) and late (350-400 ms) EEG component variability, suggesting a linear relationship between baseline (tonic) LC-NE activity and evoked EEG. 
We found no relationships between evoked EEG and evoked pupil dilation, which is often associated with evoked (phasic) LC activity. After regressing out reaction time (RT), the correlation between EEG variability and baseline pupil diameter remained, suggesting that such correlation is not explainable by RT variability. We also investigated the relationship between these pupil measures and prestimulus EEG alpha activity, which has been reported as a marker of attentional state, and found a negative linear relationship with evoked pupil dilation. In summary, our results demonstrate significant relationships between prestimulus and poststimulus neural and pupillary measures, and they provide further evidence for tight coupling between attentional state and evoked neural activity and for the role of cortical and subcortical networks underlying the process of target detection. |
Taylor Hornung; Wen Hsuan Chan; Ralph Axel Müller; Jeanne Townsend; Brandon Keehn Dopaminergic hypo-activity and reduced theta-band power in autism spectrum disorder: A resting-state EEG study Journal Article International Journal of Psychophysiology, 146 , pp. 101–106, 2019. @article{Hornung2019, title = {Dopaminergic hypo-activity and reduced theta-band power in autism spectrum disorder: A resting-state EEG study}, author = {Taylor Hornung and Wen Hsuan Chan and Ralph Axel Müller and Jeanne Townsend and Brandon Keehn}, doi = {10.1016/j.ijpsycho.2019.08.012}, year = {2019}, date = {2019-01-01}, journal = {International Journal of Psychophysiology}, volume = {146}, pages = {101--106}, publisher = {Elsevier}, abstract = {Background: Prior studies using a variety of methodologies have reported inconsistent dopamine (DA) findings in individuals with autism spectrum disorder (ASD), ranging from dopaminergic hypo- to hyper-activity. Theta-band power derived from scalp-recorded electroencephalography (EEG), which may be associated with dopamine levels in frontal cortex, has also been shown to be atypical in ASD. The present study examined spontaneous eye-blink rate (EBR), an indirect, non-invasive measure of central dopaminergic activity, and theta power in children with ASD to determine: 1) whether ASD may be associated with atypical DA levels, and 2) whether dopaminergic dysfunction may be associated with aberrant theta-band activation. Method: Participants included thirty-two children with ASD and thirty-two age-, IQ-, and sex-matched typically developing (TD) children. Electroencephalography and eye-tracking data were acquired while participants completed an eyes-open resting-state session. Blinks were counted and EBR was determined by dividing blink frequency by session duration and theta power (4–7.5 Hz) was extracted from midline leads. Results: Eye-blink rate and theta-band activity were significantly reduced in children with ASD as compared to their TD peers. 
For all participants, greater midline theta power was associated with increased EBR (related to higher DA levels). Conclusions: These results suggest that ASD may be associated with dopaminergic hypo-activity, and that this may contribute to atypical theta-band power. Lastly, EBR may be a useful tool to non-invasively index dopamine levels in ASD and could potentially have many clinical applications, including selecting treatment options and monitoring treatment response.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Prior studies using a variety of methodologies have reported inconsistent dopamine (DA) findings in individuals with autism spectrum disorder (ASD), ranging from dopaminergic hypo- to hyper-activity. Theta-band power derived from scalp-recorded electroencephalography (EEG), which may be associated with dopamine levels in frontal cortex, has also been shown to be atypical in ASD. The present study examined spontaneous eye-blink rate (EBR), an indirect, non-invasive measure of central dopaminergic activity, and theta power in children with ASD to determine: 1) whether ASD may be associated with atypical DA levels, and 2) whether dopaminergic dysfunction may be associated with aberrant theta-band activation. Method: Participants included thirty-two children with ASD and thirty-two age-, IQ-, and sex-matched typically developing (TD) children. Electroencephalography and eye-tracking data were acquired while participants completed an eyes-open resting-state session. Blinks were counted and EBR was determined by dividing blink frequency by session duration and theta power (4–7.5 Hz) was extracted from midline leads. Results: Eye-blink rate and theta-band activity were significantly reduced in children with ASD as compared to their TD peers. For all participants, greater midline theta power was associated with increased EBR (related to higher DA levels). 
Conclusions: These results suggest that ASD may be associated with dopaminergic hypo-activity, and that this may contribute to atypical theta-band power. Lastly, EBR may be a useful tool to non-invasively index dopamine levels in ASD and could potentially have many clinical applications, including selecting treatment options and monitoring treatment response. |
Jörn M Horschig; Ole Jensen; Martine R van Schouwenburg; Roshan Cools; Mathilde Bonnefond Alpha activity reflects individual abilities to adapt to the environment Journal Article NeuroImage, 89 , pp. 235–243, 2014. @article{Horschig2014, title = {Alpha activity reflects individual abilities to adapt to the environment}, author = {Jörn M Horschig and Ole Jensen and Martine R van Schouwenburg and Roshan Cools and Mathilde Bonnefond}, doi = {10.1016/j.neuroimage.2013.12.018}, year = {2014}, date = {2014-01-01}, journal = {NeuroImage}, volume = {89}, pages = {235--243}, publisher = {Elsevier Inc.}, abstract = {Recent findings suggest that oscillatory alpha activity (7–13 Hz) is associated with functional inhibition of sensory regions by filtering incoming information. Accordingly the alpha power in visual regions varies in anticipation of upcoming, predictable stimuli which has consequences for visual processing and subsequent behavior. In covert spatial attention studies it has been demonstrated that performance correlates with the adaptation of alpha power in response to explicit spatial cueing. However it remains unknown whether such an adaptation also occurs in response to implicit statistical properties of a task. In a covert attention switching paradigm, we here show evidence that individuals differ on how they adapt to implicit statistical properties of the task. Subjects whose behavioral performance reflects the implicit change in switch trial likelihood show strong adjustment of anticipatory alpha power lateralization. Most importantly, the stronger the behavioral adjustment to the switch trial likelihood was, the stronger the adjustment of anticipatory posterior alpha lateralization. 
We conclude that anticipatory spatial attention is reflected in the distribution of posterior alpha band power which is predictive of individual detection performance in response to the implicit statistical properties of the task.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recent findings suggest that oscillatory alpha activity (7–13 Hz) is associated with functional inhibition of sensory regions by filtering incoming information. Accordingly the alpha power in visual regions varies in anticipation of upcoming, predictable stimuli which has consequences for visual processing and subsequent behavior. In covert spatial attention studies it has been demonstrated that performance correlates with the adaptation of alpha power in response to explicit spatial cueing. However it remains unknown whether such an adaptation also occurs in response to implicit statistical properties of a task. In a covert attention switching paradigm, we here show evidence that individuals differ on how they adapt to implicit statistical properties of the task. Subjects whose behavioral performance reflects the implicit change in switch trial likelihood show strong adjustment of anticipatory alpha power lateralization. Most importantly, the stronger the behavioral adjustment to the switch trial likelihood was, the stronger the adjustment of anticipatory posterior alpha lateralization. We conclude that anticipatory spatial attention is reflected in the distribution of posterior alpha band power which is predictive of individual detection performance in response to the implicit statistical properties of the task. |
Jörn M Horschig; Wouter Oosterheert; Robert Oostenveld; Ole Jensen Modulation of posterior alpha activity by spatial attention allows for controlling a continuous brain–computer interface Journal Article Brain Topography, 28 (6), pp. 852–864, 2015. @article{Horschig2015, title = {Modulation of posterior alpha activity by spatial attention allows for controlling a continuous brain–computer interface}, author = {Jörn M Horschig and Wouter Oosterheert and Robert Oostenveld and Ole Jensen}, doi = {10.1007/s10548-014-0401-7}, year = {2015}, date = {2015-01-01}, journal = {Brain Topography}, volume = {28}, number = {6}, pages = {852--864}, abstract = {Here we report that the modulation of alpha activity by covert attention can be used as a control signal in an online brain-computer interface, that it is reliable, and that it is robust. Subjects were instructed to orient covert visual attention to the left or right hemifield. We decoded the direction of attention from the magnetoencephalogram by a template matching classifier and provided the classification outcome to the subject in real-time using a novel graphical user interface. Training data for the templates were obtained from a Posner-cueing task conducted just before the BCI task. Eleven subjects participated in four sessions each. Eight of the subjects achieved classification rates significantly above chance level. Subjects were able to significantly increase their performance from the first to the second session. Individual patterns of posterior alpha power remained stable throughout the four sessions and did not change with increased performance. We conclude that posterior alpha power can successfully be used as a control signal in brain-computer interfaces. 
We also discuss several ideas for further improving the setup and propose future research based on solid hypotheses about behavioral consequences of modulating neuronal oscillations by brain computer interfacing.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Here we report that the modulation of alpha activity by covert attention can be used as a control signal in an online brain-computer interface, that it is reliable, and that it is robust. Subjects were instructed to orient covert visual attention to the left or right hemifield. We decoded the direction of attention from the magnetoencephalogram by a template matching classifier and provided the classification outcome to the subject in real-time using a novel graphical user interface. Training data for the templates were obtained from a Posner-cueing task conducted just before the BCI task. Eleven subjects participated in four sessions each. Eight of the subjects achieved classification rates significantly above chance level. Subjects were able to significantly increase their performance from the first to the second session. Individual patterns of posterior alpha power remained stable throughout the four sessions and did not change with increased performance. We conclude that posterior alpha power can successfully be used as a control signal in brain-computer interfaces. We also discuss several ideas for further improving the setup and propose future research based on solid hypotheses about behavioral consequences of modulating neuronal oscillations by brain computer interfacing. |
Christoph Huber-Huber; Thomas Ditye; María Marchante Fernández; Ulrich Ansorge Using temporally aligned event-related potentials for the investigation of attention shifts prior to and during saccades Journal Article Neuropsychologia, 92 , pp. 129–141, 2016. @article{HuberHuber2016, title = {Using temporally aligned event-related potentials for the investigation of attention shifts prior to and during saccades}, author = {Christoph Huber-Huber and Thomas Ditye and María {Marchante Fernández} and Ulrich Ansorge}, doi = {10.1016/j.neuropsychologia.2016.03.035}, year = {2016}, date = {2016-01-01}, journal = {Neuropsychologia}, volume = {92}, pages = {129--141}, publisher = {Elsevier}, abstract = {According to the pre-motor theory of attention, attention is shifted to a saccade's landing position before the saccade is executed. Such pre-saccadic attention shifts are usually studied in psychophysical dual-task conditions, with a target-discrimination task before saccade onset. Here, we present a novel approach to investigate pre-saccadic attention shifts with the help of event-related potentials (ERPs). Participants executed one or two saccades to color-defined targets while ERPs and eye-movements were recorded. In single-target blocks participants executed a single saccade. In two-targets blocks participants made either a single saccade to one of the targets, or two successive saccades to both targets. Importantly, in two-targets blocks, targets could appear on the same or on opposite sides of the vertical midline. This allowed us to study contra-to-ipsilateral ERP differences (such as the N2pc or PCN) that reflect attention shifts to the targets, prior to saccade onset and during saccades. 
If pre-saccadic attention shifts to saccade target locations are necessary for saccade execution and if searched-for saccade targets capture attention, there should be enhanced attentional competition (1) between two targets compared to single targets; (2) between two opposite-sides targets compared to two same-side targets; and (3) in two saccades rather than one saccade conditions: More attentional competition was expected to delay saccade latency and to weaken pre-saccadic laterality effects in ERPs. Hypotheses were tested by means of temporally aligned ERPs that were simultaneously time-locked to stimulus onsets, saccade onsets, and saccade offsets. Predictions (1) and (2) were partly and fully confirmed, respectively, but no evidence was found for (3). We explain the implications of our results for the role of attention during saccade preparation, and we point out how temporally aligned ERPs compare to ICA-based electroencephalogram (EEG) artifact correction procedures and to psychophysical dual-task approaches.}, keywords = {}, pubstate = {published}, tppubtype = {article} } According to the pre-motor theory of attention, attention is shifted to a saccade's landing position before the saccade is executed. Such pre-saccadic attention shifts are usually studied in psychophysical dual-task conditions, with a target-discrimination task before saccade onset. Here, we present a novel approach to investigate pre-saccadic attention shifts with the help of event-related potentials (ERPs). Participants executed one or two saccades to color-defined targets while ERPs and eye-movements were recorded. In single-target blocks participants executed a single saccade. In two-targets blocks participants made either a single saccade to one of the targets, or two successive saccades to both targets. Importantly, in two-targets blocks, targets could appear on the same or on opposite sides of the vertical midline. 
This allowed us to study contra-to-ipsilateral ERP differences (such as the N2pc or PCN) that reflect attention shifts to the targets, prior to saccade onset and during saccades. If pre-saccadic attention shifts to saccade target locations are necessary for saccade execution and if searched-for saccade targets capture attention, there should be enhanced attentional competition (1) between two targets compared to single targets; (2) between two opposite-sides targets compared to two same-side targets; and (3) in two saccades rather than one saccade conditions: More attentional competition was expected to delay saccade latency and to weaken pre-saccadic laterality effects in ERPs. Hypotheses were tested by means of temporally aligned ERPs that were simultaneously time-locked to stimulus onsets, saccade onsets, and saccade offsets. Predictions (1) and (2) were partly and fully confirmed, respectively, but no evidence was found for (3). We explain the implications of our results for the role of attention during saccade preparation, and we point out how temporally aligned ERPs compare to ICA-based electroencephalogram (EEG) artifact correction procedures and to psychophysical dual-task approaches. |
Christoph Huber-Huber; Antimo Buonocore; Olaf Dimigen; Clayton Hickey; David Melcher The peripheral preview effect with faces: Combined EEG and eye-tracking suggests multiple stages of trans-saccadic predictive and non-predictive processing Journal Article NeuroImage, 200 , pp. 344–362, 2019. @article{HuberHuber2019, title = {The peripheral preview effect with faces: Combined EEG and eye-tracking suggests multiple stages of trans-saccadic predictive and non-predictive processing}, author = {Christoph Huber-Huber and Antimo Buonocore and Olaf Dimigen and Clayton Hickey and David Melcher}, doi = {10.1016/j.neuroimage.2019.06.059}, year = {2019}, date = {2019-10-01}, journal = {NeuroImage}, volume = {200}, pages = {344--362}, publisher = {Academic Press Inc.}, abstract = {The world appears stable despite saccadic eye-movements. One possible explanation for this phenomenon is that the visual system predicts upcoming input across saccadic eye-movements based on peripheral preview of the saccadic target. We tested this idea using concurrent electroencephalography (EEG) and eye-tracking. Participants made cued saccades to peripheral upright or inverted face stimuli that changed orientation (invalid preview) or maintained orientation (valid preview) while the saccade was completed. Experiment 1 demonstrated better discrimination performance and a reduced fixation-locked N170 component (fN170) with valid than with invalid preview, demonstrating integration of pre- and post-saccadic information. Moreover, the early fixation-related potentials (FRP) showed a preview face inversion effect suggesting that some pre-saccadic input was represented in the brain until around 170 ms post fixation-onset. Experiment 2 replicated Experiment 1 and manipulated the proportion of valid and invalid trials to test whether the preview effect reflects context-based prediction across trials. A whole-scalp Bayes factor analysis showed that this manipulation did not alter the fN170 preview effect but did influence the face inversion effect before the saccade. 
The pre-saccadic inversion effect declined earlier in the mostly invalid block than in the mostly valid block, which is consistent with the notion of pre-saccadic expectations. In addition, in both studies, we found strong evidence for an interaction between the pre-saccadic preview stimulus and the post-saccadic target as early as 50 ms (Experiment 2) or 90 ms (Experiment 1) into the new fixation. These findings suggest that visual stability may involve three temporal stages: prediction about the saccadic target, integration of pre-saccadic and post-saccadic information at around 50-90 ms post fixation onset, and post-saccadic facilitation of rapid categorization.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The world appears stable despite saccadic eye-movements. One possible explanation for this phenomenon is that the visual system predicts upcoming input across saccadic eye-movements based on peripheral preview of the saccadic target. We tested this idea using concurrent electroencephalography (EEG) and eye-tracking. Participants made cued saccades to peripheral upright or inverted face stimuli that changed orientation (invalid preview) or maintained orientation (valid preview) while the saccade was completed. Experiment 1 demonstrated better discrimination performance and a reduced fixation-locked N170 component (fN170) with valid than with invalid preview, demonstrating integration of pre- and post-saccadic information. Moreover, the early fixation-related potentials (FRP) showed a preview face inversion effect suggesting that some pre-saccadic input was represented in the brain until around 170 ms post fixation-onset. Experiment 2 replicated Experiment 1 and manipulated the proportion of valid and invalid trials to test whether the preview effect reflects context-based prediction across trials. 
A whole-scalp Bayes factor analysis showed that this manipulation did not alter the fN170 preview effect but did influence the face inversion effect before the saccade. The pre-saccadic inversion effect declined earlier in the mostly invalid block than in the mostly valid block, which is consistent with the notion of pre-saccadic expectations. In addition, in both studies, we found strong evidence for an interaction between the pre-saccadic preview stimulus and the post-saccadic target as early as 50 ms (Experiment 2) or 90 ms (Experiment 1) into the new fixation. These findings suggest that visual stability may involve three temporal stages: prediction about the saccadic target, integration of pre-saccadic and post-saccadic information at around 50-90 ms post fixation onset, and post-saccadic facilitation of rapid categorization. |
Samuel B Hutton; Brendan S Weekes Low frequency rTMS over posterior parietal cortex impairs smooth pursuit eye tracking Journal Article Experimental Brain Research, 183 (2), pp. 195–200, 2007. @article{Hutton2007, title = {Low frequency rTMS over posterior parietal cortex impairs smooth pursuit eye tracking}, author = {Samuel B Hutton and Brendan S Weekes}, doi = {10.1007/s00221-007-1033-x}, year = {2007}, date = {2007-01-01}, journal = {Experimental Brain Research}, volume = {183}, number = {2}, pages = {195--200}, abstract = {The role of the posterior parietal cortex in smooth pursuit eye movements remains unclear. We used low frequency repetitive transcranial magnetic stimulation (rTMS) to study the cognitive and neural systems involved in the control of smooth pursuit eye movements. Eighteen participants were tested on two separate occasions. On each occasion we measured smooth pursuit eye tracking before and after 6 min of 1 Hz rTMS delivered at 90% of motor threshold. Low frequency rTMS over the posterior parietal cortex led to a significant reduction in smooth pursuit velocity gain, whereas rTMS over the motor cortex had no effect on gain. We conclude that low frequency offline rTMS is a potentially useful tool with which to explore the cortical systems involved in oculomotor control.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The role of the posterior parietal cortex in smooth pursuit eye movements remains unclear. We used low frequency repetitive transcranial magnetic stimulation (rTMS) to study the cognitive and neural systems involved in the control of smooth pursuit eye movements. Eighteen participants were tested on two separate occasions. On each occasion we measured smooth pursuit eye tracking before and after 6 min of 1 Hz rTMS delivered at 90% of motor threshold. 
Low frequency rTMS over the posterior parietal cortex led to a significant reduction in smooth pursuit velocity gain, whereas rTMS over the motor cortex had no effect on gain. We conclude that low frequency offline rTMS is a potentially useful tool with which to explore the cortical systems involved in oculomotor control. |
Florian Hutzler; Isabella Fuchs; Benjamin Gagl; Sarah Schuster; Fabio Richlan; Mario Braun; Stefan Hawelka Parafoveal X-masks interfere with foveal word recognition: Evidence from fixation-related brain potentials Journal Article Frontiers in Systems Neuroscience, 7 , pp. 1–10, 2013. @article{Hutzler2013, title = {Parafoveal X-masks interfere with foveal word recognition: Evidence from fixation-related brain potentials}, author = {Florian Hutzler and Isabella Fuchs and Benjamin Gagl and Sarah Schuster and Fabio Richlan and Mario Braun and Stefan Hawelka}, doi = {10.3389/fnsys.2013.00033}, year = {2013}, date = {2013-01-01}, journal = {Frontiers in Systems Neuroscience}, volume = {7}, pages = {1--10}, abstract = {The boundary paradigm, in combination with parafoveal masks, is the main technique for studying parafoveal preprocessing during reading. The rationale is that the masks (e.g., strings of X's) prevent parafoveal preprocessing, but do not interfere with foveal processing. A recent study, however, raised doubts about the neutrality of parafoveal masks. In the present study, we explored this issue by means of fixation-related brain potentials (FRPs). Two FRP conditions presented rows of five words. The task of the participant was to judge whether the final word of a list was a "new" word, or whether it was a repeated (i.e., "old") word. The critical manipulation was that the final word was X-masked during parafoveal preview in one condition, whereas another condition presented a valid preview of the word. In two additional event-related brain potential (ERP) conditions, the words were presented serially with no parafoveal preview available; in one of the conditions with a fixed timing, in the other word presentation was self-paced by the participants. Expectedly, the valid-preview FRP condition elicited the shortest processing times. 
Processing times did not differ between the two ERP conditions indicating that "cognitive readiness" during self-paced processing can be ruled out as an alternative explanation for differences in processing times between the ERP and the FRP conditions. The longest processing times were found in the X-mask FRP condition indicating that parafoveal X-masks interfere with foveal word recognition.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The boundary paradigm, in combination with parafoveal masks, is the main technique for studying parafoveal preprocessing during reading. The rationale is that the masks (e.g., strings of X's) prevent parafoveal preprocessing, but do not interfere with foveal processing. A recent study, however, raised doubts about the neutrality of parafoveal masks. In the present study, we explored this issue by means of fixation-related brain potentials (FRPs). Two FRP conditions presented rows of five words. The task of the participant was to judge whether the final word of a list was a "new" word, or whether it was a repeated (i.e., "old") word. The critical manipulation was that the final word was X-masked during parafoveal preview in one condition, whereas another condition presented a valid preview of the word. In two additional event-related brain potential (ERP) conditions, the words were presented serially with no parafoveal preview available; in one of the conditions with a fixed timing, in the other word presentation was self-paced by the participants. Expectedly, the valid-preview FRP condition elicited the shortest processing times. Processing times did not differ between the two ERP conditions indicating that "cognitive readiness" during self-paced processing can be ruled out as an alternative explanation for differences in processing times between the ERP and the FRP conditions. 
The longest processing times were found in the X-mask FRP condition indicating that parafoveal X-masks interfere with foveal word recognition. |
Akiko Ikkai; Sangita Dandekar; Clayton E Curtis Lateralization in alpha-band oscillations predicts the locus and spatial distribution of attention Journal Article PLoS ONE, 11 (5), pp. e0154796, 2016. @article{Ikkai2016, title = {Lateralization in alpha-band oscillations predicts the locus and spatial distribution of attention}, author = {Akiko Ikkai and Sangita Dandekar and Clayton E Curtis}, doi = {10.1371/journal.pone.0154796}, year = {2016}, date = {2016-01-01}, journal = {PLoS ONE}, volume = {11}, number = {5}, pages = {e0154796}, abstract = {Attending to a task-relevant location changes how neural activity oscillates in the alpha band (8-13Hz) in posterior visual cortical areas. However, a clear understanding of the relationships between top-down attention, changes in alpha oscillations in visual cortex, and attention performance are still poorly understood. Here, we tested the degree to which the posterior alpha power tracked the locus of attention, the distribution of attention, and how well the topography of alpha could predict the locus of attention. We recorded magnetoencephalographic (MEG) data while subjects performed an attention demanding visual discrimination task that dissociated the direction of attention from the direction of a saccade to indicate choice. On some trials, an endogenous cue predicted the target's location, while on others it contained no spatial information. When the target's location was cued, alpha power decreased in sensors over occipital cortex contralateral to the attended visual field. When the cue did not predict the target's location, alpha power again decreased in sensors over occipital cortex, but bilaterally, and increased in sensors over frontal cortex. Thus, the distribution and the topography of alpha reliably indicated the locus of covert attention. 
Together, these results suggest that alpha synchronization reflects changes in the excitability of populations of neurons whose receptive fields match the locus of attention. This is consistent with the hypothesis that alpha oscillations reflect the neural mechanisms by which top-down control of attention biases information processing and modulate the activity of neurons in visual cortex.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Attending to a task-relevant location changes how neural activity oscillates in the alpha band (8-13Hz) in posterior visual cortical areas. However, a clear understanding of the relationships between top-down attention, changes in alpha oscillations in visual cortex, and attention performance are still poorly understood. Here, we tested the degree to which the posterior alpha power tracked the locus of attention, the distribution of attention, and how well the topography of alpha could predict the locus of attention. We recorded magnetoencephalographic (MEG) data while subjects performed an attention demanding visual discrimination task that dissociated the direction of attention from the direction of a saccade to indicate choice. On some trials, an endogenous cue predicted the target's location, while on others it contained no spatial information. When the target's location was cued, alpha power decreased in sensors over occipital cortex contralateral to the attended visual field. When the cue did not predict the target's location, alpha power again decreased in sensors over occipital cortex, but bilaterally, and increased in sensors over frontal cortex. Thus, the distribution and the topography of alpha reliably indicated the locus of covert attention. Together, these results suggest that alpha synchronization reflects changes in the excitability of populations of neurons whose receptive fields match the locus of attention. 
This is consistent with the hypothesis that alpha oscillations reflect the neural mechanisms by which top-down control of attention biases information processing and modulate the activity of neurons in visual cortex. |
Leyla Isik; Ethan M Meyers; Joel Z Leibo; Tomaso Poggio The dynamics of invariant object recognition in the human visual system Journal Article Journal of Neurophysiology, 111 (1), pp. 91–102, 2014. @article{Isik2014, title = {The dynamics of invariant object recognition in the human visual system}, author = {Leyla Isik and Ethan M Meyers and Joel Z Leibo and Tomaso Poggio}, doi = {10.1152/jn.00394.2013}, year = {2014}, date = {2014-01-01}, journal = {Journal of Neurophysiology}, volume = {111}, number = {1}, pages = {91--102}, abstract = {The human visual system can rapidly recognize objects despite transformations that alter their appearance. The precise timing of when the brain computes neural representations that are invariant to particular transformations, however, has not been mapped in humans. Here we employ magnetoencephalography decoding analysis to measure the dynamics of size- and position-invariant visual information development in the ventral visual stream. With this method we can read out the identity of objects beginning as early as 60 ms. Size- and position-invariant visual information appear around 125 ms and 150 ms, respectively, and both develop in stages, with invariance to smaller transformations arising before invariance to larger transformations. Additionally, the magnetoencephalography sensor activity localizes to neural sources that are in the most posterior occipital regions at the early decoding times and then move temporally as invariant information develops. These results provide previously unknown latencies for key stages of human-invariant object recognition, as well as new and compelling evidence for a feed-forward hierarchical model of invariant object recognition where invariance increases at each successive visual area along the ventral stream.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The human visual system can rapidly recognize objects despite transformations that alter their appearance. 
The precise timing of when the brain computes neural representations that are invariant to particular transformations, however, has not been mapped in humans. Here we employ magnetoencephalography decoding analysis to measure the dynamics of size- and position-invariant visual information development in the ventral visual stream. With this method we can read out the identity of objects beginning as early as 60 ms. Size- and position-invariant visual information appear around 125 ms and 150 ms, respectively, and both develop in stages, with invariance to smaller transformations arising before invariance to larger transformations. Additionally, the magnetoencephalography sensor activity localizes to neural sources that are in the most posterior occipital regions at the early decoding times and then move temporally as invariant information develops. These results provide previously unknown latencies for key stages of human-invariant object recognition, as well as new and compelling evidence for a feed-forward hierarchical model of invariant object recognition where invariance increases at each successive visual area along the ventral stream. |
Leyla Isik; Jedediah M Singer; Joseph R Madsen; Nancy Kanwisher; Gabriel Kreiman What is changing when: Decoding visual information in movies from human intracranial recordings Journal Article NeuroImage, 180 , pp. 147–159, 2018. @article{Isik2018, title = {What is changing when: Decoding visual information in movies from human intracranial recordings}, author = {Leyla Isik and Jedediah M Singer and Joseph R Madsen and Nancy Kanwisher and Gabriel Kreiman}, doi = {10.1016/j.neuroimage.2017.08.027}, year = {2018}, date = {2018-01-01}, journal = {NeuroImage}, volume = {180}, pages = {147--159}, publisher = {Elsevier Ltd}, abstract = {The majority of visual recognition studies have focused on the neural responses to repeated presentations of static stimuli with abrupt and well-defined onset and offset times. In contrast, natural vision involves unique renderings of visual inputs that are continuously changing without explicitly defined temporal transitions. Here we considered commercial movies as a coarse proxy to natural vision. We recorded intracranial field potential signals from 1,284 electrodes implanted in 15 patients with epilepsy while the subjects passively viewed commercial movies. We could rapidly detect large changes in the visual inputs within approximately 100 ms of their occurrence, using exclusively field potential signals from ventral visual cortical areas including the inferior temporal gyrus and inferior occipital gyrus. Furthermore, we could decode the content of those visual changes even in a single movie presentation, generalizing across the wide range of transformations present in a movie. These results present a methodological framework for studying cognition during dynamic and natural vision.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The majority of visual recognition studies have focused on the neural responses to repeated presentations of static stimuli with abrupt and well-defined onset and offset times. 
In contrast, natural vision involves unique renderings of visual inputs that are continuously changing without explicitly defined temporal transitions. Here we considered commercial movies as a coarse proxy to natural vision. We recorded intracranial field potential signals from 1,284 electrodes implanted in 15 patients with epilepsy while the subjects passively viewed commercial movies. We could rapidly detect large changes in the visual inputs within approximately 100 ms of their occurrence, using exclusively field potential signals from ventral visual cortical areas including the inferior temporal gyrus and inferior occipital gyrus. Furthermore, we could decode the content of those visual changes even in a single movie presentation, generalizing across the wide range of transformations present in a movie. These results present a methodological framework for studying cognition during dynamic and natural vision. |
Leyla Isik; Anna Mynick; Dimitrios Pantazis; Nancy Kanwisher The speed of human social interaction perception Journal Article NeuroImage, 215 , pp. 1–10, 2020. @article{Isik2020, title = {The speed of human social interaction perception}, author = {Leyla Isik and Anna Mynick and Dimitrios Pantazis and Nancy Kanwisher}, doi = {10.1016/j.neuroimage.2020.116844}, year = {2020}, date = {2020-01-01}, journal = {NeuroImage}, volume = {215}, pages = {1--10}, publisher = {The Authors}, abstract = {The ability to perceive others' social interactions, here defined as the directed contingent actions between two or more people, is a fundamental part of human experience that develops early in infancy and is shared with other primates. However, the neural computations underlying this ability remain largely unknown. Is social interaction recognition a rapid feedforward process or a slower post-perceptual inference? Here we used magnetoencephalography (MEG) decoding to address this question. Subjects in the MEG viewed snapshots of visually matched real-world scenes containing a pair of people who were either engaged in a social interaction or acting independently. The presence versus absence of a social interaction could be read out from subjects' MEG data spontaneously, even while subjects performed an orthogonal task. This readout generalized across different people and scenes, revealing abstract representations of social interactions in the human brain. These representations, however, did not come online until quite late, at 300 ms after image onset, well after feedforward visual processes. In a second experiment, we found that social interaction readout still occurred at this same late latency even when subjects performed an explicit task detecting social interactions. We further showed that MEG responses distinguished between different types of social interactions (mutual gaze vs joint attention) even later, around 500 ms after image onset. 
Taken together, these results suggest that the human brain spontaneously extracts information about others' social interactions, but does so slowly, likely relying on iterative top-down computations.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The ability to perceive others' social interactions, here defined as the directed contingent actions between two or more people, is a fundamental part of human experience that develops early in infancy and is shared with other primates. However, the neural computations underlying this ability remain largely unknown. Is social interaction recognition a rapid feedforward process or a slower post-perceptual inference? Here we used magnetoencephalography (MEG) decoding to address this question. Subjects in the MEG viewed snapshots of visually matched real-world scenes containing a pair of people who were either engaged in a social interaction or acting independently. The presence versus absence of a social interaction could be read out from subjects' MEG data spontaneously, even while subjects performed an orthogonal task. This readout generalized across different people and scenes, revealing abstract representations of social interactions in the human brain. These representations, however, did not come online until quite late, at 300 ms after image onset, well after feedforward visual processes. In a second experiment, we found that social interaction readout still occurred at this same late latency even when subjects performed an explicit task detecting social interactions. We further showed that MEG responses distinguished between different types of social interactions (mutual gaze vs joint attention) even later, around 500 ms after image onset. Taken together, these results suggest that the human brain spontaneously extracts information about others' social interactions, but does so slowly, likely relying on iterative top-down computations. |
Roxane J Itier; Karly N Neath-Tavares Effects of task demands on the early neural processing of fearful and happy facial expressions Journal Article Brain Research, 1663 , pp. 38–50, 2017. @article{Itier2017, title = {Effects of task demands on the early neural processing of fearful and happy facial expressions}, author = {Roxane J Itier and Karly N Neath-Tavares}, doi = {10.1016/j.brainres.2017.03.013}, year = {2017}, date = {2017-01-01}, journal = {Brain Research}, volume = {1663}, pages = {38--50}, publisher = {Elsevier B.V.}, abstract = {Task demands shape how we process environmental stimuli but their impact on the early neural processing of facial expressions remains unclear. In a within-subject design, ERPs were recorded to the same fearful, happy and neutral facial expressions presented during a gender discrimination, an explicit emotion discrimination and an oddball detection tasks, the most studied tasks in the field. Using an eye tracker, fixation on the face nose was enforced using a gaze-contingent presentation. Task demands modulated amplitudes from 200 to 350 ms at occipito-temporal sites spanning the EPN component. Amplitudes were more negative for fearful than neutral expressions starting on N170 from 150 to 350 ms, with a temporo-occipital distribution, whereas no clear effect of happy expressions was seen. Task and emotion effects never interacted in any time window or for the ERP components analyzed (P1, N170, EPN). Thus, whether emotion is explicitly discriminated or irrelevant for the task at hand, neural correlates of fearful and happy facial expressions seem immune to these task demands during the first 350 ms of visual processing.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Task demands shape how we process environmental stimuli but their impact on the early neural processing of facial expressions remains unclear. 
In a within-subject design, ERPs were recorded to the same fearful, happy and neutral facial expressions presented during a gender discrimination, an explicit emotion discrimination and an oddball detection tasks, the most studied tasks in the field. Using an eye tracker, fixation on the face nose was enforced using a gaze-contingent presentation. Task demands modulated amplitudes from 200 to 350 ms at occipito-temporal sites spanning the EPN component. Amplitudes were more negative for fearful than neutral expressions starting on N170 from 150 to 350 ms, with a temporo-occipital distribution, whereas no clear effect of happy expressions was seen. Task and emotion effects never interacted in any time window or for the ERP components analyzed (P1, N170, EPN). Thus, whether emotion is explicitly discriminated or irrelevant for the task at hand, neural correlates of fearful and happy facial expressions seem immune to these task demands during the first 350 ms of visual processing. |
Roxane J Itier; Frank F Preston Increased early sensitivity to eyes in mouthless faces: In support of the LIFTED model of early face processing Journal Article Brain Topography, 31 (6), pp. 972–984, 2018. @article{Itier2018, title = {Increased early sensitivity to eyes in mouthless faces: In support of the LIFTED model of early face processing}, author = {Roxane J Itier and Frank F Preston}, doi = {10.1007/s10548-018-0663-6}, year = {2018}, date = {2018-01-01}, journal = {Brain Topography}, volume = {31}, number = {6}, pages = {972--984}, publisher = {Springer US}, abstract = {The N170 ERP component is a central neural marker of early face perception usually thought to reflect holistic processing. However, it is also highly sensitive to eyes presented in isolation and to fixation on the eyes within a full face. The lateral inhibition face template and eye detector (LIFTED) model (Nemrodov et al. in NeuroImage 97:81–94, 2014) integrates these views by proposing a neural inhibition mechanism that perceptually glues features into a whole, in parallel to the activity of an eye detector that accounts for the eye sensitivity. The LIFTED model was derived from a large number of results obtained with intact and eyeless faces presented upright and inverted. The present study provided a control condition to the original design by replacing eyeless with mouthless faces, hereby enabling testing of specific predictions derived from the model. Using the same gaze-contingent approach, we replicated the N170 eye sensitivity regardless of face orientation. Furthermore, when eyes were fixated in upright faces, the N170 was larger for mouthless compared to intact faces, while inverted mouthless faces elicited smaller amplitude than intact inverted faces when fixation was on the mouth and nose. 
The results are largely in line with the LIFTED model, in particular with the idea of an inhibition mechanism involved in holistic processing of upright faces and the lack of such inhibition in processing inverted faces. Some modifications to the original model are also proposed based on these results.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The N170 ERP component is a central neural marker of early face perception usually thought to reflect holistic processing. However, it is also highly sensitive to eyes presented in isolation and to fixation on the eyes within a full face. The lateral inhibition face template and eye detector (LIFTED) model (Nemrodov et al. in NeuroImage 97:81–94, 2014) integrates these views by proposing a neural inhibition mechanism that perceptually glues features into a whole, in parallel to the activity of an eye detector that accounts for the eye sensitivity. The LIFTED model was derived from a large number of results obtained with intact and eyeless faces presented upright and inverted. The present study provided a control condition to the original design by replacing eyeless with mouthless faces, hereby enabling testing of specific predictions derived from the model. Using the same gaze-contingent approach, we replicated the N170 eye sensitivity regardless of face orientation. Furthermore, when eyes were fixated in upright faces, the N170 was larger for mouthless compared to intact faces, while inverted mouthless faces elicited smaller amplitude than intact inverted faces when fixation was on the mouth and nose. The results are largely in line with the LIFTED model, in particular with the idea of an inhibition mechanism involved in holistic processing of upright faces and the lack of such inhibition in processing inverted faces. Some modifications to the original model are also proposed based on these results. |
Syaheed B Jabar; Alex Filipowicz; Britt Anderson Tuned by experience: How orientation probability modulates early perceptual processing Journal Article Vision Research, 138 , pp. 86–96, 2017. @article{Jabar2017a, title = {Tuned by experience: How orientation probability modulates early perceptual processing}, author = {Syaheed B Jabar and Alex Filipowicz and Britt Anderson}, doi = {10.1016/j.visres.2017.07.008}, year = {2017}, date = {2017-01-01}, journal = {Vision Research}, volume = {138}, pages = {86--96}, publisher = {Elsevier Ltd}, abstract = {Probable stimuli are more often and more quickly detected. While stimulus probability is known to affect decision-making, it can also be explained as a perceptual phenomenon. Using spatial gratings, we have previously shown that probable orientations are also more precisely estimated, even while participants remained naive to the manipulation. We conducted an electrophysiological study to investigate the effect that probability has on perception and visual-evoked potentials. In line with previous studies on oddballs and stimulus prevalence, low-probability orientations were associated with a greater late positive ‘P300' component which might be related to either surprise or decision-making. However, the early ‘C1' component, thought to reflect V1 processing, was dampened for high-probability orientations while later P1 and N1 components were unaffected. Exploratory analyses revealed a participant-level correlation between C1 and P300 amplitudes, suggesting a link between perceptual processing and decision-making. We discuss how these probability effects could be indicative of sharpening of neurons preferring the probable orientations, due either to perceptual learning, or to feature-based attention.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Probable stimuli are more often and more quickly detected. 
While stimulus probability is known to affect decision-making, it can also be explained as a perceptual phenomenon. Using spatial gratings, we have previously shown that probable orientations are also more precisely estimated, even while participants remained naive to the manipulation. We conducted an electrophysiological study to investigate the effect that probability has on perception and visual-evoked potentials. In line with previous studies on oddballs and stimulus prevalence, low-probability orientations were associated with a greater late positive ‘P300' component which might be related to either surprise or decision-making. However, the early ‘C1' component, thought to reflect V1 processing, was dampened for high-probability orientations while later P1 and N1 components were unaffected. Exploratory analyses revealed a participant-level correlation between C1 and P300 amplitudes, suggesting a link between perceptual processing and decision-making. We discuss how these probability effects could be indicative of sharpening of neurons preferring the probable orientations, due either to perceptual learning, or to feature-based attention. |
Robert Jagiello; Ulrich Pomper; Makoto Yoneya; Sijia Zhao; Maria Chait Rapid brain responses to familiar vs. unfamiliar music-an EEG and pupillometry study Journal Article Scientific Reports, 9 , pp. 15570, 2019. @article{Jagiello2019, title = {Rapid brain responses to familiar vs. unfamiliar music-an EEG and pupillometry study}, author = {Robert Jagiello and Ulrich Pomper and Makoto Yoneya and Sijia Zhao and Maria Chait}, doi = {10.1038/s41598-019-51759-9}, year = {2019}, date = {2019-01-01}, journal = {Scientific Reports}, volume = {9}, pages = {15570}, abstract = {Human listeners exhibit marked sensitivity to familiar music, perhaps most readily revealed by popular "name that tune" games, in which listeners often succeed in recognizing a familiar song based on extremely brief presentation. In this work, we used electroencephalography (EEG) and pupillometry to reveal the temporal signatures of the brain processes that allow differentiation between a familiar, well liked, and unfamiliar piece of music. In contrast to previous work, which has quantified gradual changes in pupil diameter (the so-called "pupil dilation response"), here we focus on the occurrence of pupil dilation events. This approach is substantially more sensitive in the temporal domain and allowed us to tap early activity with the putative salience network. Participants (N = 10) passively listened to snippets (750 ms) of a familiar, personally relevant and, an acoustically matched, unfamiliar song, presented in random order. A group of control participants (N = 12), who were unfamiliar with all of the songs, was also tested. We reveal a rapid differentiation between snippets from familiar and unfamiliar songs: Pupil responses showed greater dilation rate to familiar music from 100-300 ms post-stimulus-onset, consistent with a faster activation of the autonomic salience network. 
Brain responses measured with EEG showed a later differentiation between familiar and unfamiliar music from 350 ms post onset. Remarkably, the cluster pattern identified in the EEG response is very similar to that commonly found in the classic old/new memory retrieval paradigms, suggesting that the recognition of brief, randomly presented, music snippets, draws on similar processes.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Human listeners exhibit marked sensitivity to familiar music, perhaps most readily revealed by popular "name that tune" games, in which listeners often succeed in recognizing a familiar song based on extremely brief presentation. In this work, we used electroencephalography (EEG) and pupillometry to reveal the temporal signatures of the brain processes that allow differentiation between a familiar, well liked, and unfamiliar piece of music. In contrast to previous work, which has quantified gradual changes in pupil diameter (the so-called "pupil dilation response"), here we focus on the occurrence of pupil dilation events. This approach is substantially more sensitive in the temporal domain and allowed us to tap early activity with the putative salience network. Participants (N = 10) passively listened to snippets (750 ms) of a familiar, personally relevant and, an acoustically matched, unfamiliar song, presented in random order. A group of control participants (N = 12), who were unfamiliar with all of the songs, was also tested. We reveal a rapid differentiation between snippets from familiar and unfamiliar songs: Pupil responses showed greater dilation rate to familiar music from 100-300 ms post-stimulus-onset, consistent with a faster activation of the autonomic salience network. Brain responses measured with EEG showed a later differentiation between familiar and unfamiliar music from 350 ms post onset. 
Remarkably, the cluster pattern identified in the EEG response is very similar to that commonly found in the classic old/new memory retrieval paradigms, suggesting that the recognition of brief, randomly presented, music snippets, draws on similar processes. |
David C Jangraw; Jun Wang; Brent J Lance; Shih Fu Chang; Paul Sajda Neurally and ocularly informed graph-based models for searching 3D environments Journal Article Journal of Neural Engineering, 11 (4), pp. 1–13, 2014. @article{Jangraw2014a, title = {Neurally and ocularly informed graph-based models for searching 3D environments}, author = {David C Jangraw and Jun Wang and Brent J Lance and Shih Fu Chang and Paul Sajda}, doi = {10.1088/1741-2560/11/4/046003}, year = {2014}, date = {2014-01-01}, journal = {Journal of Neural Engineering}, volume = {11}, number = {4}, pages = {1--13}, abstract = {OBJECTIVE: As we move through an environment, we are constantly making assessments, judgments and decisions about the things we encounter. Some are acted upon immediately, but many more become mental notes or fleeting impressions-our implicit 'labeling' of the world. In this paper, we use physiological correlates of this labeling to construct a hybrid brain-computer interface (hBCI) system for efficient navigation of a 3D environment. APPROACH: First, we record electroencephalographic (EEG), saccadic and pupillary data from subjects as they move through a small part of a 3D virtual city under free-viewing conditions. Using machine learning, we integrate the neural and ocular signals evoked by the objects they encounter to infer which ones are of subjective interest to them. These inferred labels are propagated through a large computer vision graph of objects in the city, using semi-supervised learning to identify other, unseen objects that are visually similar to the labeled ones. Finally, the system plots an efficient route to help the subjects visit the 'similar' objects it identifies. MAIN RESULTS: We show that by exploiting the subjects' implicit labeling to find objects of interest instead of exploring naively, the median search precision is increased from 25% to 97%, and the median subject need only travel 40% of the distance to see 84% of the objects of interest. 
We also find that the neural and ocular signals contribute in a complementary fashion to the classifiers' inference of subjects' implicit labeling. SIGNIFICANCE: In summary, we show that neural and ocular signals reflecting subjective assessment of objects in a 3D environment can be used to inform a graph-based learning model of that environment, resulting in an hBCI system that improves navigation and information delivery specific to the user's interests.}, keywords = {}, pubstate = {published}, tppubtype = {article} } OBJECTIVE: As we move through an environment, we are constantly making assessments, judgments and decisions about the things we encounter. Some are acted upon immediately, but many more become mental notes or fleeting impressions-our implicit 'labeling' of the world. In this paper, we use physiological correlates of this labeling to construct a hybrid brain-computer interface (hBCI) system for efficient navigation of a 3D environment. APPROACH: First, we record electroencephalographic (EEG), saccadic and pupillary data from subjects as they move through a small part of a 3D virtual city under free-viewing conditions. Using machine learning, we integrate the neural and ocular signals evoked by the objects they encounter to infer which ones are of subjective interest to them. These inferred labels are propagated through a large computer vision graph of objects in the city, using semi-supervised learning to identify other, unseen objects that are visually similar to the labeled ones. Finally, the system plots an efficient route to help the subjects visit the 'similar' objects it identifies. MAIN RESULTS: We show that by exploiting the subjects' implicit labeling to find objects of interest instead of exploring naively, the median search precision is increased from 25% to 97%, and the median subject need only travel 40% of the distance to see 84% of the objects of interest. 
We also find that the neural and ocular signals contribute in a complementary fashion to the classifiers' inference of subjects' implicit labeling. SIGNIFICANCE: In summary, we show that neural and ocular signals reflecting subjective assessment of objects in a 3D environment can be used to inform a graph-based learning model of that environment, resulting in an hBCI system that improves navigation and information delivery specific to the user's interests. |
Woojae Jeong; Seolmin Kim; Yee Joon Kim; Joonyeol Lee Motion direction representation in multivariate electroencephalography activity for smooth pursuit eye movements Journal Article NeuroImage, 202 , pp. 1–10, 2019. @article{Jeong2019, title = {Motion direction representation in multivariate electroencephalography activity for smooth pursuit eye movements}, author = {Woojae Jeong and Seolmin Kim and Yee Joon Kim and Joonyeol Lee}, doi = {10.1016/j.neuroimage.2019.116160}, year = {2019}, date = {2019-01-01}, journal = {NeuroImage}, volume = {202}, pages = {1--10}, publisher = {Elsevier Ltd}, abstract = {Visually-guided smooth pursuit eye movements are composed of initial open-loop and later steady-state periods. Feedforward sensory information dominates the motor behavior during the open-loop pursuit, and a more complex feedback loop regulates the steady-state pursuit. To understand the neural representations of motion direction during open-loop and steady-state smooth pursuits, we recorded electroencephalography (EEG) responses from human observers while they tracked random-dot kinematograms as pursuit targets. We estimated population direction tuning curves from multivariate EEG activity using an inverted encoding model. We found significant direction tuning curves as early as about 60 ms from stimulus onset. Direction tuning responses were generalized to later times during the open-loop smooth pursuit, but they became more dynamic during the later steady-state pursuit. The encoding quality of retinal motion direction information estimated from the early direction tuning curves was predictive of trial-by-trial variation in initial pursuit directions. 
These results suggest that the movement directions of open-loop smooth pursuit are guided by the representation of the retinal motion present in the multivariate EEG activity.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visually-guided smooth pursuit eye movements are composed of initial open-loop and later steady-state periods. Feedforward sensory information dominates the motor behavior during the open-loop pursuit, and a more complex feedback loop regulates the steady-state pursuit. To understand the neural representations of motion direction during open-loop and steady-state smooth pursuits, we recorded electroencephalography (EEG) responses from human observers while they tracked random-dot kinematograms as pursuit targets. We estimated population direction tuning curves from multivariate EEG activity using an inverted encoding model. We found significant direction tuning curves as early as about 60 ms from stimulus onset. Direction tuning responses were generalized to later times during the open-loop smooth pursuit, but they became more dynamic during the later steady-state pursuit. The encoding quality of retinal motion direction information estimated from the early direction tuning curves was predictive of trial-by-trial variation in initial pursuit directions. These results suggest that the movement directions of open-loop smooth pursuit are guided by the representation of the retinal motion present in the multivariate EEG activity. |
Jianrong Jia; Ling Liu; Fang Fang; Huan Luo Sequential sampling of visual objects during sustained attention Journal Article PLoS Biology, 15 (6), pp. e2001903, 2017. @article{Jia2017b, title = {Sequential sampling of visual objects during sustained attention}, author = {Jianrong Jia and Ling Liu and Fang Fang and Huan Luo}, doi = {10.1371/journal.pbio.2001903}, year = {2017}, date = {2017-01-01}, journal = {PLoS Biology}, volume = {15}, number = {6}, pages = {e2001903}, abstract = {In a crowded visual scene, attention must be distributed efficiently and flexibly over time and space to accommodate different contexts. It is well established that selective attention enhances the corresponding neural responses, presumably implying that attention would persistently dwell on the task-relevant item. Meanwhile, recent studies, mostly in divided attentional contexts, suggest that attention does not remain stationary but samples objects alternately over time, suggesting a rhythmic view of attention. However, it remains unknown whether the dynamic mechanism essentially mediates attentional processes at a general level. Importantly, there is also a complete lack of direct neural evidence reflecting whether and how the brain rhythmically samples multiple visual objects during stimulus processing. To address these issues, in this study, we employed electroencephalography (EEG) and a temporal response function (TRF) approach, which can dissociate responses that exclusively represent a single object from the overall neuronal activity, to examine the spatiotemporal characteristics of attention in various attentional contexts. First, attention, which is characterized by inhibitory alpha-band (approximately 10 Hz) activity in TRFs, switches between attended and unattended objects every approximately 200 ms, suggesting a sequential sampling even when attention is required to mostly stay on the attended object. 
Second, the attentional spatiotemporal pattern is modulated by the task context, such that alpha-mediated switching becomes increasingly prominent as the task requires a more uniform distribution of attention. Finally, the switching pattern correlates with attentional behavioral performance. Our work provides direct neural evidence supporting a generally central role of temporal organization mechanism in attention, such that multiple objects are sequentially sorted according to their priority in attentional contexts. The results suggest that selective attention, in addition to the classically posited attentional “focus,” involves a dynamic mechanism for monitoring all objects outside of the focus. Our findings also suggest that attention implements a space (object)-to-time transformation by acting as a series of concatenating attentional chunks that operate on 1 object at a time.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In a crowded visual scene, attention must be distributed efficiently and flexibly over time and space to accommodate different contexts. It is well established that selective attention enhances the corresponding neural responses, presumably implying that attention would persistently dwell on the task-relevant item. Meanwhile, recent studies, mostly in divided attentional contexts, suggest that attention does not remain stationary but samples objects alternately over time, suggesting a rhythmic view of attention. However, it remains unknown whether the dynamic mechanism essentially mediates attentional processes at a general level. Importantly, there is also a complete lack of direct neural evidence reflecting whether and how the brain rhythmically samples multiple visual objects during stimulus processing. 
To address these issues, in this study, we employed electroencephalography (EEG) and a temporal response function (TRF) approach, which can dissociate responses that exclusively represent a single object from the overall neuronal activity, to examine the spatiotemporal characteristics of attention in various attentional contexts. First, attention, which is characterized by inhibitory alpha-band (approximately 10 Hz) activity in TRFs, switches between attended and unattended objects every approximately 200 ms, suggesting a sequential sampling even when attention is required to mostly stay on the attended object. Second, the attentional spatiotemporal pattern is modulated by the task context, such that alpha-mediated switching becomes increasingly prominent as the task requires a more uniform distribution of attention. Finally, the switching pattern correlates with attentional behavioral performance. Our work provides direct neural evidence supporting a generally central role of temporal organization mechanism in attention, such that multiple objects are sequentially sorted according to their priority in attentional contexts. The results suggest that selective attention, in addition to the classically posited attentional “focus,” involves a dynamic mechanism for monitoring all objects outside of the focus. Our findings also suggest that attention implements a space (object)-to-time transformation by acting as a series of concatenating attentional chunks that operate on 1 object at a time. |
Jianrong Jia; Fang Fang; Huan Luo Selective spatial attention involves two alpha-band components associated with distinct spatiotemporal and functional characteristics Journal Article NeuroImage, 199 , pp. 228–236, 2019. @article{Jia2019, title = {Selective spatial attention involves two alpha-band components associated with distinct spatiotemporal and functional characteristics}, author = {Jianrong Jia and Fang Fang and Huan Luo}, doi = {10.1016/j.neuroimage.2019.05.079}, year = {2019}, date = {2019-01-01}, journal = {NeuroImage}, volume = {199}, pages = {228--236}, publisher = {Elsevier Ltd}, abstract = {Attention is crucial for efficiently coordinating resources over multiple objects in a visual scene. Recently, a growing number of studies suggest that attention is implemented through a temporal organization process during which resources are dynamically allocated over a multitude of objects, yet the associated neural evidence, particularly in low-level sensory areas, is still limited. Here we used EEG recordings in combination with a temporal response function (TRF) approach to examine the spatiotemporal characteristics of neuronal impulse response in covert selective attention. We demonstrate two distinct alpha-band components – one in post-central parietal area and one in contralateral occipital area – that are involved in coordinating neural representations of attended and unattended stimuli. Specifically, consistent with previous findings, the central alpha-band component showed enhanced activities for unattended versus attended stimuli within the first 200 ms temporal lag of TRF response, suggesting its inhibitory function in attention. In contrast, the contralateral occipital component displayed relatively earlier activation for the attended than unattended one in the TRF response. Furthermore, the central component but not the occipital component was correlated with attentional behavioral performance. 
Finally, the parietal area exerted directional influences on the occipital activity through alpha-band rhythm. Taken together, spatial attention involves two hierarchically organized alpha-band components that are associated with distinct spatiotemporal characteristics and presumably play different functions.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Attention is crucial for efficiently coordinating resources over multiple objects in a visual scene. Recently, a growing number of studies suggest that attention is implemented through a temporal organization process during which resources are dynamically allocated over a multitude of objects, yet the associated neural evidence, particularly in low-level sensory areas, is still limited. Here we used EEG recordings in combination with a temporal response function (TRF) approach to examine the spatiotemporal characteristics of neuronal impulse response in covert selective attention. We demonstrate two distinct alpha-band components – one in post-central parietal area and one in contralateral occipital area – that are involved in coordinating neural representations of attended and unattended stimuli. Specifically, consistent with previous findings, the central alpha-band component showed enhanced activities for unattended versus attended stimuli within the first 200 ms temporal lag of TRF response, suggesting its inhibitory function in attention. In contrast, the contralateral occipital component displayed relatively earlier activation for the attended than unattended one in the TRF response. Furthermore, the central component but not the occipital component was correlated with attentional behavioral performance. Finally, the parietal area exerted directional influences on the occipital activity through alpha-band rhythm. 
Taken together, spatial attention involves two hierarchically organized alpha-band components that are associated with distinct spatiotemporal characteristics and presumably play different functions. |
Peiqing Jin; Jiajie Zou; Tao Zhou; Nai Ding Eye activity tracks task-relevant structures during speech and auditory sequence perception Journal Article Nature Communications, 9 , pp. 5374, 2018. @article{Jin2018a, title = {Eye activity tracks task-relevant structures during speech and auditory sequence perception}, author = {Peiqing Jin and Jiajie Zou and Tao Zhou and Nai Ding}, doi = {10.1038/s41467-018-07773-y}, year = {2018}, date = {2018-01-01}, journal = {Nature Communications}, volume = {9}, pages = {5374}, publisher = {Springer US}, abstract = {The sensory and motor systems jointly contribute to complex behaviors, but whether motor systems are involved in high-order perceptual tasks such as speech and auditory comprehension remain debated. Here, we show that ocular muscle activity is synchronized to mentally constructed sentences during speech listening, in the absence of any sentence-related visual or prosodic cue. Ocular tracking of sentences is observed in the vertical electrooculogram (EOG), whether the eyes are open or closed, and in eye blinks measured by eyetracking. Critically, the phase of sentence-tracking ocular activity is strongly modulated by temporal attention, i.e., which word in a sentence is attended. Ocular activity also tracks high-level structures in non-linguistic auditory and visual sequences, and captures rapid fluctuations in temporal attention. Ocular tracking of non-visual rhythms possibly reflects global neural entrainment to task-relevant temporal structures across sensory and motor areas, which could serve to implement temporal attention and coordinate cortical networks.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The sensory and motor systems jointly contribute to complex behaviors, but whether motor systems are involved in high-order perceptual tasks such as speech and auditory comprehension remain debated. 
Here, we show that ocular muscle activity is synchronized to mentally constructed sentences during speech listening, in the absence of any sentence-related visual or prosodic cue. Ocular tracking of sentences is observed in the vertical electrooculogram (EOG), whether the eyes are open or closed, and in eye blinks measured by eyetracking. Critically, the phase of sentence-tracking ocular activity is strongly modulated by temporal attention, i.e., which word in a sentence is attended. Ocular activity also tracks high-level structures in non-linguistic auditory and visual sequences, and captures rapid fluctuations in temporal attention. Ocular tracking of non-visual rhythms possibly reflects global neural entrainment to task-relevant temporal structures across sensory and motor areas, which could serve to implement temporal attention and coordinate cortical networks. |
Han-Gue Jo; Thilo Kellermann; Conrad Baumann; Junji Ito; Barbara Schulte Holthausen; Frank Schneider; Sonja Grün; Ute Habel Distinct modes of top-down cognitive processing in the ventral visual cortex Journal Article NeuroImage, 193 , pp. 201–213, 2019. @article{Jo2019, title = {Distinct modes of top-down cognitive processing in the ventral visual cortex}, author = {Han-Gue Jo and Thilo Kellermann and Conrad Baumann and Junji Ito and Barbara {Schulte Holthausen} and Frank Schneider and Sonja Grün and Ute Habel}, doi = {10.1016/j.neuroimage.2019.02.068}, year = {2019}, date = {2019-01-01}, journal = {NeuroImage}, volume = {193}, pages = {201--213}, publisher = {Elsevier Ltd}, abstract = {Top-down cognitive control leads to changes in the sensory processing of the brain. In visual perception such changes can take place in the ventral visual cortex altering the functional asymmetry in forward and backward connections. Here we used fixation-related evoked responses of EEG measurement and dynamic causal modeling to examine hierarchical forward-backward asymmetry, while twenty-six healthy adults performed cognitive tasks that require different types of top-down cognitive control (memorizing or searching visual objects embedded in a natural scene image). The generative model revealed an enhanced asymmetry toward forward connections during memorizing, whereas enhanced backward connections were found during searching. This task-dependent modulation of forward and backward connections suggests two distinct modes of top-down cognitive processing in cortical networks. The alteration in forward-backward asymmetry might underlie the functional role in the cognitive control of visual information processing.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Top-down cognitive control leads to changes in the sensory processing of the brain. 
In visual perception such changes can take place in the ventral visual cortex altering the functional asymmetry in forward and backward connections. Here we used fixation-related evoked responses of EEG measurement and dynamic causal modeling to examine hierarchical forward-backward asymmetry, while twenty-six healthy adults performed cognitive tasks that require different types of top-down cognitive control (memorizing or searching visual objects embedded in a natural scene image). The generative model revealed an enhanced asymmetry toward forward connections during memorizing, whereas enhanced backward connections were found during searching. This task-dependent modulation of forward and backward connections suggests two distinct modes of top-down cognitive processing in cortical networks. The alteration in forward-backward asymmetry might underlie the functional role in the cognitive control of visual information processing. |
Elizabeth L Johnson; Callum D Dewar; Anne Kristin Solbakk; Tor Endestad; Torstein R Meling; Robert T Knight Bidirectional frontoparietal oscillatory systems support working memory Journal Article Current Biology, 27 (12), pp. 1829–1835, 2017. @article{Johnson2017ab, title = {Bidirectional frontoparietal oscillatory systems support working memory}, author = {Elizabeth L Johnson and Callum D Dewar and Anne Kristin Solbakk and Tor Endestad and Torstein R Meling and Robert T Knight}, doi = {10.1016/j.cub.2017.05.046}, year = {2017}, date = {2017-01-01}, journal = {Current Biology}, volume = {27}, number = {12}, pages = {1829--1835}, publisher = {Elsevier Ltd.}, abstract = {The ability to represent and select information in working memory provides the neurobiological infrastructure for human cognition. For 80 years, dominant views of working memory have focused on the key role of prefrontal cortex (PFC) [1–8]. However, more recent work has implicated posterior cortical regions [9–12], suggesting that PFC engagement during working memory is dependent on the degree of executive demand. We provide evidence from neurological patients with discrete PFC damage that challenges the dominant models attributing working memory to PFC-dependent systems. We show that neural oscillations, which provide a mechanism for PFC to communicate with posterior cortical regions [13], independently subserve communications both to and from PFC—uncovering parallel oscillatory mechanisms for working memory. Fourteen PFC patients and 20 healthy, age-matched controls performed a working memory task where they encoded, maintained, and actively processed information about pairs of common shapes. In controls, the electroencephalogram (EEG) exhibited oscillatory activity in the low-theta range over PFC and directional connectivity from PFC to parieto-occipital regions commensurate with executive processing demands. 
Concurrent alpha-beta oscillations were observed over parieto-occipital regions, with directional connectivity from parieto-occipital regions to PFC, regardless of processing demands. Accuracy, PFC low-theta activity, and PFC → parieto-occipital connectivity were attenuated in patients, revealing a PFC-independent, alpha-beta system. The PFC patients still demonstrated task proficiency, which indicates that the posterior alpha-beta system provides sufficient resources for working memory. Taken together, our findings reveal neurologically dissociable PFC and parieto-occipital systems and suggest that parallel, bidirectional oscillatory systems form the basis of working memory.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The ability to represent and select information in working memory provides the neurobiological infrastructure for human cognition. For 80 years, dominant views of working memory have focused on the key role of prefrontal cortex (PFC) [1–8]. However, more recent work has implicated posterior cortical regions [9–12], suggesting that PFC engagement during working memory is dependent on the degree of executive demand. We provide evidence from neurological patients with discrete PFC damage that challenges the dominant models attributing working memory to PFC-dependent systems. We show that neural oscillations, which provide a mechanism for PFC to communicate with posterior cortical regions [13], independently subserve communications both to and from PFC—uncovering parallel oscillatory mechanisms for working memory. Fourteen PFC patients and 20 healthy, age-matched controls performed a working memory task where they encoded, maintained, and actively processed information about pairs of common shapes. In controls, the electroencephalogram (EEG) exhibited oscillatory activity in the low-theta range over PFC and directional connectivity from PFC to parieto-occipital regions commensurate with executive processing demands. 
Concurrent alpha-beta oscillations were observed over parieto-occipital regions, with directional connectivity from parieto-occipital regions to PFC, regardless of processing demands. Accuracy, PFC low-theta activity, and PFC → parieto-occipital connectivity were attenuated in patients, revealing a PFC-independent, alpha-beta system. The PFC patients still demonstrated task proficiency, which indicates that the posterior alpha-beta system provides sufficient resources for working memory. Taken together, our findings reveal neurologically dissociable PFC and parieto-occipital systems and suggest that parallel, bidirectional oscillatory systems form the basis of working memory. |