David Aagten-Murphy; Paul M Bays: Independent working memory resources for egocentric and allocentric spatial information. Journal Article. PLOS Computational Biology, 15(2), pp. e1006563, 2019. doi: 10.1371/journal.pcbi.1006563

Visuospatial working memory enables us to maintain access to visual information for processing even when a stimulus is no longer present, due to occlusion, our own movements, or transience of the stimulus. Here we show that, when localizing remembered stimuli, the precision of spatial recall does not rely solely on memory for individual stimuli, but additionally depends on the relative distances between stimuli and visual landmarks in the surroundings. Across three separate experiments, we consistently observed a spatially selective improvement in the precision of recall for items located near a persistent landmark. While the results did not require that the landmark be visible throughout the memory delay period, it was essential that it was visible both during encoding and response. We present a simple model that can accurately capture human performance by considering relative (allocentric) spatial information as an independent localization estimate which degrades with distance and is optimally integrated with egocentric spatial information. Critically, allocentric information was encoded without cost to egocentric estimation, demonstrating independent storage of the two sources of information. Finally, when egocentric and allocentric estimates were put in conflict, the model successfully predicted the resulting localization errors. We suggest that the relative distance between stimuli represents an additional, independent spatial cue for memory recall. This cue information is likely to be critical for spatial localization in natural settings which contain an abundance of visual landmarks.
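The optimal integration this abstract describes corresponds to standard inverse-variance cue combination. A minimal sketch, assuming (as the abstract states) that allocentric reliability degrades with distance from the landmark; the specific noise parameters are illustrative, not fitted values from the paper:

```python
def integrate_estimates(x_ego, var_ego, x_allo, var_allo):
    # Inverse-variance (reliability) weighting of two independent estimates:
    # each cue is weighted by its precision, and the combined variance is
    # never worse than that of either cue alone.
    w_ego = (1.0 / var_ego) / (1.0 / var_ego + 1.0 / var_allo)
    x_hat = w_ego * x_ego + (1.0 - w_ego) * x_allo
    var_hat = 1.0 / (1.0 / var_ego + 1.0 / var_allo)
    return x_hat, var_hat

# Assumed: allocentric variance grows with distance from the landmark, so
# items near the landmark benefit most -- matching the spatially selective
# precision improvement reported above.
for dist in (1.0, 4.0, 8.0):
    var_allo = (0.5 + 0.4 * dist) ** 2          # illustrative noise growth
    _, var_hat = integrate_estimates(1.2, 1.0, 0.8, var_allo)
    print(f"distance {dist:.0f} deg: combined sd = {var_hat ** 0.5:.2f}")
```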
Annika Åkerfelt; Hans Colonius; Adele Diederich: Visual-tactile saccadic inhibition. Journal Article. Experimental Brain Research, 169(4), pp. 554–563, 2006. doi: 10.1007/s00221-005-0168-x

In an eye movement countermanding paradigm it is demonstrated for the first time that a tactile stimulus can be an effective stop signal when human participants are to inhibit saccades to a visual target. Estimated stop signal processing times were 90–140 ms, comparable to results with auditory stop signals, but shorter than those commonly found for manual responses. Two of the three participants significantly slowed their reactions in expectation of the stop signal, as revealed by a control experiment without stop signals. All participants produced slower responses in the shortest stop signal delay condition than predicted by the race model (Logan and Cowan 1984), along with hypometric saccades on stop failure trials, suggesting that the race model may need to be elaborated to include some component of interaction between stop and go signal processing.
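The race model of Logan and Cowan (1984) invoked here treats stopping as an independent race between a go process and a stop process. A minimal simulation sketch, with an assumed lognormal go-RT distribution and an SSRT in the 90–140 ms range the paper reports:

```python
import numpy as np

rng = np.random.default_rng(0)

def p_stop_failure(ssd_ms, ssrt_ms=120, n=100_000):
    # Independent race: the saccade escapes inhibition whenever the go
    # process finishes before the stop process (onset at SSD, duration SSRT).
    go_rt = rng.lognormal(mean=np.log(220), sigma=0.2, size=n)  # assumed go-RT distribution (ms)
    return np.mean(go_rt < ssd_ms + ssrt_ms)

# Longer stop-signal delays leave less time to cancel, so more saccades escape.
for ssd in (50, 100, 150, 200):
    print(f"SSD {ssd:>3} ms -> P(stop failure) = {p_stop_failure(ssd):.2f}")
```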
Bahman Abdi Sargezeh; Ahmad Ayatollahi; Mohammad Reza Daliri: Investigation of eye movement pattern parameters of individuals with different fluid intelligence. Journal Article. Experimental Brain Research, 237, pp. 15–28, 2019. doi: 10.1007/s00221-018-5392-2

Eye movement studies are a subject of interest in human cognition research, as cortical activity and cognitive load strongly influence eye movements. Here, we investigated whether fluid intelligence (FI) has any effect on eye movement patterns in a comparative visual search (CVS) task. FI was measured using the Cattell test, and participants were divided into three groups: low FI, middle FI, and high FI. Eye movements were then recorded during the CVS task, and eye movement patterns were extracted and compared statistically among the three groups. Our experiment demonstrated that eye movement patterns differed significantly among the three groups. Pearson correlation coefficients between FI and eye movement parameters were also calculated to assess which parameters were most affected by FI. Saccade peak velocity had the greatest positive correlation with FI score, and the ratio of total fixation duration to total saccade duration had the greatest negative correlation. Next, we extracted 24 features from eye movement patterns and designed (1) a classifier to categorize individuals and (2) a regression analysis to predict individuals' FI scores. In the best case examined, the classifier categorized subjects with 68.3% accuracy, and the regression predicted FI with a 0.54 correlation between observed and predicted scores. These results emphasize that, in cognitively demanding tasks, the load imposed on low-FI individuals is greater than that imposed on high-FI individuals.
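The classify-and-regress design summarized above is a standard supervised-learning pipeline over per-subject eye-movement features. A minimal sketch of that kind of pipeline in scikit-learn; the feature matrix, labels, and classifier choice are placeholders, not the authors' data or exact method:

```python
import numpy as np
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(1)
X = rng.normal(size=(60, 24))          # 24 eye-movement features per subject (placeholder)
y_group = rng.integers(0, 3, 60)       # low / middle / high FI group (placeholder)
y_fi = rng.normal(100, 15, 60)         # continuous FI score (placeholder)

# (1) Classify FI group from eye-movement features (cross-validated accuracy).
clf = make_pipeline(StandardScaler(), SVC(kernel="rbf"))
acc = cross_val_score(clf, X, y_group, cv=5).mean()

# (2) Predict the continuous FI score; correlate predicted with observed,
# matching the paper's "correlation between observed FI and predicted FI".
reg = make_pipeline(StandardScaler(), LinearRegression())
y_pred = cross_val_predict(reg, X, y_fi, cv=5)
r = np.corrcoef(y_fi, y_pred)[0, 1]
print(f"accuracy = {acc:.2f}, r(observed, predicted) = {r:.2f}")
```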
Bahman Abdi Sargezeh; Niloofar Tavakoli; Mohammad Reza Daliri: Gender-based eye movement differences in passive indoor picture viewing: An eye-tracking study. Journal Article. Physiology and Behavior, 206, pp. 43–50, 2019. doi: 10.1016/j.physbeh.2019.03.023

Male and female brains differ structurally, which may lead the two genders to produce different eye-movement patterns. This study presents the results of an eye-tracking experiment in which we analyzed the eye movements of 25 male and 20 female participants during passive indoor picture viewing. We examined fixation duration, scan path length, number of saccades, spatial density, saccade amplitude, and the ratio of total fixation duration to total saccade duration to investigate gender differences in eye-movement patterns during indoor picture viewing. We found significant differences in eye-movement patterns between genders. Females showed more explorative gaze behavior, indicated by larger saccade amplitudes and longer scan paths. Furthermore, given the smaller ratio of fixation duration to saccade duration in females compared to males, we speculate that females inspect the images faster than males. In addition, we classified participants by gender based on their eye-movement parameters using a support vector machine classifier, achieving an accuracy of 70%. We conclude that males and females – within the same culture – see the environment differently. Our findings have implications for research employing gaze-based models.
Parisa Abedi Khoozani; Gunnar Blohm: Neck muscle spindle noise biases reaches in a multisensory integration task. Journal Article. Journal of Neurophysiology, 120(3), pp. 893–909, 2018. doi: 10.1152/jn.00643.2017

Reference frame transformations (RFTs) are crucial components of sensorimotor transformations in the brain. Stochasticity in RFTs has been suggested to add noise to the transformed signal, due both to variability in transformation parameter estimates (e.g., angle) and to the stochastic nature of computations in spiking networks of neurons. Here, we varied the RFT angle together with the associated variability and evaluated the behavioral impact in a reaching task that required variability-dependent visual-proprioceptive multisensory integration. Crucially, reaches were performed with the head either straight or rolled 30° to either shoulder, and we also applied neck loads of 0 or 1.8 kg (left or right) in a 3 × 3 design, resulting in different combinations of estimated head-roll angle magnitude and variance required in RFTs. A novel 3D stochastic model of multisensory integration across reference frames was fitted to the data and captured our main behavioral findings: (1) neck load biased head-angle estimation across all head-roll orientations, resulting in systematic shifts in reach errors; (2) increased neck muscle tone led to increased reach variability, due to signal-dependent noise; (3) both head roll and neck load created larger angular errors in reaches to visual targets away from the body than in reaches toward the body. These results show that noise in muscle spindles, and stochasticity in general, have a tangible effect on the RFTs underlying reach planning. Since RFTs are omnipresent in the brain, our results could have implications for processes as diverse as motor control, decision making, posture/balance control, and perception.
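One way to picture the stochastic RFT at the core of this model: rotating a retinal target position into body coordinates by a biased, signal-dependent-noisy head-roll estimate. A minimal 2-D sketch; all parameter values below are illustrative assumptions, not fitted values from the paper:

```python
import numpy as np

rng = np.random.default_rng(2)

def retinal_to_body(target_ret, head_roll_deg, neck_bias_deg=2.0,
                    noise_k=0.1, n=10_000):
    # Noisy head-roll estimate: a constant bias (e.g., from neck load) plus
    # signal-dependent noise that grows with roll magnitude, so larger rolls
    # yield more variable transformed positions.
    roll_hat = (head_roll_deg + neck_bias_deg
                + rng.normal(0.0, 1.0 + noise_k * abs(head_roll_deg), n))
    theta = np.deg2rad(roll_hat)
    x, y = target_ret
    xb = np.cos(theta) * x - np.sin(theta) * y
    yb = np.sin(theta) * x + np.cos(theta) * y
    return np.stack([xb, yb], axis=1)

# Head rolled 30 deg: the bias shifts the mean reach endpoint, and the
# signal-dependent noise inflates its spread.
samples = retinal_to_body(target_ret=(10.0, 0.0), head_roll_deg=30.0)
print("mean:", samples.mean(axis=0), "std:", samples.std(axis=0))
```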
Mathias Abegg; Hyung Lee; Jason J S Barton: Systematic diagonal and vertical errors in antisaccades and memory-guided saccades. Journal Article. Journal of Eye Movement Research, 3(3), pp. 1–10, 2010. doi: 10.16910/jemr.3.3.5

Studies of memory-guided saccades in monkeys show an upward bias, while studies of antisaccades in humans show a diagonal effect, a deviation of endpoints toward the 45° diagonal. To determine if these two different spatial biases are specific to different types of saccades, we studied prosaccades, antisaccades and memory-guided saccades in humans. The diagonal effect occurred not with prosaccades but with antisaccades and memory-guided saccades with long intervals, consistent with hypotheses that it originates in computations of goal location under conditions of uncertainty. There was a small upward bias for memory-guided saccades but not prosaccades or antisaccades. Thus this bias is not a general effect of target uncertainty but a property specific to memory-guided saccades.
Mathias Abegg; Amadeo R Rodriguez; Hyung Lee; Jason J S Barton: ‘Alternate-goal bias' in antisaccades and the influence of expectation. Journal Article. Experimental Brain Research, 203(3), pp. 553–562, 2010. doi: 10.1007/s00221-010-2259-6

Saccadic performance depends on the requirements of the current trial, but also may be influenced by other trials in the same experiment. This effect of trial context has been investigated most for saccadic error rate and reaction time but seldom for the positional accuracy of saccadic landing points. We investigated whether the direction of saccades towards one goal is affected by the location of a second goal used in other trials in the same experimental block. In our first experiment, landing points ('endpoints') of antisaccades but not prosaccades were shifted towards the location of the alternate goal. This spatial bias decreased with increasing angular separation between the current and alternative goals. In a second experiment, we explored whether expectancy about the goal location was responsible for the biasing of the saccadic endpoint. For this, we used a condition where the saccadic goal randomly changed from one trial to the next between locations on, above or below the horizontal meridian. We modulated the prior probability of the alternate-goal location by showing cues prior to stimulus onset. The results showed that expectation about the possible positions of the saccadic goal is sufficient to bias saccadic endpoints and can account for at least part of this phenomenon of 'alternate-goal bias'.
Mathias Abegg; Dara S Manoach; Jason J S Barton: Knowing the future: Partial foreknowledge effects on the programming of prosaccades and antisaccades. Journal Article. Vision Research, 51(1), pp. 215–221, 2011. doi: 10.1016/j.visres.2010.11.006

Foreknowledge about the demands of an upcoming trial may be exploited to optimize behavioural responses. In the current study we systematically investigated the benefits of partial foreknowledge - that is, when some but not all aspects of a future trial are known in advance. For this we used an ocular motor paradigm with horizontal prosaccades and antisaccades. Predictable sequences were used to create three partial foreknowledge conditions: one with foreknowledge about the stimulus location only, one with foreknowledge about the task set only, and one with foreknowledge about the direction of the required response only. These were contrasted with a condition of no foreknowledge and a condition of complete foreknowledge about all three parameters. The results showed that the three types of foreknowledge affected saccadic efficiency differently. While foreknowledge about stimulus location had no effect on efficiency, task foreknowledge had some effect and response foreknowledge was as effective as complete foreknowledge. Foreknowledge effects on switch costs followed a similar pattern in general, but were not specific for switching of the trial attribute for which foreknowledge was available. We conclude that partial foreknowledge has a differential effect on efficiency, most consistent with preparatory activation of a motor schema in advance of the stimulus, with consequent benefits for both switched and repeated trials.
Mathias Abegg; Nishant Sharma; Jason J S Barton: Antisaccades generate two types of saccadic inhibition. Journal Article. Biological Psychology, 89(1), pp. 191–194, 2012. doi: 10.1016/j.biopsycho.2011.10.007

To make an antisaccade away from a stimulus, one must also suppress the more reflexive prosaccade to the stimulus. Whether this inhibition is diffuse or specific for saccade direction is not known. We used a paradigm examining inter-trial carry-over effects. Twelve subjects performed sequences of four identical antisaccades followed by sequences of four prosaccades randomly directed at the location of the antisaccade stimulus, the location of the antisaccade goal, or neutral locations. We found two types of persistent antisaccade-related inhibition. First, prosaccades in any direction were delayed only in the first trial after the antisaccades. Second, prosaccades to the location of the antisaccade stimulus were delayed more than all other prosaccades, and this persisted from the first to the fourth subsequent trial. These findings are consistent with both a transient global inhibition and a more sustained focal inhibition specific for the location of the antisaccade stimulus.
Mathias Abegg; Dario Pianezzi; Jason J S Barton: A vertical asymmetry in saccades. Journal Article. Journal of Eye Movement Research, 8(5), pp. 1–10, 2015. doi: 10.16910/jemr.8.5.3

Visual exploration of natural scenes imposes demands that differ between the upper and the lower visual hemifield. Yet little is known about how ocular motor performance is affected by the location of visual stimuli or the direction of a behavioural response. We compared saccadic latencies between upper and lower hemifield in a variety of conditions, including short-latency prosaccades, long-latency prosaccades, antisaccades, memory-guided saccades and saccades with increased attentional and selection demand. All saccade types, except memory-guided saccades, had shorter latencies when saccades were directed towards the upper field as compared to downward saccades (p < 0.05). This upper field reaction time advantage probably arises in ocular motor rather than visual processing. It may originate in structures involved in motor preparation rather than execution.
Naotoshi Abekawa; Hiroaki Gomi: Spatial coincidence of intentional actions modulates an implicit visuomotor control. Journal Article. Journal of Neurophysiology, 103(5), pp. 2717–2727, 2010. doi: 10.1152/jn.91133.2008

We investigated a visuomotor mechanism contributing to reach correction: the manual following response (MFR), which is a quick response to background visual motion that frequently occurs as a reafference when the body moves. Although several visual specificities of the MFR have been elucidated, the functional and computational mechanisms of its motor coordination remain unclear, mainly because it involves complex relationships among gaze, reaching target, and visual stimuli. To directly explore how these factors interact in the MFR, we assessed the impact of spatial coincidences among gaze, arm reaching, and visual motion on the MFR. When gaze location was displaced from the reaching target with an identical visual motion kept on the retina, the amplitude of the MFR significantly decreased as displacement increased. A factorial manipulation of gaze, reaching-target, and visual motion locations showed that the response decrease is due to the spatial separation between gaze and reaching target but is not due to the spatial separation between visual motion and reaching target. Additionally, elimination of visual motion around the fovea attenuated the MFR. The effects of these spatial coincidences on the MFR are completely different from their effects on the perceptual mislocalization of targets caused by visual motion. Furthermore, we found clear differences between the modulation sensitivities of the MFR and the ocular following response to spatial mismatch between gaze and reaching locations. These results suggest that the MFR modulation observed in our experiment is not due to changes in visual interaction between target and visual motion or to modulation of motion sensitivity in early visual processing. Instead the motor command of the MFR appears to be modulated by the spatial relationship between gaze and reaching.
Naotoshi Abekawa; Toshio Inui; Hiroaki Gomi: Eye-hand coordination in on-line visuomotor adjustments. Journal Article. NeuroReport, 25(7), pp. 441–445, 2014. doi: 10.1097/WNR.0000000000000111

When we perform a visually guided reaching action, the brain coordinates our hand and eye movements. Eye-hand coordination has been examined widely, but it remains unclear whether the hand and eye motor systems are coordinated during on-line visuomotor adjustments induced by a target jump during a reaching movement. As such quick motor responses are required when we interact with dynamic environments, eye and hand movements could be coordinated even during on-line motor control. Here, we examine the relationship between on-line hand adjustment and saccadic eye movement. In contrast to the well-known temporal order of eye and hand initiations, where the hand follows the eyes, we found that on-line hand adjustment was initiated before the saccade onset. Despite this order reversal, a correlation between hand and saccade latencies was observed, suggesting that the hand motor system is not independent of eye control even when the hand response was induced before the saccade. Moreover, the latency of the hand adjustment with saccadic eye movement was significantly shorter than that with eye fixation. This hand latency modulation cannot be ascribed to any changes of visual or oculomotor reafferent information, as the saccade was not yet initiated when the hand adjustment started. Taken together, the hand motor system would receive preparation signals rather than reafference signals of saccadic eye movements to provide quick manual adjustments of the goal-directed eye-hand movements.
Naotoshi Abekawa; Hiroaki Gomi: Online gain update for manual following response accompanied by gaze shift during arm reaching. Journal Article. Journal of Neurophysiology, 113(4), pp. 1206–1216, 2015. doi: 10.1152/jn.00281.2014

To capture objects by hand, online motor corrections are required to compensate for self-body movements. Recent studies have shown that background visual motion, usually caused by body movement, plays a significant role in such online corrections. Visual motion applied during a reaching movement induces a rapid and automatic manual following response (MFR) in the direction of the visual motion. Importantly, the MFR amplitude is modulated by the gaze direction relative to the reach target location (i.e., foveal or peripheral reaching). That is, the brain specifies the adequate visuomotor gain for an online controller based on gaze-reach coordination. However, the time or state point at which the brain specifies this visuomotor gain remains unclear. More specifically, does the gain change occur even during the execution of reaching? In the present study, we measured MFR amplitudes during a task in which the participant performed a saccadic eye movement that altered the gaze-reach coordination during reaching. The results indicate that the MFR amplitude immediately after the saccade termination changed according to the new gaze-reach coordination, suggesting a flexible online updating of the MFR gain during reaching. An additional experiment showed that this gain updating mostly started before the saccade terminated. Therefore, the MFR gain updating process would be triggered by an ocular command related to saccade planning or execution based on forthcoming changes in the gaze-reach coordination. Our findings suggest that the brain flexibly updates the visuomotor gain for an online controller even during reaching movements based on continuous monitoring of the gaze-reach coordination.
Dekel Abeles; Shlomit Yuval-Greenberg: Just look away: Gaze aversions as an overt attentional disengagement mechanism. Journal Article. Cognition, 168, pp. 99–109, 2017. doi: 10.1016/j.cognition.2017.06.021

During visual exploration of a scene, gaze tends to be directed toward more salient image locations, which contain more information. However, while performing non-visual tasks, such information-seeking behavior could be detrimental to performance, as the perception of irrelevant but salient visual input may unnecessarily increase the cognitive load. It would therefore be beneficial if, during non-visual tasks, gaze were governed by a drive to reduce saliency rather than maximize it. The current study examined the phenomenon of gaze aversion during non-visual tasks, which is hypothesized to act as an active avoidance mechanism. In two experiments, gaze position was monitored by an eye-tracker while participants performed an auditory mental arithmetic task, and in a third experiment they performed an undemanding naming task. Task-irrelevant simple motion stimuli (drifting grating and random dot kinematogram) were centrally presented, moving at varying speeds. Participants averted their gaze away from the moving stimuli more frequently, and for longer proportions of the time, when the motion was faster than when it was slower. Additionally, a positive correlation was found between the task's difficulty and this aversion behavior. When the task was highly undemanding, no gaze aversion behavior was observed. We conclude that gaze aversion is an active avoidance strategy, sensitive to both the physical features of the visual distractions and the cognitive load imposed by the non-visual task.
Dekel Abeles; Roy Amit; Shlomit Yuval-Greenberg: Oculomotor behavior during non-visual tasks: The role of visual saliency. Journal Article. PLoS ONE, 13(6), pp. e0198242, 2018. doi: 10.1371/journal.pone.0198242

During visual exploration or free view, gaze positioning is largely determined by the tendency to maximize visual saliency: more salient locations are more likely to be fixated. However, when visual input is completely irrelevant for performance, such as with non-visual tasks, this saliency maximization strategy may be less advantageous and potentially even disruptive for task performance. Here, we examined whether visual saliency remains a strong driving force in determining gaze positions even in non-visual tasks. We tested three alternative hypotheses: a) that saliency is disadvantageous for non-visual tasks, and therefore gaze would tend to shift away from it and towards non-salient locations; b) that saliency is irrelevant during non-visual tasks, and therefore gaze would not be directed towards it but also not away from it; c) that saliency maximization is a strong behavioral drive that would prevail even during non-visual tasks.
Dekel Abeles; Roy Amit; Noam Tal-Perry; Marisa Carrasco; Shlomit Yuval-Greenberg: Oculomotor inhibition precedes temporally expected auditory targets. Journal Article. Nature Communications, 11, pp. 1–12, 2020. doi: 10.1038/s41467-020-17158-9

Eye movements are inhibited prior to the onset of temporally predictable visual targets. This oculomotor inhibition effect could be considered a marker for the formation of temporal expectations and the allocation of temporal attention in the visual domain. Here we show that eye movements are also inhibited before predictable auditory targets. In two experiments, we manipulate the period between a cue and an auditory target to be either predictable or unpredictable. The findings show that although there is no perceptual gain from avoiding gaze shifts in this procedure, saccades and blinks are inhibited prior to predictable relative to unpredictable auditory targets. These findings show that oculomotor inhibition occurs prior to auditory targets. This link between auditory expectation and oculomotor behavior reveals a multimodal perception–action coupling, which has a central role in temporal expectations.
Arman Abrahamyan; Laura Luz Silva; Steven C Dakin; Matteo Carandini; Justin L Gardner: Adaptable history biases in human perceptual decisions. Journal Article. Proceedings of the National Academy of Sciences, 113(25), pp. E3548–E3557, 2016. doi: 10.1073/pnas.1518786113

When making choices under conditions of perceptual uncertainty, past experience can play a vital role. However, it can also lead to biases that worsen decisions. Consistent with previous observations, we found that human choices are influenced by the success or failure of past choices even in a standard two-alternative detection task, where choice history is irrelevant. The typical bias was one that made the subject switch choices after a failure. These choice history biases led to poorer performance and were similar for observers in different countries. They were well captured by a simple logistic regression model that had been previously applied to describe psychophysical performance in mice. Such irrational biases seem at odds with the principles of reinforcement learning, which would predict exquisite adaptability to choice history. We therefore asked whether subjects could adapt their irrational biases following changes in trial order statistics. Adaptability was strong in the direction that confirmed a subject's default biases, but weaker in the opposite direction, so that existing biases could not be eradicated. We conclude that humans can adapt choice history biases, but cannot easily overcome existing biases even if irrational in the current context: adaptation is more sensitive to confirmatory than contradictory statistics.
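The logistic regression model mentioned here predicts the current choice from the stimulus plus history regressors coding previous choices split by outcome. A minimal sketch of that style of model, with assumed regressor coding and simulated placeholder behavior (the simulated data have no built-in history bias; on real data, a negative after-failure weight would capture the "switch after failure" tendency reported):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(3)
n = 5000
stim = rng.choice([-1, 1], n)                        # target side: left (-1) or right (+1)
choice = np.where(rng.random(n) < 0.8, stim, -stim)  # placeholder behavior, 80% correct
correct = (choice == stim).astype(int)

# History regressors: the previous choice, split by whether it succeeded or failed.
prev_choice = np.roll(choice, 1)
prev_correct = np.roll(correct, 1)
X = np.column_stack([
    stim,                              # current sensory evidence
    prev_choice * prev_correct,        # previous choice, after a success
    prev_choice * (1 - prev_correct),  # previous choice, after a failure
])[1:]                                 # drop the first trial (no history)
y = (choice[1:] > 0).astype(int)       # 1 = chose right

model = LogisticRegression().fit(X, y)
print("weights [stim, after-success, after-failure]:", model.coef_[0])
```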
Alper Açik; Selim Onat; Frank Schumann; Wolfgang Einhäuser; Peter König: Effects of luminance contrast and its modifications on fixation behavior during free viewing of images from different categories. Journal Article. Vision Research, 49(12), pp. 1541–1553, 2009. doi: 10.1016/j.visres.2009.03.011

During viewing of natural scenes, do low-level features guide attention, and if so, does this depend on higher-level features? To answer these questions, we studied the image category dependence of low-level feature modification effects. Subjects fixated contrast-modified regions often in natural scene images, while smaller but significant effects were observed for urban scenes and faces. Surprisingly, modifications in fractal images did not influence fixations. Further analysis revealed an inverse relationship between modification effects and higher-level, phase-dependent image features. We suggest that high- and mid-level features - such as edges, symmetries, and recursive patterns - guide attention if present. However, if the scene lacks such diagnostic properties, low-level features prevail. We posit a hierarchical framework, which combines aspects of bottom-up and top-down theories and is compatible with our data.
Alper Açik; Adjmal Sarwary; Rafael Schultze-Kraft; Selim Onat; Peter König Developmental changes in natural viewing behavior: Bottom-up and top-down differences between children, young adults and older adults Journal Article Frontiers in Psychology, 1, pp. 1–14, 2010. @article{Acik2010, title = {Developmental changes in natural viewing behavior: Bottom-up and top-down differences between children, young adults and older adults}, author = {Alper A{ç}ik and Adjmal Sarwary and Rafael Schultze-Kraft and Selim Onat and Peter König}, doi = {10.3389/fpsyg.2010.00207}, year = {2010}, date = {2010-01-01}, journal = {Frontiers in Psychology}, volume = {1}, pages = {1--14}, abstract = {Despite the growing interest in fixation selection under natural conditions, there is a major gap in the literature concerning its developmental aspects. Early in life, bottom-up processes, such as local image feature - color, luminance contrast etc. - guided viewing, might be prominent but later overshadowed by more top-down processing. Moreover, with decline in visual functioning in old age, bottom-up processing is known to suffer. Here we recorded eye movements of 7- to 9-year-old children, 19- to 27-year-old adults, and older adults above 72 years of age while they viewed natural and complex images before performing a patch-recognition task. Task performance displayed the classical inverted U-shape, with young adults outperforming the other age groups. Fixation discrimination performance of local feature values dropped with age. Whereas children displayed the highest feature values at fixated points, suggesting a bottom-up mechanism, older adult viewing behavior was less feature-dependent, reminiscent of a top-down strategy. Importantly, we observed a double dissociation between children and elderly regarding the effects of active viewing on feature-related viewing: Explorativeness correlated with feature-related viewing negatively in young age, and positively in older adults. The results indicate that, with age, bottom-up fixation selection loses strength and/or the role of top-down processes becomes more important. Older adults who increase their feature-related viewing by being more explorative make use of this low-level information and perform better in the task. The present study thus reveals an important developmental change in natural and task-guided viewing.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Despite the growing interest in fixation selection under natural conditions, there is a major gap in the literature concerning its developmental aspects. Early in life, bottom-up processes, such as local image feature - color, luminance contrast etc. - guided viewing, might be prominent but later overshadowed by more top-down processing. Moreover, with decline in visual functioning in old age, bottom-up processing is known to suffer. Here we recorded eye movements of 7- to 9-year-old children, 19- to 27-year-old adults, and older adults above 72 years of age while they viewed natural and complex images before performing a patch-recognition task. Task performance displayed the classical inverted U-shape, with young adults outperforming the other age groups. Fixation discrimination performance of local feature values dropped with age. Whereas children displayed the highest feature values at fixated points, suggesting a bottom-up mechanism, older adult viewing behavior was less feature-dependent, reminiscent of a top-down strategy. 
Importantly, we observed a double dissociation between children and elderly regarding the effects of active viewing on feature-related viewing: Explorativeness correlated with feature-related viewing negatively in young age, and positively in older adults. The results indicate that, with age, bottom-up fixation selection loses strength and/or the role of top-down processes becomes more important. Older adults who increase their feature-related viewing by being more explorative make use of this low-level information and perform better in the task. The present study thus reveals an important developmental change in natural and task-guided viewing. |
Alper Açik; Andreas Bartel; Peter König Real and implied motion at the center of gaze Journal Article Journal of Vision, 14 (1), pp. 1–19, 2014. @article{Acik2014, title = {Real and implied motion at the center of gaze}, author = {Alper A{ç}ik and Andreas Bartel and Peter König}, year = {2014}, date = {2014-01-01}, journal = {Journal of Vision}, volume = {14}, number = {1}, pages = {1--19}, abstract = {Even though the dynamicity of our environment is a given, much of what we know on fixation selection comes from studies of static scene viewing. We performed a direct comparison of fixation selection on static and dynamic visual stimuli and investigated how far identical mechanisms drive these. We recorded eye movements while participants viewed movie clips of natural scenery and static frames taken from the same movies. Both were presented in the same high spatial resolution (1080 × 1920 pixels). The static condition allowed us to check whether local movement features computed from movies are salient even when presented as single frames. We observed that during the first second of viewing, movement and static features are equally salient in both conditions. Furthermore, predictability of fixations based on movement features decreased faster when viewing static frames as compared with viewing movie clips. Yet even during the later portion of static-frame viewing, the predictive value of movement features was still high above chance. Moreover, we demonstrated that, whereas the sets of movement and static features were statistically dependent within these sets, respectively, no dependence was observed between the two sets. Based on these results, we argue that implied motion is predictive of fixation similarly to real movement and that the onset of motion in natural stimuli is more salient than ongoing movement is. The present results allow us to address to what extent and when static image viewing is similar to the perception of a dynamic environment.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Even though the dynamicity of our environment is a given, much of what we know on fixation selection comes from studies of static scene viewing. We performed a direct comparison of fixation selection on static and dynamic visual stimuli and investigated how far identical mechanisms drive these. We recorded eye movements while participants viewed movie clips of natural scenery and static frames taken from the same movies. Both were presented in the same high spatial resolution (1080 × 1920 pixels). The static condition allowed us to check whether local movement features computed from movies are salient even when presented as single frames. We observed that during the first second of viewing, movement and static features are equally salient in both conditions. Furthermore, predictability of fixations based on movement features decreased faster when viewing static frames as compared with viewing movie clips. Yet even during the later portion of static-frame viewing, the predictive value of movement features was still high above chance. Moreover, we demonstrated that, whereas the sets of movement and static features were statistically dependent within these sets, respectively, no dependence was observed between the two sets. Based on these results, we argue that implied motion is predictive of fixation similarly to real movement and that the onset of motion in natural stimuli is more salient than ongoing movement is. 
The present results allow us to address to what extent and when static image viewing is similar to the perception of a dynamic environment. |
John F Ackermann; M S Landy Statistical templates for visual search Journal Article Journal of Vision, 14 (3), pp. 1–17, 2014. @article{Ackermann2014, title = {Statistical templates for visual search}, author = {John F Ackermann and M S Landy}, doi = {10.1167/14.3.18}, year = {2014}, date = {2014-01-01}, journal = {Journal of Vision}, volume = {14}, number = {3}, pages = {1--17}, abstract = {How do we find a target embedded in a scene? Within the framework of signal detection theory, this task is carried out by comparing each region of the scene with a "template," i.e., an internal representation of the search target. Here we ask what form this representation takes when the search target is a complex image with uncertain orientation. We examine three possible representations. The first is the matched filter. Such a representation cannot account for the ease with which humans can find a complex search target that is rotated relative to the template. A second representation attempts to deal with this by estimating the relative orientation of target and match and rotating the intensity-based template. No intensity-based template, however, can account for the ability to easily locate targets that are defined categorically and not in terms of a specific arrangement of pixels. Thus, we define a third template that represents the target in terms of image statistics rather than pixel intensities. Subjects performed a two-alternative, forced-choice search task in which they had to localize an image that matched a previously viewed target. Target images were texture patches. In one condition, match images were the same image as the target and distractors were a different image of the same textured material. In the second condition, the match image was of the same texture as the target (but different pixels) and the distractor was an image of a different texture. Match and distractor stimuli were randomly rotated relative to the target. We compared human performance to pixel-based, pixel-based with rotation, and statistic-based search models. The statistic-based search model was most successful at matching human performance. We conclude that humans use summary statistics to search for complex visual targets.}, keywords = {}, pubstate = {published}, tppubtype = {article} } How do we find a target embedded in a scene? Within the framework of signal detection theory, this task is carried out by comparing each region of the scene with a "template," i.e., an internal representation of the search target. Here we ask what form this representation takes when the search target is a complex image with uncertain orientation. We examine three possible representations. The first is the matched filter. Such a representation cannot account for the ease with which humans can find a complex search target that is rotated relative to the template. A second representation attempts to deal with this by estimating the relative orientation of target and match and rotating the intensity-based template. No intensity-based template, however, can account for the ability to easily locate targets that are defined categorically and not in terms of a specific arrangement of pixels. Thus, we define a third template that represents the target in terms of image statistics rather than pixel intensities. Subjects performed a two-alternative, forced-choice search task in which they had to localize an image that matched a previously viewed target. Target images were texture patches. 
In one condition, match images were the same image as the target and distractors were a different image of the same textured material. In the second condition, the match image was of the same texture as the target (but different pixels) and the distractor was an image of a different texture. Match and distractor stimuli were randomly rotated relative to the target. We compared human performance to pixel-based, pixel-based with rotation, and statistic-based search models. The statistic-based search model was most successful at matching human performance. We conclude that humans use summary statistics to search for complex visual targets. |
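To make the contrast between template classes concrete, here is a minimal sketch (my own illustration, not the authors' implementation) comparing a pixel-based matched filter with a summary-statistic template on a rotated match:

```python
# Illustrative comparison of a pixel-based matched filter and a
# summary-statistic "template"; patches and statistics are my choices.
import numpy as np

def matched_filter_score(patch, template):
    # normalized cross-correlation: sensitive to the exact pixel layout
    p = (patch - patch.mean()) / patch.std()
    t = (template - template.mean()) / template.std()
    return np.mean(p * t)

def statistic_score(patch, template):
    # negative distance between summary statistics (mean, sd,
    # skewness, kurtosis): invariant to rotations of the pixel layout
    def stats(x):
        z = (x - x.mean()) / x.std()
        return np.array([x.mean(), x.std(), (z**3).mean(), (z**4).mean()])
    return -np.linalg.norm(stats(patch) - stats(template))

rng = np.random.default_rng(1)
template = rng.normal(size=(32, 32))
match = np.rot90(template)               # same "texture", rotated
distractor = rng.normal(size=(32, 32))   # different sample

for name, score in [("matched filter", matched_filter_score),
                    ("statistic template", statistic_score)]:
    picks = score(match, template) > score(distractor, template)
    print(f"{name} picks the rotated match: {picks}")
```

Because rotation preserves a patch's summary statistics but scrambles its pixel-wise correlation with the template, only the statistic-based score reliably selects the rotated match, which is the qualitative pattern the abstract reports for human observers.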
John F Ackermann; Michael S Landy Suboptimal decision criteria are predicted by subjectively weighted probabilities and rewards Journal Article Attention, Perception, and Psychophysics, 77 (2), pp. 638–658, 2015. @article{Ackermann2015, title = {Suboptimal decision criteria are predicted by subjectively weighted probabilities and rewards}, author = {John F Ackermann and Michael S Landy}, doi = {10.3758/s13414-014-0779-z}, year = {2015}, date = {2015-01-01}, journal = {Attention, Perception, and Psychophysics}, volume = {77}, number = {2}, pages = {638--658}, abstract = {Subjects performed a visual detection task in which the probability of target occurrence at each of the two possible locations, and the rewards for correct responses for each, were varied across conditions. To maximize monetary gain, observers should bias their responses, choosing one location more often than the other in line with the varied probabilities and rewards. Typically, and in our task, observers do not bias their responses to the extent they should, and instead distribute their responses more evenly across locations, a phenomenon referred to as 'conservatism.' We investigated several hypotheses regarding the source of the conservatism. We measured utility and probability weighting functions under Prospect Theory for each subject in an independent economic choice task and used the weighting-function parameters to calculate each subject's subjective utility (SU(c)) as a function of the criterion c, and the corresponding weighted optimal criteria (wc_opt). Subjects' criteria were not close to optimal relative to wc_opt. The slope of SU(c) and of expected gain EG(c) at the neutral criterion corresponding to β = 1 were both predictive of the subjects' criteria. The slope of SU(c) was a better predictor of observers' decision criteria overall. Thus, rather than behaving optimally, subjects move their criterion away from the neutral criterion by estimating how much they stand to gain by such a change based on the slope of subjective gain as a function of criterion, using inherently distorted probabilities and values.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Subjects performed a visual detection task in which the probability of target occurrence at each of the two possible locations, and the rewards for correct responses for each, were varied across conditions. To maximize monetary gain, observers should bias their responses, choosing one location more often than the other in line with the varied probabilities and rewards. Typically, and in our task, observers do not bias their responses to the extent they should, and instead distribute their responses more evenly across locations, a phenomenon referred to as 'conservatism.' We investigated several hypotheses regarding the source of the conservatism. We measured utility and probability weighting functions under Prospect Theory for each subject in an independent economic choice task and used the weighting-function parameters to calculate each subject's subjective utility (SU(c)) as a function of the criterion c, and the corresponding weighted optimal criteria (wc_opt). Subjects' criteria were not close to optimal relative to wc_opt. The slope of SU(c) and of expected gain EG(c) at the neutral criterion corresponding to β = 1 were both predictive of the subjects' criteria. The slope of SU(c) was a better predictor of observers' decision criteria overall. 
Thus, rather than behaving optimally, subjects move their criterion away from the neutral criterion by estimating how much they stand to gain by such a change based on the slope of subjective gain as a function of criterion, using inherently distorted probabilities and values. |
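The key quantities here, expected gain EG(c) and a subjectively weighted analogue SU(c) over the decision criterion c, can be written down directly for a two-location detection task. A hedged sketch (the task layout, the Tversky-Kahneman weighting function, and all parameter values are my assumptions, not the paper's fitted values; the paper additionally passes rewards through a utility function):

```python
# Sketch of EG(c) and a probability-weighted SU(c) for a two-location
# SDT task; layout and parameter values are assumptions.
import numpy as np
from scipy.stats import norm

d_prime = 1.5
p1 = 0.75            # prior that the target is at location 1
r1 = r2 = 1.0        # equal rewards, so only probabilities are distorted

def w(p, gamma=0.61):
    # Tversky & Kahneman (1992) probability weighting function
    return p**gamma / (p**gamma + (1 - p)**gamma)**(1 / gamma)

c = np.linspace(-3, 3, 601)
p_corr1 = 1 - norm.cdf(c - d_prime / 2)  # respond "1" when evidence > c
p_corr2 = norm.cdf(c + d_prime / 2)

EG = p1 * r1 * p_corr1 + (1 - p1) * r2 * p_corr2          # objective
SU = w(p1) * r1 * p_corr1 + w(1 - p1) * r2 * p_corr2      # subjective

print("objective optimal criterion:    %.2f" % c[EG.argmax()])
print("subjectively optimal criterion: %.2f" % c[SU.argmax()])
```

Because w() compresses the 0.75/0.25 asymmetry, the subjectively optimal criterion lands closer to neutral than the objective one, so weighted probabilities alone already push in the direction of conservatism.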
David J Acunzo; John M Henderson No emotional "Pop-out" effect in natural scene viewing Journal Article Emotion, 11 (5), pp. 1134–1143, 2011. @article{Acunzo2011, title = {No emotional "Pop-out" effect in natural scene viewing}, author = {David J Acunzo and John M Henderson}, doi = {10.1037/a0022586}, year = {2011}, date = {2011-01-01}, journal = {Emotion}, volume = {11}, number = {5}, pages = {1134--1143}, abstract = {It has been shown that attention is drawn toward emotional stimuli. In particular, eye movement research suggests that gaze is attracted toward emotional stimuli in an unconscious, automated manner. We addressed whether this effect remains when emotional targets are embedded within complex real-world scenes. Eye movements were recorded while participants memorized natural images. Each image contained an item that was either neutral, such as a bag, or emotional, such as a snake or a couple hugging. We found no latency difference for the first target fixation between the emotional and neutral conditions, suggesting no extrafoveal "pop-out" effect of emotional targets. However, once detected, emotional targets held attention for a longer time than neutral targets. The failure of emotional items to attract attention seems to contradict previous eye-movement research using emotional stimuli. However, our results are consistent with studies examining semantic drive of overt attention in natural scenes. Interpretations of the results in terms of perceptual and attentional load are provided.}, keywords = {}, pubstate = {published}, tppubtype = {article} } It has been shown that attention is drawn toward emotional stimuli. In particular, eye movement research suggests that gaze is attracted toward emotional stimuli in an unconscious, automated manner. We addressed whether this effect remains when emotional targets are embedded within complex real-world scenes. Eye movements were recorded while participants memorized natural images. Each image contained an item that was either neutral, such as a bag, or emotional, such as a snake or a couple hugging. We found no latency difference for the first target fixation between the emotional and neutral conditions, suggesting no extrafoveal "pop-out" effect of emotional targets. However, once detected, emotional targets held attention for a longer time than neutral targets. The failure of emotional items to attract attention seems to contradict previous eye-movement research using emotional stimuli. However, our results are consistent with studies examining semantic drive of overt attention in natural scenes. Interpretations of the results in terms of perceptual and attentional load are provided. |
Hamed Zivari Adab; Ivo D Popivanov; Wim Vanduffel; Rufin Vogels Perceptual learning of simple stimuli modifies stimulus representations in posterior inferior temporal cortex Journal Article Journal of Cognitive Neuroscience, 26 (10), pp. 2187–2200, 2014. @article{Adab2014, title = {Perceptual learning of simple stimuli modifies stimulus representations in posterior inferior temporal cortex}, author = {Hamed Zivari Adab and Ivo D Popivanov and Wim Vanduffel and Rufin Vogels}, doi = {10.1162/jocn}, year = {2014}, date = {2014-01-01}, journal = {Journal of Cognitive Neuroscience}, volume = {26}, number = {10}, pages = {2187--2200}, abstract = {Practicing simple visual detection and discrimination tasks improves performance, a signature of adult brain plasticity. The neural mechanisms that underlie these changes in performance are still unclear. Previously, we reported that practice in discriminating the orientation of noisy gratings (coarse orientation discrimination) increased the ability of single neurons in the early visual area V4 to discriminate the trained stimuli. Here, we ask whether practice in this task also changes the stimulus tuning properties of later visual cortical areas, despite the use of simple grating stimuli. To identify candidate areas, we used fMRI to map activations to noisy gratings in trained rhesus monkeys, revealing a region in the posterior inferior temporal (PIT) cortex. Subsequent single unit recordings in PIT showed that the degree of orientation selectivity was similar to that of area V4 and that the PIT neurons discriminated the trained orientations better than the untrained orientations. Unlike in previous single unit studies of perceptual learning in early visual cortex, more PIT neurons preferred trained compared with untrained orientations. The effects of training on the responses to the grating stimuli were also present when the animals were performing a difficult orthogonal task in which the grating stimuli were task-irrelevant, suggesting that the training effect does not need attention to be expressed. The PIT neurons could support orientation discrimination at low signal-to-noise levels. These findings suggest that extensive practice in discriminating simple grating stimuli not only affects early visual cortex but also changes the stimulus tuning of a late visual cortical area.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Practicing simple visual detection and discrimination tasks improves performance, a signature of adult brain plasticity. The neural mechanisms that underlie these changes in performance are still unclear. Previously, we reported that practice in discriminating the orientation of noisy gratings (coarse orientation discrimination) increased the ability of single neurons in the early visual area V4 to discriminate the trained stimuli. Here, we ask whether practice in this task also changes the stimulus tuning properties of later visual cortical areas, despite the use of simple grating stimuli. To identify candidate areas, we used fMRI to map activations to noisy gratings in trained rhesus monkeys, revealing a region in the posterior inferior temporal (PIT) cortex. Subsequent single unit recordings in PIT showed that the degree of orientation selectivity was similar to that of area V4 and that the PIT neurons discriminated the trained orientations better than the untrained orientations. 
Unlike in previous single unit studies of perceptual learning in early visual cortex, more PIT neurons preferred trained compared with untrained orientations. The effects of training on the responses to the grating stimuli were also present when the animals were performing a difficult orthogonal task in which the grating stimuli were task-irrelevant, suggesting that the training effect does not need attention to be expressed. The PIT neurons could support orientation discrimination at low signal-to-noise levels. These findings suggest that extensive practice in discriminating simple grating stimuli not only affects early visual cortex but also changes the stimulus tuning of a late visual cortical area. |
Jos J Adam; Simona Buetti; Dirk Kerzel Coordinated flexibility: How initial gaze position modulates eye-hand coordination and reaching Journal Article Journal of Experimental Psychology: Human Perception and Performance, 38 (4), pp. 891–901, 2012. @article{Adam2012, title = {Coordinated flexibility: How initial gaze position modulates eye-hand coordination and reaching}, author = {Jos J Adam and Simona Buetti and Dirk Kerzel}, doi = {10.1037/a0027592}, year = {2012}, date = {2012-01-01}, journal = {Journal of Experimental Psychology: Human Perception and Performance}, volume = {38}, number = {4}, pages = {891--901}, abstract = {Reaching to targets in space requires the coordination of eye and hand movements. In two experiments, we recorded eye and hand kinematics to examine the role of gaze position at target onset on eye-hand coordination and reaching performance. Experiment 1 showed that with eyes and hand aligned on the same peripheral start location, time lags between eye and hand onsets were small and initiation times were substantially correlated, suggesting simultaneous control and tight eye-hand coupling. With eyes and hand departing from different start locations (gaze aligned with the center of the range of possible target positions), time lags between eye and hand onsets were large and initiation times were largely uncorrelated, suggesting independent control and decoupling of eye and hand movements. Furthermore, initial gaze position strongly mediated manual reaching performance indexed by increments in movement time as a function of target distance. Experiment 2 confirmed the impact of target foveation in modulating the effect of target distance on movement time. Our findings reveal the operation of an overarching, flexible neural control system that tunes the operation and cooperation of saccadic and manual control systems depending on where the eyes look at target onset.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Reaching to targets in space requires the coordination of eye and hand movements. In two experiments, we recorded eye and hand kinematics to examine the role of gaze position at target onset on eye-hand coordination and reaching performance. Experiment 1 showed that with eyes and hand aligned on the same peripheral start location, time lags between eye and hand onsets were small and initiation times were substantially correlated, suggesting simultaneous control and tight eye-hand coupling. With eyes and hand departing from different start locations (gaze aligned with the center of the range of possible target positions), time lags between eye and hand onsets were large and initiation times were largely uncorrelated, suggesting independent control and decoupling of eye and hand movements. Furthermore, initial gaze position strongly mediated manual reaching performance indexed by increments in movement time as a function of target distance. Experiment 2 confirmed the impact of target foveation in modulating the effect of target distance on movement time. Our findings reveal the operation of an overarching, flexible neural control system that tunes the operation and cooperation of saccadic and manual control systems depending on where the eyes look at target onset. |
Robert Adam; Paul M Bays; Masud Husain Rapid decision-making under risk Journal Article Cognitive Neuroscience, 3 (1), pp. 52–61, 2012. @article{Adam2012a, title = {Rapid decision-making under risk}, author = {Robert Adam and Paul M Bays and Masud Husain}, year = {2012}, date = {2012-01-01}, journal = {Cognitive Neuroscience}, volume = {3}, number = {1}, pages = {52--61}, abstract = {Impulsivity is often characterized by rapid decisions under risk, but most current tests of decision-making do not impose time pressures on participants' choices. Here we introduce a new Traffic Lights test which requires people to choose whether to programme a risky, early eye movement before a traffic light turns green (earning them high rewards or a penalty) or wait for the green light before responding to obtain a small reward instead. Young participants demonstrated bimodal responses: an early, high-risk and a later, low-risk set of choices. By contrast, elderly people invariably waited for the green light and showed little risk-taking. Performance could be modelled as a race between two rise-to-threshold decision processes, one triggered by the green light and the other initiated before it. The test provides a useful measure of rapid decision-making under risk, with the potential to reveal how this process alters with aging or in patient groups.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Impulsivity is often characterized by rapid decisions under risk, but most current tests of decision-making do not impose time pressures on participants' choices. Here we introduce a new Traffic Lights test which requires people to choose whether to programme a risky, early eye movement before a traffic light turns green (earning them high rewards or a penalty) or wait for the green light before responding to obtain a small reward instead. Young participants demonstrated bimodal responses: an early, high-risk and a later, low-risk set of choices. By contrast, elderly people invariably waited for the green light and showed little risk-taking. Performance could be modelled as a race between two rise-to-threshold decision processes, one triggered by the green light and the other initiated before it. The test provides a useful measure of rapid decision-making under risk, with the potential to reveal how this process alters with aging or in patient groups. |
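The race architecture described above is easy to simulate: an anticipatory rise-to-threshold process launched at trial start races a reactive process triggered by the green light, and whichever reaches threshold first determines the response. A minimal LATER-style sketch (all rates, thresholds, and timings are hypothetical):

```python
# Minimal LATER-style race simulation (assumptions mine): an
# anticipatory rise-to-threshold process starting at trial onset races
# a reactive process triggered by the green light.
import numpy as np

rng = np.random.default_rng(2)
n = 10_000
green_onset = 1.0        # s after trial start (hypothetical)
threshold = 1.0

# per-trial rise rates; time to threshold = threshold / rate
antic_rate = rng.normal(0.7, 0.35, n)   # slow, variable, starts at t = 0
react_rate = rng.normal(4.0, 1.0, n)    # fast, starts at the green light

t_antic = np.where(antic_rate > 0, threshold / antic_rate, np.inf)
t_react = green_onset + np.where(react_rate > 0, threshold / react_rate, np.inf)

rt = np.minimum(t_antic, t_react)       # first process to finish responds
early = rt < green_onset                # risky, pre-green responses
print("risky early responses: %.0f%%" % (100 * early.mean()))
print("mean early RT: %.2f s | mean late RT: %.2f s"
      % (rt[early].mean(), rt[~early].mean()))
```

Taking the minimum of the two finishing times reproduces the bimodal early/late response pattern the abstract describes.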
Jos J Adam; Thamar J H Bovend'Eerdt; Fren T Y Smulders; Pascal W M Van Gerven Strategic flexibility in response preparation: Effects of cue validity on reaction time and pupil dilation Journal Article Journal of Cognitive Psychology, 26 (2), pp. 166–177, 2014. @article{Adam2014, title = {Strategic flexibility in response preparation: Effects of cue validity on reaction time and pupil dilation}, author = {Jos J Adam and Thamar J H Bovend'Eerdt and Fren T Y Smulders and Pascal W M {Van Gerven}}, doi = {10.1080/20445911.2014.883399}, year = {2014}, date = {2014-01-01}, journal = {Journal of Cognitive Psychology}, volume = {26}, number = {2}, pages = {166--177}, abstract = {This study examined the ability of participants to strategically adapt their level of response preparation to the predictive value of preparatory cues. Participants performed the finger-precuing task under three levels of cue validity: 100, 75 and 50% valid. Response preparation was indexed by means of reaction time (RT) and pupil dilation, the latter providing a psychophysiological index of invested effort. Results showed a systematic increase in RT benefits (generated by valid cues) and RT costs (generated by invalid cues) with increments in the predictive value of cues. Converging with these behavioural effects, pupil dilation also increased systematically with greater cue validity during the cue-stimulus interval, suggesting more effortful response preparation with increases in cue validity. Together, these findings confirm the hypothesis that response preparation is flexible and that it can be strategically allocated in proportion to the relative frequency of valid/invalid preparatory cues.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study examined the ability of participants to strategically adapt their level of response preparation to the predictive value of preparatory cues. Participants performed the finger-precuing task under three levels of cue validity: 100, 75 and 50% valid. Response preparation was indexed by means of reaction time (RT) and pupil dilation, the latter providing a psychophysiological index of invested effort. Results showed a systematic increase in RT benefits (generated by valid cues) and RT costs (generated by invalid cues) with increments in the predictive value of cues. Converging with these behavioural effects, pupil dilation also increased systematically with greater cue validity during the cue-stimulus interval, suggesting more effortful response preparation with increases in cue validity. Together, these findings confirm the hypothesis that response preparation is flexible and that it can be strategically allocated in proportion to the relative frequency of valid/invalid preparatory cues. |
Ramina Adam; Kevin D Johnston; Ravi S Menon; Stefan Everling Functional reorganization during the recovery of contralesional target selection deficits after prefrontal cortex lesions in macaque monkeys Journal Article NeuroImage, 207 , pp. 1–17, 2020. @article{Adam2020, title = {Functional reorganization during the recovery of contralesional target selection deficits after prefrontal cortex lesions in macaque monkeys}, author = {Ramina Adam and Kevin D Johnston and Ravi S Menon and Stefan Everling}, doi = {10.1016/j.neuroimage.2019.116339}, year = {2020}, date = {2020-01-01}, journal = {NeuroImage}, volume = {207}, pages = {1--17}, publisher = {Elsevier Ltd}, abstract = {Visual extinction has been characterized by the failure to respond to a visual stimulus in the contralesional hemifield when presented simultaneously with an ipsilesional stimulus (Corbetta and Shulman, 2011). Unilateral damage to the macaque frontoparietal cortex commonly leads to deficits in contralesional target selection that resemble visual extinction. Recently, we showed that macaque monkeys with unilateral lesions in the caudal prefrontal cortex (PFC) exhibited contralesional target selection deficits that recovered over 2–4 months (Adam et al., 2019). Here, we investigated the longitudinal changes in functional connectivity (FC) of the frontoparietal network after a small or large right caudal PFC lesion in four macaque monkeys. We collected ultra-high field resting-state fMRI at 7-T before the lesion and at weeks 1–16 post-lesion and compared the functional data with behavioural performance on a free-choice saccade task. We found that the pattern of frontoparietal network FC changes depended on lesion size, such that the recovery of contralesional extinction was associated with an initial increase in network FC that returned to baseline in the two small lesion monkeys, whereas FC continued to increase throughout recovery in the two monkeys with a larger lesion. We also found that the FC between contralesional dorsolateral PFC and ipsilesional parietal cortex correlated with behavioural recovery and that the contralesional dorsolateral PFC showed increasing degree centrality with the frontoparietal network. These findings suggest that both the contralesional and ipsilesional hemispheres play an important role in the recovery of function. Importantly, optimal compensation after large PFC lesions may require greater recruitment of distant and intact areas of the frontoparietal network, whereas recovery from smaller lesions was supported by a normalization of the functional network.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual extinction has been characterized by the failure to respond to a visual stimulus in the contralesional hemifield when presented simultaneously with an ipsilesional stimulus (Corbetta and Shulman, 2011). Unilateral damage to the macaque frontoparietal cortex commonly leads to deficits in contralesional target selection that resemble visual extinction. Recently, we showed that macaque monkeys with unilateral lesions in the caudal prefrontal cortex (PFC) exhibited contralesional target selection deficits that recovered over 2–4 months (Adam et al., 2019). Here, we investigated the longitudinal changes in functional connectivity (FC) of the frontoparietal network after a small or large right caudal PFC lesion in four macaque monkeys. 
We collected ultra-high field resting-state fMRI at 7-T before the lesion and at weeks 1–16 post-lesion and compared the functional data with behavioural performance on a free-choice saccade task. We found that the pattern of frontoparietal network FC changes depended on lesion size, such that the recovery of contralesional extinction was associated with an initial increase in network FC that returned to baseline in the two small lesion monkeys, whereas FC continued to increase throughout recovery in the two monkeys with a larger lesion. We also found that the FC between contralesional dorsolateral PFC and ipsilesional parietal cortex correlated with behavioural recovery and that the contralesional dorsolateral PFC showed increasing degree centrality with the frontoparietal network. These findings suggest that both the contralesional and ipsilesional hemispheres play an important role in the recovery of function. Importantly, optimal compensation after large PFC lesions may require greater recruitment of distant and intact areas of the frontoparietal network, whereas recovery from smaller lesions was supported by a normalization of the functional network. |
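For reference, the two network measures named in this abstract, pairwise functional connectivity and degree centrality, reduce to a few lines on region time courses. An illustrative sketch (random stand-in data; the 0.1 threshold is arbitrary, and the paper's actual pipeline is far more involved):

```python
# Illustrative sketch (random stand-in data) of a functional
# connectivity matrix and degree centrality on the thresholded network.
import numpy as np

rng = np.random.default_rng(3)
n_regions, n_timepoints = 12, 300
ts = rng.normal(size=(n_regions, n_timepoints))  # stand-in BOLD signals

fc = np.corrcoef(ts)                 # region-by-region FC matrix
np.fill_diagonal(fc, 0.0)
adjacency = np.abs(fc) > 0.1         # binarize at an arbitrary threshold
degree_centrality = adjacency.sum(axis=1)
print("degree centrality per region:", degree_centrality)
```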
Owen J Adams; Nicholas Gaspelin Assessing introspective awareness of attention capture Journal Article Attention, Perception, and Psychophysics, 82 (4), pp. 1586–1598, 2020. @article{Adams2020, title = {Assessing introspective awareness of attention capture}, author = {Owen J Adams and Nicholas Gaspelin}, doi = {10.3758/s13414-019-01936-9}, year = {2020}, date = {2020-01-01}, journal = {Attention, Perception, and Psychophysics}, volume = {82}, number = {4}, pages = {1586--1598}, abstract = {Visual attention can sometimes be involuntarily captured by salient stimuli, and this may lead to impaired performance in a variety of real-world tasks. If observers were aware that their attention was being captured, they might be able to exert control and avoid subsequent distraction. However, it is unknown whether observers can detect attention capture when it occurs. In the current study, participants searched for a target shape and attempted to ignore a salient color distractor. On a subset of trials, participants then immediately classified whether the salient distractor captured their attention (“capture” vs. “no capture”). Participants were slower and less accurate at detecting the target on trials on which they reported “capture” than “no capture.” Follow-up experiments revealed that participants specifically detected covert shifts of attention to the salient item. Altogether, these results indicate that observers can have immediate awareness of visual distraction, at least under certain circumstances.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual attention can sometimes be involuntarily captured by salient stimuli, and this may lead to impaired performance in a variety of real-world tasks. If observers were aware that their attention was being captured, they might be able to exert control and avoid subsequent distraction. However, it is unknown whether observers can detect attention capture when it occurs. In the current study, participants searched for a target shape and attempted to ignore a salient color distractor. On a subset of trials, participants then immediately classified whether the salient distractor captured their attention (“capture” vs. “no capture”). Participants were slower and less accurate at detecting the target on trials on which they reported “capture” than “no capture.” Follow-up experiments revealed that participants specifically detected covert shifts of attention to the salient item. Altogether, these results indicate that observers can have immediate awareness of visual distraction, at least under certain circumstances. |
Rick A Adams; Daniel Bush; Fanfan Zheng; Sofie S Meyer; Raphael Kaplan; Stelios Orfanos; Tiago Reis Marques; Oliver D Howes; Neil Burgess Impaired theta phase coupling underlies frontotemporal dysconnectivity in schizophrenia Journal Article Brain, 143 (3), pp. 1261–1277, 2020. @article{Adams2020a, title = {Impaired theta phase coupling underlies frontotemporal dysconnectivity in schizophrenia}, author = {Rick A Adams and Daniel Bush and Fanfan Zheng and Sofie S Meyer and Raphael Kaplan and Stelios Orfanos and Tiago Reis Marques and Oliver D Howes and Neil Burgess}, doi = {10.1093/brain/awaa035}, year = {2020}, date = {2020-01-01}, journal = {Brain}, volume = {143}, number = {3}, pages = {1261--1277}, abstract = {Frontotemporal dysconnectivity is a key pathology in schizophrenia. The specific nature of this dysconnectivity is unknown, but animal models imply dysfunctional theta phase coupling between hippocampus and medial prefrontal cortex (mPFC). We tested this hypothesis by examining neural dynamics in 18 participants with a schizophrenia diagnosis, both medicated and unmedicated; and 26 age-, sex- and IQ-matched control subjects. All participants completed two tasks known to elicit hippocampal-prefrontal theta coupling: a spatial memory task (during magnetoencephalography) and a memory integration task. In addition, an overlapping group of 33 schizophrenia and 29 control subjects underwent PET to measure the availability of GABAARs expressing the α5 subunit (concentrated on hippocampal somatostatin interneurons). We demonstrate, in the spatial memory task, during memory recall, that theta power increases in left medial temporal lobe (mTL) are impaired in schizophrenia, as is theta phase coupling between mPFC and mTL. Importantly, the latter cannot be explained by theta power changes, head movement, antipsychotics, cannabis use, or IQ, and is not found in other frequency bands. Moreover, mPFC-mTL theta coupling correlated strongly with performance in controls, but not in subjects with schizophrenia, who were mildly impaired at the spatial memory task and no better than chance on the memory integration task. Finally, mTL regions showing reduced phase coupling in schizophrenia magnetoencephalography participants overlapped substantially with areas of diminished α5-GABAAR availability in the wider schizophrenia PET sample. These results indicate that mPFC-mTL dysconnectivity in schizophrenia is due to a loss of theta phase coupling, and imply α5-GABAARs (and the cells that express them) have a role in this process.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Frontotemporal dysconnectivity is a key pathology in schizophrenia. The specific nature of this dysconnectivity is unknown, but animal models imply dysfunctional theta phase coupling between hippocampus and medial prefrontal cortex (mPFC). We tested this hypothesis by examining neural dynamics in 18 participants with a schizophrenia diagnosis, both medicated and unmedicated; and 26 age-, sex- and IQ-matched control subjects. All participants completed two tasks known to elicit hippocampal-prefrontal theta coupling: a spatial memory task (during magnetoencephalography) and a memory integration task. In addition, an overlapping group of 33 schizophrenia and 29 control subjects underwent PET to measure the availability of GABAARs expressing the α5 subunit (concentrated on hippocampal somatostatin interneurons). 
We demonstrate, in the spatial memory task, during memory recall, that theta power increases in left medial temporal lobe (mTL) are impaired in schizophrenia, as is theta phase coupling between mPFC and mTL. Importantly, the latter cannot be explained by theta power changes, head movement, antipsychotics, cannabis use, or IQ, and is not found in other frequency bands. Moreover, mPFC-mTL theta coupling correlated strongly with performance in controls, but not in subjects with schizophrenia, who were mildly impaired at the spatial memory task and no better than chance on the memory integration task. Finally, mTL regions showing reduced phase coupling in schizophrenia magnetoencephalography participants overlapped substantially with areas of diminished α5-GABAAR availability in the wider schizophrenia PET sample. These results indicate that mPFC-mTL dysconnectivity in schizophrenia is due to a loss of theta phase coupling, and imply α5-GABAARs (and the cells that express them) have a role in this process. |
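A standard way to quantify theta phase coupling of the kind at issue here is the phase-locking value: band-pass both signals in the theta range, extract instantaneous phases via the Hilbert transform, and measure the consistency of their phase difference. A sketch under those assumptions (synthetic signals; the paper's MEG pipeline differs in detail):

```python
# Sketch of a theta phase-locking value (PLV) between two signals;
# the synthetic data and filter settings are assumptions.
import numpy as np
from scipy.signal import butter, filtfilt, hilbert

fs = 250.0                                    # sampling rate (Hz)
t = np.arange(0, 10, 1 / fs)
rng = np.random.default_rng(4)
theta = np.sin(2 * np.pi * 6 * t)             # shared 6 Hz rhythm
sig_mpfc = theta + 0.5 * rng.normal(size=t.size)
sig_mtl = np.roll(theta, 10) + 0.5 * rng.normal(size=t.size)

b, a = butter(4, [4 / (fs / 2), 8 / (fs / 2)], btype="bandpass")
phase1 = np.angle(hilbert(filtfilt(b, a, sig_mpfc)))
phase2 = np.angle(hilbert(filtfilt(b, a, sig_mtl)))

# PLV: length of the mean resultant vector of the phase differences
plv = np.abs(np.mean(np.exp(1j * (phase1 - phase2))))
print("theta phase-locking value: %.2f" % plv)
```

A PLV near 1 indicates tightly coupled theta phases; near 0, no consistent phase relationship.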
Hossein Adeli; Françoise Vitu; Gregory J Zelinsky A model of the superior colliculus predicts fixation locations during scene viewing and visual search Journal Article Journal of Neuroscience, 37 (6), pp. 1453–1467, 2017. @article{Adeli2017, title = {A model of the superior colliculus predicts fixation locations during scene viewing and visual search}, author = {Hossein Adeli and Fran{ç}oise Vitu and Gregory J Zelinsky}, doi = {10.1523/JNEUROSCI.0825-16.2016}, year = {2017}, date = {2017-01-01}, journal = {Journal of Neuroscience}, volume = {37}, number = {6}, pages = {1453--1467}, abstract = {Modern computational models of attention predict fixations using saliency maps and target maps, which prioritize locations for fixation based on feature contrast and target goals, respectively. But whereas many such models are biologically plausible, none have looked to the oculomotor system for design constraints or parameter specification. Conversely, although most models of saccade programming are tightly coupled to underlying neurophysiology, none have been tested using real-world stimuli and tasks. We combined the strengths of these two approaches in MASC, a model of attention in the superior colliculus (SC) that captures known neurophysiological constraints on saccade programming. We show that MASC predicted the fixation locations of humans freely viewing naturalistic scenes and performing exemplar and categorical search tasks, a breadth achieved by no other existing model. Moreover, it did this as well or better than its more specialized state-of-the-art competitors. MASC's predictive success stems from its inclusion of high-level but core principles of SC organization: an over-representation of foveal information, size-invariant population codes, cascaded population averaging over distorted visual and motor maps, and competition between motor point images for saccade programming, all of which cause further modulation of priority (attention) after projection of saliency and target maps to the SC. Only by incorporating these organizing brain principles into our models can we fully understand the transformation of complex visual information into the saccade programs underlying movements of overt attention. With MASC, a theoretical footing now exists to generate and test computationally explicit predictions of behavioral and neural responses in visually complex real-world contexts.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Modern computational models of attention predict fixations using saliency maps and target maps, which prioritize locations for fixation based on feature contrast and target goals, respectively. But whereas many such models are biologically plausible, none have looked to the oculomotor system for design constraints or parameter specification. Conversely, although most models of saccade programming are tightly coupled to underlying neurophysiology, none have been tested using real-world stimuli and tasks. We combined the strengths of these two approaches in MASC, a model of attention in the superior colliculus (SC) that captures known neurophysiological constraints on saccade programming. We show that MASC predicted the fixation locations of humans freely viewing naturalistic scenes and performing exemplar and categorical search tasks, a breadth achieved by no other existing model. Moreover, it did this as well or better than its more specialized state-of-the-art competitors. 
MASC's predictive success stems from its inclusion of high-level but core principles of SC organization: an over-representation of foveal information, size-invariant population codes, cascaded population averaging over distorted visual and motor maps, and competition between motor point images for saccade programming, all of which cause further modulation of priority (attention) after projection of saliency and target maps to the SC. Only by incorporating these organizing brain principles into our models can we fully understand the transformation of complex visual information into the saccade programs underlying movements of overt attention. With MASC, a theoretical footing now exists to generate and test computationally explicit predictions of behavioral and neural responses in visually complex real-world contexts. |
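One SC principle listed above, population averaging over motor point images, can be illustrated in isolation: activity on a motor map is read out as the activity-weighted average position, so overlapping point images yield intermediate endpoints (the classic global effect). A toy 1-D sketch, not MASC itself (map size, tuning width, and target positions are arbitrary):

```python
# Toy 1-D illustration (not MASC itself) of population averaging on a
# motor map: overlapping point images are read out as an
# activity-weighted average, giving an intermediate saccade endpoint.
import numpy as np

positions = np.linspace(-10, 10, 201)      # map positions in degrees

def point_image(center, sigma=2.0):
    # Gaussian population activity centered on a target location
    return np.exp(-0.5 * ((positions - center) / sigma) ** 2)

activity = point_image(3.0) + point_image(6.0)   # two nearby targets
endpoint = np.sum(positions * activity) / np.sum(activity)
print("population-averaged endpoint: %.2f deg" % endpoint)  # ~4.5
```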
Kivilcim Afacan-Seref; Natalie A Steinemann; Annabelle Blangero; Simon P Kelly Dynamic interplay of value and sensory information in high-speed decision making Journal Article Current Biology, 28 (5), pp. 795–802, 2018. @article{AfacanSeref2018, title = {Dynamic interplay of value and sensory information in high-speed decision making}, author = {Kivilcim Afacan-Seref and Natalie A Steinemann and Annabelle Blangero and Simon P Kelly}, doi = {10.1016/j.cub.2018.01.071}, year = {2018}, date = {2018-03-01}, journal = {Current Biology}, volume = {28}, number = {5}, pages = {795--802}, abstract = {In dynamic environments, split-second sensorimotor decisions must be prioritized according to potential payoffs to maximize overall rewards. The impact of relative value on deliberative perceptual judgments has been examined extensively [1–6], but relatively little is known about value-biasing mechanisms in the common situation where physical evidence is strong but the time to act is severely limited. In prominent decision models, a noisy but statistically stationary representation of sensory evidence is integrated over time to an action-triggering bound, and value-biases are affected by starting the integrator closer to the more valuable bound. Here, we show significant departures from this account for humans making rapid sensory-instructed action choices. Behavior was best explained by a simple model in which the evidence representation—and hence, rate of accumulation—is itself biased by value and is non-stationary, increasing over the short decision time frame. Because the value bias initially dominates, the model uniquely predicts a dynamic “turn-around” effect on low-value cues, where the accumulator first launches toward the incorrect action but is then re-routed to the correct one. This was clearly exhibited in electrophysiological signals reflecting motor preparation and evidence accumulation. Finally, we construct an extended model that implements this dynamic effect through plausible sensory neural response modulations and demonstrate the correspondence between decision signal dynamics simulated from a behavioral fit of that model and the empirical decision signals. Our findings suggest that value and sensory information can exert simultaneous and dynamically countervailing influences on the trajectory of the accumulation-to-bound process, driving rapid, sensory-guided actions.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In dynamic environments, split-second sensorimotor decisions must be prioritized according to potential payoffs to maximize overall rewards. The impact of relative value on deliberative perceptual judgments has been examined extensively [1–6], but relatively little is known about value-biasing mechanisms in the common situation where physical evidence is strong but the time to act is severely limited. In prominent decision models, a noisy but statistically stationary representation of sensory evidence is integrated over time to an action-triggering bound, and value-biases are affected by starting the integrator closer to the more valuable bound. Here, we show significant departures from this account for humans making rapid sensory-instructed action choices. Behavior was best explained by a simple model in which the evidence representation—and hence, rate of accumulation—is itself biased by value and is non-stationary, increasing over the short decision time frame. 
Because the value bias initially dominates, the model uniquely predicts a dynamic “turn-around” effect on low-value cues, where the accumulator first launches toward the incorrect action but is then re-routed to the correct one. This was clearly exhibited in electrophysiological signals reflecting motor preparation and evidence accumulation. Finally, we construct an extended model that implements this dynamic effect through plausible sensory neural response modulations and demonstrate the correspondence between decision signal dynamics simulated from a behavioral fit of that model and the empirical decision signals. Our findings suggest that value and sensory information can exert simultaneous and dynamically countervailing influences on the trajectory of the accumulation-to-bound process, driving rapid, sensory-guided actions. |
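The model's signature behavior can be reproduced in a few lines: give the accumulator a constant value-driven drift plus a sensory drift that grows over the trial, and low-value cues first head toward the wrong bound before turning around. A minimal simulation (all parameters hypothetical, not the paper's fits):

```python
# Minimal simulation (parameters hypothetical) of a value-biased,
# non-stationary accumulator on a low-value cue: a constant value drift
# toward the wrong bound plus sensory evidence that ramps up over time.
import numpy as np

dt, T = 0.001, 0.5
t = np.arange(0, T, dt)
value_bias = 4.0        # constant drift toward the high-value action
sensory_gain = 40.0     # sensory drift grows linearly within the trial

drift = -value_bias + sensory_gain * t   # net drift on a low-value cue
rng = np.random.default_rng(5)
x = np.cumsum(drift * dt + 0.1 * np.sqrt(dt) * rng.normal(size=t.size))

print("largest wrong-direction excursion: %.3f" % x.min())
print("final position (correct side):     %.3f" % x[-1])
```

The printed minimum is negative (an initial wrong-direction excursion) while the endpoint is positive, the turn-around pattern the abstract describes.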
Arash Afraz; Patrick Cavanagh The gender-specific face aftereffect is based in retinotopic not spatiotopic coordinates across several natural image transformations Journal Article Journal of Vision, 9 (10), pp. 1–17, 2009. @article{Afraz2009, title = {The gender-specific face aftereffect is based in retinotopic not spatiotopic coordinates across several natural image transformations}, author = {Arash Afraz and Patrick Cavanagh}, doi = {10.1167/9.10.10}, year = {2009}, date = {2009-01-01}, journal = {Journal of Vision}, volume = {9}, number = {10}, pages = {1--17}, abstract = {In four experiments, we measured the gender-specific face-aftereffect following subject's eye movement, head rotation, or head movement toward the display and following movement of the adapting stimulus itself to a new test location. In all experiments, the face aftereffect was strongest at the retinal position, orientation, and size of the adaptor. There was no advantage for the spatiotopic location in any experiment nor was there an advantage for the location newly occupied by the adapting face after it moved in the final experiment. Nevertheless, the aftereffect showed a broad gradient of transfer across location, orientation and size that, although centered on the retinotopic values of the adapting stimulus, covered ranges far exceeding the tuning bandwidths of neurons in early visual cortices. These results are consistent with a high-level site of adaptation (e.g. FFA) where units of face analysis have modest coverage of visual field, centered in retinotopic coordinates, but relatively broad tolerance for variations in size and orientation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In four experiments, we measured the gender-specific face-aftereffect following subject's eye movement, head rotation, or head movement toward the display and following movement of the adapting stimulus itself to a new test location. In all experiments, the face aftereffect was strongest at the retinal position, orientation, and size of the adaptor. There was no advantage for the spatiotopic location in any experiment nor was there an advantage for the location newly occupied by the adapting face after it moved in the final experiment. Nevertheless, the aftereffect showed a broad gradient of transfer across location, orientation and size that, although centered on the retinotopic values of the adapting stimulus, covered ranges far exceeding the tuning bandwidths of neurons in early visual cortices. These results are consistent with a high-level site of adaptation (e.g. FFA) where units of face analysis have modest coverage of visual field, centered in retinotopic coordinates, but relatively broad tolerance for variations in size and orientation. |
Zaeinab Afsari; José P Ossandón; Peter König The dynamic effect of reading direction habit on spatial asymmetry of image perception Journal Article Journal of Vision, 16 (11), pp. 1–21, 2016. @article{Afsari2016, title = {The dynamic effect of reading direction habit on spatial asymmetry of image perception}, author = {Zaeinab Afsari and José P Ossandón and Peter König}, doi = {10.1167/16.11.8}, year = {2016}, date = {2016-01-01}, journal = {Journal of Vision}, volume = {16}, number = {11}, pages = {1--21}, abstract = {Exploration of images after stimulus onset is initially biased to the left. Here, we studied the causes of such an asymmetry and investigated effects of reading habits, text primes, and priming by systematically biased eye movements on this spatial bias in visual exploration. Bilinguals first read text primes with right-to-left (RTL) or left-to-right (LTR) reading directions and subsequently explored natural images. In Experiment 1, native RTL speakers showed a leftward free-viewing shift after reading LTR primes but a weaker rightward bias after reading RTL primes. This demonstrates that reading direction dynamically influences the spatial bias. However, native LTR speakers who learned an RTL language late in life showed a leftward bias after reading either LTR or RTL primes, which suggests the role of habit formation in the production of the spatial bias. In Experiment 2, LTR bilinguals showed a slightly enhanced leftward bias after reading LTR text primes in their second language. This might contribute to the differences of native RTL and LTR speakers observed in Experiment 1. In Experiment 3, LTR bilinguals read normal (LTR, habitual reading) and mirrored left-to-right (mLTR, nonhabitual reading) texts. We observed a strong leftward bias in both cases, indicating that the bias direction is influenced by habitual reading direction and is not secondary to the actual reading direction. This is confirmed in Experiment 4, in which LTR participants were asked to follow RTL and LTR moving dots in prior image presentation and showed no change in the normal spatial bias. In conclusion, the horizontal bias is a dynamic property and is modulated by habitual reading direction.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Exploration of images after stimulus onset is initially biased to the left. Here, we studied the causes of such an asymmetry and investigated effects of reading habits, text primes, and priming by systematically biased eye movements on this spatial bias in visual exploration. Bilinguals first read text primes with right-to-left (RTL) or left-to-right (LTR) reading directions and subsequently explored natural images. In Experiment 1, native RTL speakers showed a leftward free-viewing shift after reading LTR primes but a weaker rightward bias after reading RTL primes. This demonstrates that reading direction dynamically influences the spatial bias. However, native LTR speakers who learned an RTL language late in life showed a leftward bias after reading either LTR or RTL primes, which suggests the role of habit formation in the production of the spatial bias. In Experiment 2, LTR bilinguals showed a slightly enhanced leftward bias after reading LTR text primes in their second language. This might contribute to the differences of native RTL and LTR speakers observed in Experiment 1. In Experiment 3, LTR bilinguals read normal (LTR, habitual reading) and mirrored left-to-right (mLTR, nonhabitual reading) texts. 
We observed a strong leftward bias in both cases, indicating that the bias direction is influenced by habitual reading direction and is not secondary to the actual reading direction. This is confirmed in Experiment 4, in which LTR participants were asked to follow RTL and LTR moving dots in prior image presentation and showed no change in the normal spatial bias. In conclusion, the horizontal bias is a dynamic property and is modulated by habitual reading direction. |
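The horizontal-bias measure underlying the entry above reduces to a simple summary statistic over early fixations. Below is a minimal sketch (hypothetical variable names and an illustrative 1.5-s analysis window; not the authors' code): the signed horizontal offset of post-onset fixations from screen center, with negative values indicating a leftward bias.

```python
import numpy as np

def horizontal_bias(fix_x, onsets, center_x, window=(0.0, 1.5)):
    """Mean signed horizontal fixation offset in pixels (negative =
    leftward of center) for fixations starting within `window` seconds
    of image onset. Names and the window are illustrative choices."""
    fix_x, onsets = np.asarray(fix_x, float), np.asarray(onsets, float)
    sel = (onsets >= window[0]) & (onsets < window[1])
    return float((fix_x[sel] - center_x).mean())

# Three early fixations on a display with a 960-px horizontal center:
print(horizontal_bias([700, 820, 1000], [0.2, 0.6, 1.1], 960))  # -120.0
```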
Mehmet N Ağaoğlu; Michael H Herzog; Haluk Öğmen Field-like interactions between motion-based reference frames Journal Article Attention, Perception, and Psychophysics, 77 (6), pp. 2082–2097, 2015. @article{Agaoglu2015, title = {Field-like interactions between motion-based reference frames}, author = {Mehmet N Ağaoğlu and Michael H Herzog and Haluk Öğmen}, doi = {10.3758/s13414-015-0890-9}, year = {2015}, date = {2015-01-01}, journal = {Attention, Perception, and Psychophysics}, volume = {77}, number = {6}, pages = {2082--2097}, abstract = {A reference frame is required to specify how motion is perceived. For example, the motion of part of an object is usually perceived relative to the motion of the object itself. Johansson (Psychological Research, 38, 379–393, 1976) proposed that the perceptual system carries out a vector decomposition, which results in common and relative motion percepts. Because vector decomposition is an ill-posed problem, several studies have introduced constraints by means of which the number of solutions can be substantially reduced. Here, we have adopted an alternative approach and studied how, rather than why, a subset of solutions is selected by the visual system. We propose that each retinotopic motion vector creates a reference-frame field in the retinotopic space, and that the fields created by different motion vectors interact in order to determine a motion vector that will serve as the reference frame at a given point and time in space. To test this theory, we performed a set of psychophysical experiments. The field-like influence of motion-based reference frames was manifested by increased nonspatiotopic percepts of the backward motion of a target square with decreasing distance from a drifting grating. We then sought to determine whether these field-like effects of motion-based reference frames can also be extended to stationary landmarks. The results suggest that reference-field interactions occur only between motion-generated fields. Finally, we investigated whether and how different reference fields interact with each other, and found that different reference-field interactions are nonlinear and depend on how the motion vectors are grouped. These findings are discussed from the perspective of the reference-frame metric field (RFMF) theory, according to which perceptual grouping operations play a central and essential role in determining the prevailing reference frames.}, keywords = {}, pubstate = {published}, tppubtype = {article} } A reference frame is required to specify how motion is perceived. For example, the motion of part of an object is usually perceived relative to the motion of the object itself. Johansson (Psychological Research, 38, 379–393, 1976) proposed that the perceptual system carries out a vector decomposition, which results in common and relative motion percepts. Because vector decomposition is an ill-posed problem, several studies have introduced constraints by means of which the number of solutions can be substantially reduced. Here, we have adopted an alternative approach and studied how, rather than why, a subset of solutions is selected by the visual system. We propose that each retinotopic motion vector creates a reference-frame field in the retinotopic space, and that the fields created by different motion vectors interact in order to determine a motion vector that will serve as the reference frame at a given point and time in space. To test this theory, we performed a set of psychophysical experiments.
The field-like influence of motion-based reference frames was manifested by increased nonspatiotopic percepts of the backward motion of a target square with decreasing distance from a drifting grating. We then sought to determine whether these field-like effects of motion-based reference frames can also be extended to stationary landmarks. The results suggest that reference-field interactions occur only between motion-generated fields. Finally, we investigated whether and how different reference fields interact with each other, and found that different reference-field interactions are nonlinear and depend on how the motion vectors are grouped. These findings are discussed from the perspective of the reference-frame metric field (RFMF) theory, according to which perceptual grouping operations play a central and essential role in determining the prevailing reference frames. |
Mehmet N Ağaoğlu; Michael H Herzog; Haluk Öğmen The effective reference frame in perceptual judgments of motion direction Journal Article Vision Research, 107 , pp. 101–112, 2015. @article{Agaoglu2015a, title = {The effective reference frame in perceptual judgments of motion direction}, author = {Mehmet N Ağaoğlu and Michael H Herzog and Haluk Öğmen}, doi = {10.1016/j.visres.2014.12.009}, year = {2015}, date = {2015-01-01}, journal = {Vision Research}, volume = {107}, pages = {101--112}, abstract = {The retinotopic projection of stimulus motion depends both on the motion of the stimulus and the movements of the observer. In this study, we aimed to quantify the contributions of endogenous (retinotopic) and exogenous (spatiotopic and motion-based) reference frames on judgments of motion direction. We used a variant of the induced motion paradigm and we created different experimental conditions in which the predictions of each reference frame were different. Finally, assuming additive contributions from different reference frames, we used a linear model to account for the data. Our results suggest that the effective reference frame for motion perception emerges from an amalgamation of motion-based, retinotopic and spatiotopic reference frames. In determining the percept, the influence of relative motion, defined by a motion-based reference frame, dominates those of retinotopic and spatiotopic motions within a finite region. We interpret these findings within the context of the Reference Frame Metric Field (RFMF) theory, which states that local motion vectors might have perceptual reference-frame fields associated with them, and interactions between these fields determine the selection of the effective reference frame.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The retinotopic projection of stimulus motion depends both on the motion of the stimulus and the movements of the observer. In this study, we aimed to quantify the contributions of endogenous (retinotopic) and exogenous (spatiotopic and motion-based) reference frames on judgments of motion direction. We used a variant of the induced motion paradigm and we created different experimental conditions in which the predictions of each reference frame were different. Finally, assuming additive contributions from different reference frames, we used a linear model to account for the data. Our results suggest that the effective reference frame for motion perception emerges from an amalgamation of motion-based, retinotopic and spatiotopic reference frames. In determining the percept, the influence of relative motion, defined by a motion-based reference frame, dominates those of retinotopic and spatiotopic motions within a finite region. We interpret these findings within the context of the Reference Frame Metric Field (RFMF) theory, which states that local motion vectors might have perceptual reference-frame fields associated with them, and interactions between these fields determine the selection of the effective reference frame. |
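The additive linear model in the entry above lends itself to a compact illustration. Below is a minimal sketch, assuming hypothetical condition-wise direction predictions for each reference frame; the weights are recovered by ordinary least squares, one plausible way to fit an additive-contributions model (the data values are made up, not the paper's).

```python
import numpy as np

# Hypothetical data: one row per condition; columns give the motion
# direction (deg) that each reference frame alone would predict.
predictions = np.array([
    [10.0,  0.0, -20.0],   # [retinotopic, spatiotopic, motion-based]
    [25.0,  5.0, -10.0],
    [ 0.0, 15.0, -30.0],
    [40.0, 20.0,   0.0],
])
perceived = np.array([-8.0, 2.0, -18.0, 18.0])  # mean reported directions

# Assuming additive contributions, solve perceived ~ predictions @ w
# for the reference-frame weights w by ordinary least squares.
w, *_ = np.linalg.lstsq(predictions, perceived, rcond=None)
print("weights (retinotopic, spatiotopic, motion-based):", w.round(2))
```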
Mehmet N Ağaoğlu; Susana T L Chung Can (should) theories of crowding be unified? Journal Article Journal of Vision, 16 (15), pp. 1–22, 2016. @article{Agaoglu2016, title = {Can (should) theories of crowding be unified?}, author = {Mehmet N Ağaoğlu and Susana T L Chung}, doi = {10.1167/16.15.10}, year = {2016}, date = {2016-01-01}, journal = {Journal of Vision}, volume = {16}, number = {15}, pages = {1--22}, abstract = {Objects in clutter are difficult to recognize, a phenomenon known as crowding. There is little consensus on the underlying mechanisms of crowding, and a large number of models have been proposed. There have also been attempts at unifying the explanations of crowding under a single model, such as the weighted feature model of Harrison and Bex (2015) and the texture synthesis model of Rosenholtz and colleagues (Balas, Nakano, & Rosenholtz, 2009; Keshvari & Rosenholtz, 2016). The goal of this work was to test various models of crowding and to assess whether a unifying account can be developed. Adopting Harrison and Bex's (2015) experimental paradigm, we asked observers to report the orientation of two concentric C-stimuli. Contrary to the predictions of their model, observers' recognition accuracy was worse for the inner C-stimulus. In addition, we demonstrated that the stimulus paradigm used by Harrison and Bex has a crucial confounding factor, eccentricity, which limits its usage to a very narrow range of stimulus parameters. Nevertheless, reporting the orientations of both C-stimuli in this paradigm proved very useful in pitting different crowding models against each other. Specifically, we tested deterministic and probabilistic versions of averaging, substitution, and attentional resolution models as well as the texture synthesis model. None of the models alone was able to explain the entire set of data. Based on these findings, we discuss whether the explanations of crowding can (should) be unified.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objects in clutter are difficult to recognize, a phenomenon known as crowding. There is little consensus on the underlying mechanisms of crowding, and a large number of models have been proposed. There have also been attempts at unifying the explanations of crowding under a single model, such as the weighted feature model of Harrison and Bex (2015) and the texture synthesis model of Rosenholtz and colleagues (Balas, Nakano, & Rosenholtz, 2009; Keshvari & Rosenholtz, 2016). The goal of this work was to test various models of crowding and to assess whether a unifying account can be developed. Adopting Harrison and Bex's (2015) experimental paradigm, we asked observers to report the orientation of two concentric C-stimuli. Contrary to the predictions of their model, observers' recognition accuracy was worse for the inner C-stimulus. In addition, we demonstrated that the stimulus paradigm used by Harrison and Bex has a crucial confounding factor, eccentricity, which limits its usage to a very narrow range of stimulus parameters. Nevertheless, reporting the orientations of both C-stimuli in this paradigm proved very useful in pitting different crowding models against each other. Specifically, we tested deterministic and probabilistic versions of averaging, substitution, and attentional resolution models as well as the texture synthesis model. None of the models alone was able to explain the entire set of data. Based on these findings, we discuss whether the explanations of crowding can (should) be unified. |
Mehmet N Ağaoğlu; Aaron M Clarke; Michael H Herzog; Haluk Öğmen Motion-based nearest vector metric for reference frame selection in the perception of motion Journal Article Journal of Vision, 16 (7), pp. 1–16, 2016. @article{Agaoglu2016a, title = {Motion-based nearest vector metric for reference frame selection in the perception of motion}, author = {Mehmet N Ağaoğlu and Aaron M Clarke and Michael H Herzog and Haluk Öğmen}, doi = {10.1167/16.7.14}, year = {2016}, date = {2016-01-01}, journal = {Journal of Vision}, volume = {16}, number = {7}, pages = {1--16}, abstract = {We investigated how the visual system selects a reference frame for the perception of motion. Two concentric arcs underwent circular motion around the center of the display, where observers fixated. The outer (target) arc's angular velocity profile was modulated by a sine wave midflight whereas the inner (reference) arc moved at a constant angular speed. The task was to report whether the target reversed its direction of motion at any point during its motion. We investigated the effects of spatial and figural factors by systematically varying the radial and angular distances between the arcs, and their relative sizes. We found that the effectiveness of the reference frame decreases with increasing radial- and angular-distance measures. Drastic changes in the relative sizes of the arcs did not influence motion reversal thresholds, suggesting no influence of stimulus form on perceived motion. We also investigated the effect of common velocity by introducing velocity fluctuations to the reference arc as well. We found no effect of whether or not a reference frame has a constant motion. We examined several form- and motion-based metrics, which could potentially unify our findings. We found that a motion-based nearest vector metric can fully account for all the data reported here. These findings suggest that the selection of reference frames for motion processing does not result from a winner-take-all process, but instead, can be explained by a field whose strength decreases with the distance between the nearest motion vectors regardless of the form of the moving objects.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We investigated how the visual system selects a reference frame for the perception of motion. Two concentric arcs underwent circular motion around the center of the display, where observers fixated. The outer (target) arc's angular velocity profile was modulated by a sine wave midflight whereas the inner (reference) arc moved at a constant angular speed. The task was to report whether the target reversed its direction of motion at any point during its motion. We investigated the effects of spatial and figural factors by systematically varying the radial and angular distances between the arcs, and their relative sizes. We found that the effectiveness of the reference frame decreases with increasing radial- and angular-distance measures. Drastic changes in the relative sizes of the arcs did not influence motion reversal thresholds, suggesting no influence of stimulus form on perceived motion. We also investigated the effect of common velocity by introducing velocity fluctuations to the reference arc as well. We found no effect of whether or not a reference frame has a constant motion. We examined several form- and motion-based metrics, which could potentially unify our findings. We found that a motion-based nearest vector metric can fully account for all the data reported here.
These findings suggest that the selection of reference frames for motion processing does not result from a winner-take-all process, but instead, can be explained by a field whose strength decreases with the distance between the nearest motion vectors regardless of the form of the moving objects. |
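The nearest-vector metric described above can be read as a field strength that decays with the distance between the closest motion vectors of the target and a candidate reference frame. The sketch below encodes one plausible version of that idea; the Gaussian falloff and the sigma value are assumptions for illustration, not taken from the paper.

```python
import numpy as np

def field_strength(target_vecs, reference_vecs, sigma=2.0):
    """Strength of a candidate reference frame for a target, taken here
    to decay with the distance between the nearest pair of motion
    vectors (positions in deg); Gaussian falloff and sigma are assumed."""
    t = np.asarray(target_vecs, float)[:, None, :]     # (nT, 1, 2)
    r = np.asarray(reference_vecs, float)[None, :, :]  # (1, nR, 2)
    d_min = np.sqrt(((t - r) ** 2).sum(-1)).min()      # nearest-pair distance
    return float(np.exp(-d_min**2 / (2 * sigma**2)))

# A nearby reference arc exerts a strong field; a distant one almost none.
target = [(0.0, 5.0), (1.0, 5.2)]
print(field_strength(target, [(0.5, 4.5)]))   # ~0.94
print(field_strength(target, [(8.0, -3.0)]))  # ~5e-7
```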
Mehmet N Ağaoğlu; Haluk Öğmen; Susana T L Chung Unmasking saccadic uncrowding Journal Article Vision Research, 127 , pp. 152–164, 2016. @article{Agaoglu2016b, title = {Unmasking saccadic uncrowding}, author = {Mehmet N Ağaoğlu and Haluk Öğmen and Susana T L Chung}, doi = {10.1016/j.visres.2016.08.003}, year = {2016}, date = {2016-01-01}, journal = {Vision Research}, volume = {127}, pages = {152--164}, abstract = {Stimuli that are briefly presented around the time of saccades are often perceived with spatiotemporal distortions. These distortions do not always have deleterious effects on the visibility and identification of a stimulus. Recent studies reported that when a stimulus is the target of an intended saccade, it is released from both masking and crowding. Here, we investigated pre-saccadic changes in single and crowded letter recognition performance in the absence (Experiment 1) and the presence (Experiment 2) of backward masks to determine the extent to which saccadic “uncrowding” and “unmasking” mechanisms are similar. Our results show that pre-saccadic improvements in letter recognition performance are mostly due to the presence of masks and/or stimulus transients which occur after the target is presented. More importantly, we did not find any decrease in crowding strength before impending saccades. A simplified version of a dual-channel neural model, originally proposed to explain masking phenomena, with several saccadic add-on mechanisms, could account for our results in Experiment 1. However, this model falls short in explaining how saccades drastically reduced the effect of backward masking (Experiment 2). The addition of a remapping mechanism that alters the relative spatial positions of stimuli was needed to fully account for the improvements observed when backward masks followed the letter stimuli. Taken together, our results (i) are inconsistent with saccadic uncrowding, (ii) strongly support saccadic unmasking, and (iii) suggest that pre-saccadic letter recognition is modulated by multiple perisaccadic mechanisms with different time courses.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Stimuli that are briefly presented around the time of saccades are often perceived with spatiotemporal distortions. These distortions do not always have deleterious effects on the visibility and identification of a stimulus. Recent studies reported that when a stimulus is the target of an intended saccade, it is released from both masking and crowding. Here, we investigated pre-saccadic changes in single and crowded letter recognition performance in the absence (Experiment 1) and the presence (Experiment 2) of backward masks to determine the extent to which saccadic “uncrowding” and “unmasking” mechanisms are similar. Our results show that pre-saccadic improvements in letter recognition performance are mostly due to the presence of masks and/or stimulus transients which occur after the target is presented. More importantly, we did not find any decrease in crowding strength before impending saccades. A simplified version of a dual-channel neural model, originally proposed to explain masking phenomena, with several saccadic add-on mechanisms, could account for our results in Experiment 1. However, this model falls short in explaining how saccades drastically reduced the effect of backward masking (Experiment 2). 
The addition of a remapping mechanism that alters the relative spatial positions of stimuli was needed to fully account for the improvements observed when backward masks followed the letter stimuli. Taken together, our results (i) are inconsistent with saccadic uncrowding, (ii) strongly support saccadic unmasking, and (iii) suggest that pre-saccadic letter recognition is modulated by multiple perisaccadic mechanisms with different time courses. |
Mehmet N Ağaoğlu; Susana T L Chung Interaction between stimulus contrast and pre-saccadic crowding Journal Article Royal Society Open Science, 4 (2), pp. 1–17, 2017. @article{Agaoglu2017, title = {Interaction between stimulus contrast and pre-saccadic crowding}, author = {Mehmet N Ağaoğlu and Susana T L Chung}, doi = {10.1098/rsos.160559}, year = {2017}, date = {2017-01-01}, journal = {Royal Society Open Science}, volume = {4}, number = {2}, pages = {1--17}, abstract = {Objects that are briefly flashed around the time of saccades are mislocalized. Previously, robust interactions between saccadic perceptual distortions and stimulus contrast have been reported. It is also known that crowding depends on the contrast of the target and flankers. Here, we investigated how stimulus contrast and crowding interact with pre-saccadic perception. We asked observers to report the orientation of a tilted Gabor presented in the periphery, with or without four flanking vertically oriented Gabors. Observers performed the task either following a saccade or while maintaining fixation. Contrasts of the target and flankers were independently set to either high or low, with equal probability. In both the fixation and saccade conditions, the flanked conditions resulted in worse discrimination performance—the crowding effect. In the unflanked saccade trials, performance significantly decreased with target-to-saccade onset for low-contrast targets but not for high-contrast targets. In the presence of flankers, impending saccades reduced performance only for low-contrast, but not for high-contrast flankers. Interestingly, average performance in the fixation and saccade conditions was mostly similar in all contrast conditions. Moreover, the magnitude of crowding was influenced by saccades only when the target had high contrast and the flankers had low contrasts. Overall, our results are consistent with modulation of perisaccadic spatial localization by contrast and saccadic suppression, but at odds with a recent report of pre-saccadic release of crowding.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objects that are briefly flashed around the time of saccades are mislocalized. Previously, robust interactions between saccadic perceptual distortions and stimulus contrast have been reported. It is also known that crowding depends on the contrast of the target and flankers. Here, we investigated how stimulus contrast and crowding interact with pre-saccadic perception. We asked observers to report the orientation of a tilted Gabor presented in the periphery, with or without four flanking vertically oriented Gabors. Observers performed the task either following a saccade or while maintaining fixation. Contrasts of the target and flankers were independently set to either high or low, with equal probability. In both the fixation and saccade conditions, the flanked conditions resulted in worse discrimination performance—the crowding effect. In the unflanked saccade trials, performance significantly decreased with target-to-saccade onset for low-contrast targets but not for high-contrast targets. In the presence of flankers, impending saccades reduced performance only for low-contrast, but not for high-contrast flankers. Interestingly, average performance in the fixation and saccade conditions was mostly similar in all contrast conditions. Moreover, the magnitude of crowding was influenced by saccades only when the target had high contrast and the flankers had low contrasts. 
Overall, our results are consistent with modulation of perisaccadic spatial localization by contrast and saccadic suppression, but at odds with a recent report of pre-saccadic release of crowding. |
Stephen J Agauas; Laura E Thomas Change detection for real-world objects in perihand space Journal Article Attention, Perception, and Psychophysics, 81 (7), pp. 2365–2383, 2019. @article{Agauas2019, title = {Change detection for real-world objects in perihand space}, author = {Stephen J Agauas and Laura E Thomas}, doi = {10.3758/s13414-019-01820-6}, year = {2019}, date = {2019-01-01}, journal = {Attention, Perception, and Psychophysics}, volume = {81}, number = {7}, pages = {2365--2383}, abstract = {Recent evidence has demonstrated that observers experience visual-processing biases in perihand space that may be tied to the hands' relevance for grasping actions. Our previous work suggested that when the hands are positioned to afford a power-grasp action, observers show increased temporal sensitivity that could aid with fast and forceful action, whereas when the hands are instead at the ready to perform a precision-grasp action, observers show enhanced spatial sensitivity that benefits delicate and detail-oriented actions. In the present investigation we seek to extend these previous findings by examining how object affordances may interact with hand positioning to shape visual biases in perihand space. Across three experiments, we examined how long participants took to perform a change detection task on photos of real objects, while we manipulated hand position (near/far from display), grasp posture (power/precision), and change type (orientation/identity). Participants viewed objects that afforded either a power grasp or a precision grasp, or were ungraspable. Although we were unable to uncover evidence of altered vision in perihand space in our first experiment, mirroring previous findings, in Experiments 2 and 3 our participants showed grasp-dependent biases near the hands when detecting changes to target objects that afforded a power grasp. Interestingly, ungraspable target objects were not subject to the same perihand space biases. Taken together, our results suggest that the influence of hand position on change detection performance is mediated not only by the hands' grasp posture, but also by a target object's affordances for grasping.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recent evidence has demonstrated that observers experience visual-processing biases in perihand space that may be tied to the hands' relevance for grasping actions. Our previous work suggested that when the hands are positioned to afford a power-grasp action, observers show increased temporal sensitivity that could aid with fast and forceful action, whereas when the hands are instead at the ready to perform a precision-grasp action, observers show enhanced spatial sensitivity that benefits delicate and detail-oriented actions. In the present investigation we seek to extend these previous findings by examining how object affordances may interact with hand positioning to shape visual biases in perihand space. Across three experiments, we examined how long participants took to perform a change detection task on photos of real objects, while we manipulated hand position (near/far from display), grasp posture (power/precision), and change type (orientation/identity). Participants viewed objects that afforded either a power grasp or a precision grasp, or were ungraspable.
Although we were unable to uncover evidence of altered vision in perihand space in our first experiment, mirroring previous findings, in Experiments 2 and 3 our participants showed grasp-dependent biases near the hands when detecting changes to target objects that afforded a power grasp. Interestingly, ungraspable target objects were not subject to the same perihand space biases. Taken together, our results suggest that the influence of hand position on change detection performance is mediated not only by the hands' grasp posture, but also by a target object's affordances for grasping. |
Elena Aggius-Vella; Monica Gori; Silvia Animali; Claudio Campus; Paola Binda Non-spatial skills differ in the front and rear peri-personal space Journal Article Neuropsychologia, 147 , pp. 1–8, 2020. @article{AggiusVella2020, title = {Non-spatial skills differ in the front and rear peri-personal space}, author = {Elena Aggius-Vella and Monica Gori and Silvia Animali and Claudio Campus and Paola Binda}, doi = {10.1016/j.neuropsychologia.2020.107619}, year = {2020}, date = {2020-01-01}, journal = {Neuropsychologia}, volume = {147}, pages = {1--8}, publisher = {Elsevier Ltd}, abstract = {In measuring behavioural and pupillary responses to auditory oddball stimuli delivered in the front and rear peri-personal space, we find that pupils dilate in response to rare stimuli, both target and distracters. Dilation in response to targets is stronger than the response to distracters, implying a task relevance effect on pupil responses. Crucially, pupil dilation in response to targets is also selectively modulated by the location of sound sources: stronger in the front than in the rear peri-personal space, in spite of matching behavioural performance. This supports the concept that even non-spatial skills, such as the ability to alert in response to behaviourally relevant events, are differentially engaged across subregions of the peri-personal space.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In measuring behavioural and pupillary responses to auditory oddball stimuli delivered in the front and rear peri-personal space, we find that pupils dilate in response to rare stimuli, both target and distracters. Dilation in response to targets is stronger than the response to distracters, implying a task relevance effect on pupil responses. Crucially, pupil dilation in response to targets is also selectively modulated by the location of sound sources: stronger in the front than in the rear peri-personal space, in spite of matching behavioural performance. This supports the concept that even non-spatial skills, such as the ability to alert in response to behaviourally relevant events, are differentially engaged across subregions of the peri-personal space. |
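Oddball pupillometry of the kind reported above is typically quantified as baseline-corrected dilation per event. A minimal sketch follows, with conventional window choices and simulated data that are illustrative, not the paper's.

```python
import numpy as np

def pupil_dilation(trace, onsets, fs=500, base=(-0.2, 0.0), resp=(0.5, 2.0)):
    """Baseline-corrected dilation per event: mean pupil size in a
    response window minus the pre-event baseline. Window choices are
    conventional defaults, not the paper's parameters."""
    out = []
    for on in onsets:
        i = int(on * fs)
        baseline = trace[i + int(base[0] * fs): i + int(base[1] * fs)].mean()
        response = trace[i + int(resp[0] * fs): i + int(resp[1] * fs)].mean()
        out.append(response - baseline)
    return np.array(out)

rng = np.random.default_rng(1)
trace = rng.normal(3.0, 0.05, 60 * 500)  # 60 s of pupil diameter (mm), 500 Hz
print(pupil_dilation(trace, onsets=[5.0, 20.0, 41.0]).round(3))
```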
Dimitris Agrafiotis; Nishan Canagarajah; David R Bull; Matthew Dye Perceptually optimised sign language video coding based on eye tracking analysis Journal Article Electronics Letters, 39, pp. 1–2, 2003. @article{Agrafiotis2003, title = {Perceptually optimised sign language video coding based on eye tracking analysis}, author = {Dimitris Agrafiotis and Nishan Canagarajah and David R Bull and Matthew Dye}, doi = {10.1049/el}, year = {2003}, date = {2003-01-01}, journal = {Electronics Letters}, volume = {39}, pages = {1--2}, abstract = {A perceptually optimised approach to sign language video coding is presented. The proposed approach is based on the results (included) of an eye tracking study of the visual attention of sign language viewers. Results show reductions in bit rate of over 30% with very good subjective quality.}, keywords = {}, pubstate = {published}, tppubtype = {article} } A perceptually optimised approach to sign language video coding is presented. The proposed approach is based on the results (included) of an eye tracking study of the visual attention of sign language viewers. Results show reductions in bit rate of over 30% with very good subjective quality. |
D Agrafiotis; S J C Davies; N Canagarajah; D R Bull Towards efficient context-specific video coding based on gaze-tracking analysis Journal Article ACM Transactions on Multimedia Computing, Communications and Applications, 3 (4), pp. 1–15, 2007. @article{Agrafiotis2007, title = {Towards efficient context-specific video coding based on gaze-tracking analysis}, author = {D Agrafiotis and S J C Davies and N Canagarajah and D R Bull}, doi = {10.1145/1314303.1314307}, year = {2007}, date = {2007-01-01}, journal = {ACM Transactions on Multimedia Computing, Communications and Applications}, volume = {3}, number = {4}, pages = {1--15}, abstract = {This article discusses a framework for model-based, context-dependent video coding based on exploitation of characteristics of the human visual system. The system utilizes variable-quality coding based on priority maps which are created using mostly context-dependent rules. The technique is demonstrated through two case studies of specific video context, namely open signed content and football sequences. Eye-tracking analysis is employed for identifying the characteristics of each context, which are subsequently exploited for coding purposes, either directly or through a gaze prediction model. The framework is shown to achieve a considerable improvement in coding efficiency.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This article discusses a framework for model-based, context-dependent video coding based on exploitation of characteristics of the human visual system. The system utilizes variable-quality coding based on priority maps which are created using mostly context-dependent rules. The technique is demonstrated through two case studies of specific video context, namely open signed content and football sequences. Eye-tracking analysis is employed for identifying the characteristics of each context, which are subsequently exploited for coding purposes, either directly or through a gaze prediction model. The framework is shown to achieve a considerable improvement in coding efficiency. |
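Both Agrafiotis et al. entries rest on the same mechanism: a priority map that concentrates bits near the (predicted) gaze position by relaxing quantization with eccentricity. A minimal sketch of such a map follows; the macroblock grid, radius, and offset ceiling are illustrative values, not parameters from the papers.

```python
import numpy as np

def qp_offset_map(width_mb, height_mb, gaze_mb, max_offset=12, radius_mb=6):
    """Per-macroblock quantizer offsets: zero near the (predicted) gaze
    position, rising to max_offset with distance, so bits concentrate
    where the viewer is likely looking. All values are illustrative."""
    ys, xs = np.mgrid[0:height_mb, 0:width_mb]
    dist = np.hypot(xs - gaze_mb[0], ys - gaze_mb[1])
    return np.clip((dist - radius_mb) / radius_mb, 0.0, 1.0) * max_offset

offsets = qp_offset_map(22, 18, gaze_mb=(11, 9))
print(offsets.shape, offsets[9, 8:15].round(1))  # flat around the gazed block
```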
Ioannis Agtzidis; Inga Meyhöfer; Michael Dorr; Rebekka Lencer Following Forrest Gump: Smooth pursuit related brain activation during free movie viewing Journal Article NeuroImage, 216 , pp. 1–11, 2020. @article{Agtzidis2020, title = {Following Forrest Gump: Smooth pursuit related brain activation during free movie viewing}, author = {Ioannis Agtzidis and Inga Meyhöfer and Michael Dorr and Rebekka Lencer}, doi = {10.1016/j.neuroimage.2019.116491}, year = {2020}, date = {2020-01-01}, journal = {NeuroImage}, volume = {216}, pages = {1--11}, abstract = {Most fMRI studies investigating smooth pursuit (SP) related brain activity have used simple synthetic stimuli such as a sinusoidally moving dot. However, real-life situations are much more complex and SP does not occur in isolation but within sequences of saccades and fixations. This raises the question whether the same brain networks for SP that have been identified under laboratory conditions are activated when following moving objects in a movie. Here, we used the publicly available studyforrest data set that provides eye movement recordings along with 3 T fMRI recordings from 15 subjects while watching the Hollywood movie “Forrest Gump”. All three major eye movement events, namely fixations, saccades, and smooth pursuit, were detected with a state-of-the-art algorithm. In our analysis, smooth pursuit (SP) was the eye movement of interest, while saccades were acting as the steady state of viewing behaviour due to their lower variability. For the fMRI analysis we used an event-related design modelling saccades and SP as regressors initially. Because of the interdependency of SP and content motion, we then added a new low-level content motion regressor to separate brain activations from these two sources. We identified higher BOLD-responses during SP than saccades bilaterally in MT+/V5, in middle cingulate extending to precuneus, and in the right temporoparietal junction. When the motion regressor was added, SP showed higher BOLD-response relative to saccades bilaterally in the cortex lining the superior temporal sulcus, precuneus, and supplementary eye field, presumably due to a confounding effect of background motion. Only parts of V2 showed higher activation during saccades in comparison to SP. Taken together, our approach should be regarded as proof of principle for deciphering brain activity related to SP, which is one of the most prominent eye movements besides saccades, in complex dynamic naturalistic situations.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Most fMRI studies investigating smooth pursuit (SP) related brain activity have used simple synthetic stimuli such as a sinusoidally moving dot. However, real-life situations are much more complex and SP does not occur in isolation but within sequences of saccades and fixations. This raises the question whether the same brain networks for SP that have been identified under laboratory conditions are activated when following moving objects in a movie. Here, we used the publicly available studyforrest data set that provides eye movement recordings along with 3 T fMRI recordings from 15 subjects while watching the Hollywood movie “Forrest Gump”. All three major eye movement events, namely fixations, saccades, and smooth pursuit, were detected with a state-of-the-art algorithm. In our analysis, smooth pursuit (SP) was the eye movement of interest, while saccades were acting as the steady state of viewing behaviour due to their lower variability. 
For the fMRI analysis we used an event-related design modelling saccades and SP as regressors initially. Because of the interdependency of SP and content motion, we then added a new low-level content motion regressor to separate brain activations from these two sources. We identified higher BOLD-responses during SP than saccades bilaterally in MT+/V5, in middle cingulate extending to precuneus, and in the right temporoparietal junction. When the motion regressor was added, SP showed higher BOLD-response relative to saccades bilaterally in the cortex lining the superior temporal sulcus, precuneus, and supplementary eye field, presumably due to a confounding effect of background motion. Only parts of V2 showed higher activation during saccades in comparison to SP. Taken together, our approach should be regarded as proof of principle for deciphering brain activity related to SP, which is one of the most prominent eye movements besides saccades, in complex dynamic naturalistic situations. |
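The event-related analysis described above amounts to a GLM whose regressors are eye-movement events convolved with a haemodynamic response function. Below is a minimal sketch of building such a design matrix, with hypothetical event timings and a simplified double-gamma HRF (not any specific package's exact form).

```python
import numpy as np
from scipy.stats import gamma

TR, n_vols, dt = 2.0, 300, 0.1
tt = np.arange(0.0, n_vols * TR, dt)  # fine time grid (s)

def hrf(ts):
    # Simplified double-gamma haemodynamic response (illustrative shape).
    return gamma.pdf(ts, 6) - 0.35 * gamma.pdf(ts, 16)

def regressor(onsets, durations):
    """Event boxcar on the fine grid, convolved with the HRF, then
    sampled at each volume's acquisition time."""
    box = np.zeros_like(tt)
    for on, dur in zip(onsets, durations):
        box[(tt >= on) & (tt < on + dur)] = 1.0
    conv = np.convolve(box, hrf(np.arange(0.0, 32.0, dt)))[: tt.size]
    return conv[:: int(TR / dt)]

# Hypothetical event times (s) from an eye-movement classifier.
X = np.column_stack([
    regressor([12, 80, 200], [1.5, 2.0, 1.0]),  # smooth-pursuit episodes
    regressor([5, 40, 150, 250], [0.05] * 4),   # saccades
    regressor([0, 100], [30, 50]),              # low-level content motion
    np.ones(n_vols),                            # intercept
])
print(X.shape)  # (300, 4): design matrix for a voxelwise GLM
```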
Luis Aguado; Karisa B Parkington; Teresa Dieguez-Risco; José A Hinojosa; Roxane J Itier Joint modulation of facial expression processing by contextual congruency and task demands Journal Article Brain Sciences, 9 , pp. 1–20, 2019. @article{Aguado2019, title = {Joint modulation of facial expression processing by contextual congruency and task demands}, author = {Luis Aguado and Karisa B Parkington and Teresa Dieguez-Risco and José A Hinojosa and Roxane J Itier}, doi = {10.3390/brainsci9050116}, year = {2019}, date = {2019-01-01}, journal = {Brain Sciences}, volume = {9}, pages = {1--20}, abstract = {Faces showing expressions of happiness or anger were presented together with sentences that described happiness-inducing or anger-inducing situations. Two main variables were manipulated: (i) congruency between contexts and expressions (congruent/incongruent) and (ii) the task assigned to the participant, discriminating the emotion shown by the target face (emotion task) or judging whether the expression shown by the face was congruent or not with the context (congruency task). Behavioral and electrophysiological results (event-related potentials (ERP)) showed that processing facial expressions was jointly influenced by congruency and task demands. ERP results revealed task effects at frontal sites, with larger positive amplitudes between 250–450 ms in the congruency task, reflecting the higher cognitive effort required by this task. Effects of congruency appeared at latencies and locations corresponding to the early posterior negativity (EPN) and late positive potential (LPP) components that have previously been found to be sensitive to emotion and affective congruency. The magnitude and spatial distribution of the congruency effects varied depending on the task and the target expression. These results are discussed in terms of the modulatory role of context on facial expression processing and the different mechanisms underlying the processing of expressions of positive and negative emotions.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Faces showing expressions of happiness or anger were presented together with sentences that described happiness-inducing or anger-inducing situations. Two main variables were manipulated: (i) congruency between contexts and expressions (congruent/incongruent) and (ii) the task assigned to the participant, discriminating the emotion shown by the target face (emotion task) or judging whether the expression shown by the face was congruent or not with the context (congruency task). Behavioral and electrophysiological results (event-related potentials (ERP)) showed that processing facial expressions was jointly influenced by congruency and task demands. ERP results revealed task effects at frontal sites, with larger positive amplitudes between 250–450 ms in the congruency task, reflecting the higher cognitive effort required by this task. Effects of congruency appeared at latencies and locations corresponding to the early posterior negativity (EPN) and late positive potential (LPP) components that have previously been found to be sensitive to emotion and affective congruency. The magnitude and spatial distribution of the congruency effects varied depending on the task and the target expression. These results are discussed in terms of the modulatory role of context on facial expression processing and the different mechanisms underlying the processing of expressions of positive and negative emotions. |
Carlos Aguilar; Eric Castet Gaze-contingent simulation of retinopathy: Some potential pitfalls and remedies Journal Article Vision Research, 51 (9), pp. 997–1012, 2011. @article{Aguilar2011, title = {Gaze-contingent simulation of retinopathy: Some potential pitfalls and remedies}, author = {Carlos Aguilar and Eric Castet}, doi = {10.1016/j.visres.2011.02.010}, year = {2011}, date = {2011-01-01}, journal = {Vision Research}, volume = {51}, number = {9}, pages = {997--1012}, publisher = {Elsevier Ltd}, abstract = {Many important results in visual neuroscience rely on the use of gaze-contingent retinal stabilization techniques. Our work focuses on the important fraction of these studies that is concerned with the retinal stabilization of visual filters that degrade some specific portions of the visual field. For instance, macular scotomas, often induced by age related macular degeneration, can be simulated by continuously displaying a gaze-contingent mask in the center of the visual field. The gaze-contingent rules used in most of these studies imply only a very minimal processing of ocular data. By analyzing the relationship between gaze and scotoma locations for different oculo-motor patterns, we show that such a minimal processing might have adverse perceptual and oculomotor consequences due mainly to two potential problems: (a) a transient blink-induced motion of the scotoma while gaze is static, and (b) the intrusion of post-saccadic slow eye movements. We have developed new gaze-contingent rules to solve these two problems. We have also suggested simple ways of tackling two unrecognized problems that are a potential source of mismatch between gaze and scotoma locations. Overall, the present work should help design, describe and test the paradigms used to simulate retinopathy with gaze-contingent displays.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Many important results in visual neuroscience rely on the use of gaze-contingent retinal stabilization techniques. Our work focuses on the important fraction of these studies that is concerned with the retinal stabilization of visual filters that degrade some specific portions of the visual field. For instance, macular scotomas, often induced by age related macular degeneration, can be simulated by continuously displaying a gaze-contingent mask in the center of the visual field. The gaze-contingent rules used in most of these studies imply only a very minimal processing of ocular data. By analyzing the relationship between gaze and scotoma locations for different oculo-motor patterns, we show that such a minimal processing might have adverse perceptual and oculomotor consequences due mainly to two potential problems: (a) a transient blink-induced motion of the scotoma while gaze is static, and (b) the intrusion of post-saccadic slow eye movements. We have developed new gaze-contingent rules to solve these two problems. We have also suggested simple ways of tackling two unrecognized problems that are a potential source of mismatch between gaze and scotoma locations. Overall, the present work should help design, describe and test the paradigms used to simulate retinopathy with gaze-contingent displays. |
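The remedies Aguilar and Castet describe can be summarized as update rules that decide, sample by sample, whether the simulated scotoma may move. The sketch below is one plausible rendering of two such rules (freeze the mask during blinks; ignore slow post-saccadic eye movements); the velocity threshold is an assumption, not the paper's value.

```python
def update_scotoma(gaze, scotoma, sample_valid, velocity_deg_s,
                   drift_thresh=4.0):
    """One plausible gaze-contingent update rule (a sketch, not the
    authors' implementation). `gaze`/`scotoma` are (x, y) in deg;
    `sample_valid` is False during blinks or track loss.

    Rule 1: during blinks, freeze the mask so it cannot jump with the
            spurious samples recorded around lid closure.
    Rule 2: ignore slow post-saccadic eye movements below the velocity
            threshold so the mask does not creep after a saccade lands.
    """
    if not sample_valid:
        return scotoma                 # blink: keep the mask in place
    if velocity_deg_s < drift_thresh:
        return scotoma                 # slow drift: suppress the update
    return gaze                        # saccade-sized movement: re-center

# A fast (saccadic) sample moves the mask; a slow one does not.
print(update_scotoma((5.0, 1.0), (4.8, 1.1), True, 150.0))  # (5.0, 1.0)
print(update_scotoma((5.0, 1.0), (4.8, 1.1), True, 1.0))    # (4.8, 1.1)
```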
Stephanie Ahken; Gilles Comeau; Sylvie Hébert; Ramesh Balasubramaniam Eye movement patterns during the processing of musical and linguistic syntactic incongruities. Journal Article Psychomusicology: Music, Mind, and Brain, 22 (1), pp. 18–25, 2012. @article{Ahken2012, title = {Eye movement patterns during the processing of musical and linguistic syntactic incongruities.}, author = {Stephanie Ahken and Gilles Comeau and Sylvie Hébert and Ramesh Balasubramaniam}, doi = {10.1037/a0026751}, year = {2012}, date = {2012-01-01}, journal = {Psychomusicology: Music, Mind, and Brain}, volume = {22}, number = {1}, pages = {18--25}, abstract = {It has been suggested that music and language share syntax-supporting brain mechanisms. Consequently, violations of syntax in either domain may have similar effects. The present study examined the effects of syntactic incongruities on eye movements and reading time in both music and language domains. In the music notation condition, the syntactic incongruities violated the prevailing musical tonality (i.e., the last bar of the incongruent sequence was a nontonic chord or nontonic note in the given key). In the linguistic condition, syntactic incongruities violated the expected grammatical structure (i.e., sentences with anomalies carrying the progressive –ing affix or the past tense inflection). Eighteen pianists were asked to sight-read and play musical phrases (music condition) and read sentences aloud (linguistic condition). Syntactic incongruities in both domains were associated with an increase in the mean proportion and duration of fixations in the target region of interest, as well as longer reading duration. The results are consistent with the growing evidence of a shared network of neural structures for syntactic processing, while not ruling out the possibility of independent networks for each domain.}, keywords = {}, pubstate = {published}, tppubtype = {article} } It has been suggested that music and language share syntax-supporting brain mechanisms. Consequently, violations of syntax in either domain may have similar effects. The present study examined the effects of syntactic incongruities on eye movements and reading time in both music and language domains. In the music notation condition, the syntactic incongruities violated the prevailing musical tonality (i.e., the last bar of the incongruent sequence was a nontonic chord or nontonic note in the given key). In the linguistic condition, syntactic incongruities violated the expected grammatical structure (i.e., sentences with anomalies carrying the progressive –ing affix or the past tense inflection). Eighteen pianists were asked to sight-read and play musical phrases (music condition) and read sentences aloud (linguistic condition). Syntactic incongruities in both domains were associated with an increase in the mean proportion and duration of fixations in the target region of interest, as well as longer reading duration. The results are consistent with the growing evidence of a shared network of neural structures for syntactic processing, while not ruling out the possibility of independent networks for each domain. |
Sheeraz Ahmad; He Huang; Angela J Yu Cost-sensitive Bayesian control policy in human active sensing Journal Article Frontiers in Human Neuroscience, 8 (December), pp. 1–12, 2014. @article{Ahmad2014, title = {Cost-sensitive Bayesian control policy in human active sensing}, author = {Sheeraz Ahmad and He Huang and Angela J Yu}, doi = {10.3389/fnhum.2014.00955}, year = {2014}, date = {2014-01-01}, journal = {Frontiers in Human Neuroscience}, volume = {8}, number = {December}, pages = {1--12}, abstract = {An important but poorly understood aspect of sensory processing is the role of active sensing, the use of self-motion such as eye or head movements to focus sensing resources on the most rewarding or informative aspects of the sensory environment. Here, we present behavioral data from a visual search experiment, as well as a Bayesian model of within-trial dynamics of sensory processing and eye movements. Within this Bayes-optimal inference and control framework, which we call C-DAC (Context-Dependent Active Controller), various types of behavioral costs, such as temporal delay, response error, and sensor repositioning cost, are explicitly minimized. This contrasts with previously proposed algorithms that optimize abstract statistical objectives such as anticipated information gain (Infomax) (Butko and Movellan, 2010) and expected posterior maximum (greedy MAP) (Najemnik and Geisler, 2005). We find that C-DAC captures human visual search dynamics better than previous models, in particular a certain form of "confirmation bias" apparent in the way human subjects utilize prior knowledge about the spatial distribution of the search target to improve search speed and accuracy. We also examine several computationally efficient approximations to C-DAC that may present biologically more plausible accounts of the neural computations underlying active sensing, as well as practical tools for solving active sensing problems in engineering applications. To summarize, this paper makes the following key contributions: human visual search behavioral data, a context-sensitive Bayesian active sensing model, a comparative study between different models of human active sensing, and a family of efficient approximations to the optimal model.}, keywords = {}, pubstate = {published}, tppubtype = {article} } An important but poorly understood aspect of sensory processing is the role of active sensing, the use of self-motion such as eye or head movements to focus sensing resources on the most rewarding or informative aspects of the sensory environment. Here, we present behavioral data from a visual search experiment, as well as a Bayesian model of within-trial dynamics of sensory processing and eye movements. Within this Bayes-optimal inference and control framework, which we call C-DAC (Context-Dependent Active Controller), various types of behavioral costs, such as temporal delay, response error, and sensor repositioning cost, are explicitly minimized. This contrasts with previously proposed algorithms that optimize abstract statistical objectives such as anticipated information gain (Infomax) (Butko and Movellan, 2010) and expected posterior maximum (greedy MAP) (Najemnik and Geisler, 2005). We find that C-DAC captures human visual search dynamics better than previous models, in particular a certain form of "confirmation bias" apparent in the way human subjects utilize prior knowledge about the spatial distribution of the search target to improve search speed and accuracy. 
We also examine several computationally efficient approximations to C-DAC that may present biologically more plausible accounts of the neural computations underlying active sensing, as well as practical tools for solving active sensing problems in engineering applications. To summarize, this paper makes the following key contributions: human visual search behavioral data, a context-sensitive Bayesian active sensing model, a comparative study between different models of human active sensing, and a family of efficient approximations to the optimal model. |
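C-DAC's defining feature is that fixation and stopping decisions minimize explicit behavioral costs rather than an abstract information objective. The sketch below captures that flavor in a toy three-location search with made-up costs and likelihoods; note that it uses myopic stop/switch rules, whereas the paper derives the policy by dynamic programming.

```python
import numpy as np

# Toy 3-location search. beta = P(obs = 1 | fixating the target); all
# cost values are made up for illustration.
beta, c_time, c_switch, c_error = 0.7, 0.005, 0.02, 1.0

def update(belief, fixated, obs):
    """Bayes update of P(target = k) from a Bernoulli sample taken at
    the fixated location."""
    p_obs1 = np.where(np.arange(belief.size) == fixated, beta, 1 - beta)
    like = p_obs1 if obs else 1 - p_obs1
    post = belief * like
    return post / post.sum()

rng = np.random.default_rng(0)
belief = np.ones(3) / 3
fixated, target = 0, 1
for step in range(200):
    obs = rng.random() < (beta if fixated == target else 1 - beta)
    belief = update(belief, fixated, obs)
    # Myopic stopping rule: report once the expected error cost falls
    # below the cost of taking one more sample.
    if c_error * (1 - belief.max()) < c_time:
        break
    best = int(belief.argmax())
    # Heuristic switch rule: refixate only when the belief margin is
    # worth the switching cost (the paper solves this optimally).
    if best != fixated and (belief[best] - belief[fixated]) * c_error > c_switch:
        fixated = best
print("report location", int(belief.argmax()), "after", step + 1, "samples")
```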
Byunghoon “Tony” Ahn; Jason M Harley Facial expressions when learning with a Queer History App: Application of the control value theory of achievement emotions Journal Article British Journal of Educational Technology, 51 (5), pp. 1563–1576, 2020. @article{Ahn2020, title = {Facial expressions when learning with a Queer History App: Application of the control value theory of achievement emotions}, author = {Byunghoon “Tony” Ahn and Jason M Harley}, doi = {10.1111/bjet.12989}, year = {2020}, date = {2020-01-01}, journal = {British Journal of Educational Technology}, volume = {51}, number = {5}, pages = {1563--1576}, abstract = {Learning analytics (LA) incorporates analyzing cognitive, social and emotional processes in learning scenarios to make informed decisions regarding instructional design and delivery. Research has highlighted important roles that emotions play in learning. We have extended this field of research by exploring the role of emotions in a relatively uncommon learning scenario: learning about queer history with a multimedia mobile app. Specifically, we used automatic facial recognition software (FaceReader 7) to measure learners' discrete emotions and a counter-balanced multiple-choice quiz to assess learning. We also used an eye tracker (EyeLink 1000) to identify the emotions learners experienced while they read specific content, as opposed to the emotions they experienced over the course of the entire learning session. Data from 33 of the 57 learners were eligible to be analyzed. Results revealed that learners expressed more negative-activating emotions (i.e., anger, anxiety) and negative-deactivating emotions (i.e., sadness) than positive-activating emotions (i.e., happiness). Learners with an angry emotion profile had the highest learning gains. The importance of examining typically undesirable emotions in learning, such as anger, is discussed using the control-value theory of achievement emotions.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Learning analytics (LA) incorporates analyzing cognitive, social and emotional processes in learning scenarios to make informed decisions regarding instructional design and delivery. Research has highlighted important roles that emotions play in learning. We have extended this field of research by exploring the role of emotions in a relatively uncommon learning scenario: learning about queer history with a multimedia mobile app. Specifically, we used automatic facial recognition software (FaceReader 7) to measure learners' discrete emotions and a counter-balanced multiple-choice quiz to assess learning. We also used an eye tracker (EyeLink 1000) to identify the emotions learners experienced while they read specific content, as opposed to the emotions they experienced over the course of the entire learning session. Data from 33 of the 57 learners were eligible to be analyzed. Results revealed that learners expressed more negative-activating emotions (i.e., anger, anxiety) and negative-deactivating emotions (i.e., sadness) than positive-activating emotions (i.e., happiness). Learners with an angry emotion profile had the highest learning gains. The importance of examining typically undesirable emotions in learning, such as anger, is discussed using the control-value theory of achievement emotions.
Further, this study describes a multimodal methodology to integrate behavioral trace data into learning analytics research. |
Aarit Ahuja; David L Sheinberg Behavioral and oculomotor evidence for visual simulation of object movement Journal Article Journal of Vision, 19 (6), pp. 1–17, 2019. @article{Ahuja2019, title = {Behavioral and oculomotor evidence for visual simulation of object movement}, author = {Aarit Ahuja and David L Sheinberg}, doi = {10.1167/19.6.13}, year = {2019}, date = {2019-06-01}, journal = {Journal of Vision}, volume = {19}, number = {6}, pages = {1--17}, publisher = {The Association for Research in Vision and Ophthalmology}, abstract = {We regularly interact with moving objects in our environment. Yet, little is known about how we extrapolate the future movements of visually perceived objects. One possibility is that movements are experienced by a mental visual simulation, allowing one to internally picture an object's upcoming motion trajectory, even as the object itself remains stationary. Here we examined this possibility by asking human participants to make judgments about the future position of a falling ball on an obstacle-filled display. We found that properties of the ball's trajectory were highly predictive of subjects' reaction times and accuracy on the task. We also found that the eye movements subjects made while attempting to ascertain where the ball might fall had significant spatiotemporal overlap with those made while actually perceiving the ball fall. These findings suggest that subjects simulated the ball's trajectory to inform their responses. Finally, we trained a convolutional neural network to see whether this problem could be solved by simple image analysis as opposed to the more intricate simulation strategy we propose. We found that while the network was able to solve our task, the model's output did not effectively or consistently predict human behavior. This implies that subjects employed a different strategy for solving our task, and bolsters the conclusion that they were engaging in visual simulation. The current study thus provides support for visual simulation of motion as a means of understanding complex visual scenes and paves the way for future investigations of this phenomenon at a neural level.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We regularly interact with moving objects in our environment. Yet, little is known about how we extrapolate the future movements of visually perceived objects. One possibility is that movements are experienced by a mental visual simulation, allowing one to internally picture an object's upcoming motion trajectory, even as the object itself remains stationary. Here we examined this possibility by asking human participants to make judgments about the future position of a falling ball on an obstacle-filled display. We found that properties of the ball's trajectory were highly predictive of subjects' reaction times and accuracy on the task. We also found that the eye movements subjects made while attempting to ascertain where the ball might fall had significant spatiotemporal overlap with those made while actually perceiving the ball fall. These findings suggest that subjects simulated the ball's trajectory to inform their responses. Finally, we trained a convolutional neural network to see whether this problem could be solved by simple image analysis as opposed to the more intricate simulation strategy we propose. We found that while the network was able to solve our task, the model's output did not effectively or consistently predict human behavior. 
This implies that subjects employed a different strategy for solving our task, and bolsters the conclusion that they were engaging in visual simulation. The current study thus provides support for visual simulation of motion as a means of understanding complex visual scenes and paves the way for future investigations of this phenomenon at a neural level. |
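The simulation strategy the authors contrast with direct image classification can be made concrete: instead of mapping the display straight to an answer, one rolls a simulated ball forward through the obstacle layout and reads off where it lands. A toy sketch in Python (the grid world, deflection rule, and names are illustrative assumptions, not the authors' stimulus code):

    import numpy as np

    def simulate_fall(obstacles, start_col, n_rows, n_cols, rng=None):
        """Roll a ball down a grid; obstacles deflect it to a free neighbour.

        obstacles: set of (row, col) cells the ball cannot enter.
        Returns the column where the ball exits the bottom row.
        """
        rng = rng or np.random.default_rng(0)
        col = start_col
        for row in range(n_rows):
            if (row, col) in obstacles:
                options = [c for c in (col - 1, col + 1)
                           if 0 <= c < n_cols and (row, c) not in obstacles]
                if not options:
                    return col  # ball is wedged; report where it stopped
                col = options[0] if len(options) == 1 else int(rng.choice(options))
            # otherwise the ball continues straight down this column
        return col

Properties of the simulated trajectory, such as the number of deflections, are the kind of quantities the paper reports as predictive of reaction times and accuracy.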
Suzon Ajasse; Ryad B Benosman; Jean Lorenceau Effects of pupillary responses to luminance and attention on visual spatial discrimination Journal Article Journal of Vision, 18 (11), pp. 1–14, 2018. @article{Ajasse2018, title = {Effects of pupillary responses to luminance and attention on visual spatial discrimination}, author = {Suzon Ajasse and Ryad B Benosman and Jean Lorenceau}, doi = {10.1167/18.11.6}, year = {2018}, date = {2018-01-01}, journal = {Journal of Vision}, volume = {18}, number = {11}, pages = {1--14}, abstract = {The optic quality of the eyes is, at least in part, determined by pupil size. Large pupils let more light enter the eyes, but degrade the point spread function, and thus the spatial resolution that can be achieved (Campbell & Gregory, 1960). In natural conditions, the pupil is mainly driven by the luminance (and possibly the color and contrast) at the gazed location, but is also modulated by attention and cognitive factors. Whether changes in eyes' optics related to pupil size modulation by luminance and attention impact visual processing was assessed in two experiments. In Experiment 1, we measured pupil size using a constantly visible display made of four disks with different luminance levels, with no other task than fixating the disks in succession. The results confirmed that pupil size depends on the luminance of the gazed stimulus. Experiment 2, using settings similar to those of Experiment 1, used a two-interval forced-choice design to test whether discriminating high spatial frequencies that requires covert attention to parafoveal stimuli is better during the fixation of bright disks that entails a small pupil size, and hence better eyes' optics, as compared to fixating dark disks that entails a large pupil size, and hence poorer eyes' optics. As in Experiment 1, we observed large modulations of pupil size depending on the luminance of the gazed stimulus, but pupil dynamics were more variable, with marked pupil dilation during stimulus encoding, presumably because the demanding spatial frequency discrimination task engaged attention. However, discrimination performance and mean pupil size were not correlated. Despite this lack of correlation, the slopes of pupil dilation during stimulus encoding were correlated with performance, while the slopes of pupil dilation during decision-making were not. We discuss these results regarding the possible functional roles of pupil size modulations.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The optic quality of the eyes is, at least in part, determined by pupil size. Large pupils let more light enter the eyes, but degrade the point spread function, and thus the spatial resolution that can be achieved (Campbell & Gregory, 1960). In natural conditions, the pupil is mainly driven by the luminance (and possibly the color and contrast) at the gazed location, but is also modulated by attention and cognitive factors. Whether changes in eyes' optics related to pupil size modulation by luminance and attention impact visual processing was assessed in two experiments. In Experiment 1, we measured pupil size using a constantly visible display made of four disks with different luminance levels, with no other task than fixating the disks in succession. The results confirmed that pupil size depends on the luminance of the gazed stimulus. 
Experiment 2, using settings similar to those of Experiment 1, used a two-interval forced-choice design to test whether discriminating high spatial frequencies that requires covert attention to parafoveal stimuli is better during the fixation of bright disks that entails a small pupil size, and hence better eyes' optics, as compared to fixating dark disks that entails a large pupil size, and hence poorer eyes' optics. As in Experiment 1, we observed large modulations of pupil size depending on the luminance of the gazed stimulus, but pupil dynamics were more variable, with marked pupil dilation during stimulus encoding, presumably because the demanding spatial frequency discrimination task engaged attention. However, discrimination performance and mean pupil size were not correlated. Despite this lack of correlation, the slopes of pupil dilation during stimulus encoding were correlated with performance, while the slopes of pupil dilation during decision-making were not. We discuss these results regarding the possible functional roles of pupil size modulations. |
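The slope measure that did correlate with performance is essentially a per-trial linear fit to the pupil trace over the encoding window. A minimal sketch, assuming traces sampled at a constant rate (the sampling rate and variable names are illustrative):

    import numpy as np
    from scipy.stats import pearsonr

    def dilation_slope(pupil_trace, sample_rate_hz):
        """Least-squares slope of pupil size over time (a.u. per second)."""
        t = np.arange(len(pupil_trace)) / sample_rate_hz
        return np.polyfit(t, pupil_trace, 1)[0]

    # encoding_traces: per-trial pupil traces during stimulus encoding
    # accuracy: matching per-trial (or per-subject) discrimination scores
    # slopes = [dilation_slope(tr, 500.0) for tr in encoding_traces]
    # r, p = pearsonr(slopes, accuracy)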
Sara Ajina; Christopher Kennard; Geraint Rees; Holly Bridge Motion area V5/MT+ response to global motion in the absence of V1 resembles early visual cortex Journal Article Brain, 138 (1), pp. 164–178, 2015. @article{Ajina2015, title = {Motion area V5/MT+ response to global motion in the absence of V1 resembles early visual cortex}, author = {Sara Ajina and Christopher Kennard and Geraint Rees and Holly Bridge}, doi = {10.1093/brain/awu328}, year = {2015}, date = {2015-01-01}, journal = {Brain}, volume = {138}, number = {1}, pages = {164--178}, abstract = {Motion area V5/MT+ shows a variety of characteristic visual responses, often linked to perception, which are heavily influenced by its rich connectivity with the primary visual cortex (V1). This human motion area also receives a number of inputs from other visual regions, including direct subcortical connections and callosal connections with the contralateral hemisphere. Little is currently known about such alternative inputs to V5/MT+ and how they may drive and influence its activity. Using functional magnetic resonance imaging, the response of human V5/MT+ to increasing the proportion of coherent motion was measured in seven patients with unilateral V1 damage acquired during adulthood, and a group of healthy age-matched controls. When V1 was damaged, the typical V5/MT+ response to increasing coherence was lost. Rather, V5/MT+ in patients showed a negative trend with coherence that was similar to coherence-related activity in V1 of healthy control subjects. This shift to a response-pattern more typical of early visual cortex suggests that in the absence of V1, V5/MT+ activity may be shaped by similar direct subcortical input. This is likely to reflect intact residual pathways rather than a change in connectivity, and has important implications for blindsight function. It also confirms predictions that V1 is critically involved in normal V5/MT+ global motion processing, consistent with a convergent model of V1 input to V5/MT+. Historically, most attempts to model cortical visual responses do not consider the contribution of direct subcortical inputs that may bypass striate cortex, such as input to V5/MT+. We have shown that the signal change driven by these non-striate pathways can be measured, and suggest that models of the intact visual system may benefit from considering their contribution.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Motion area V5/MT+ shows a variety of characteristic visual responses, often linked to perception, which are heavily influenced by its rich connectivity with the primary visual cortex (V1). This human motion area also receives a number of inputs from other visual regions, including direct subcortical connections and callosal connections with the contralateral hemisphere. Little is currently known about such alternative inputs to V5/MT+ and how they may drive and influence its activity. Using functional magnetic resonance imaging, the response of human V5/MT+ to increasing the proportion of coherent motion was measured in seven patients with unilateral V1 damage acquired during adulthood, and a group of healthy age-matched controls. When V1 was damaged, the typical V5/MT+ response to increasing coherence was lost. Rather, V5/MT+ in patients showed a negative trend with coherence that was similar to coherence-related activity in V1 of healthy control subjects. 
This shift to a response-pattern more typical of early visual cortex suggests that in the absence of V1, V5/MT+ activity may be shaped by similar direct subcortical input. This is likely to reflect intact residual pathways rather than a change in connectivity, and has important implications for blindsight function. It also confirms predictions that V1 is critically involved in normal V5/MT+ global motion processing, consistent with a convergent model of V1 input to V5/MT+. Historically, most attempts to model cortical visual responses do not consider the contribution of direct subcortical inputs that may bypass striate cortex, such as input to V5/MT+. We have shown that the signal change driven by these non-striate pathways can be measured, and suggest that models of the intact visual system may benefit from considering their contribution. |
Sara Ajina; Holly Bridge Blindsight relies on a functional connection between hMT+ and the lateral geniculate nucleus, not the pulvinar Journal Article PLoS Biology, 16 (7), pp. e2005769, 2018. @article{Ajina2018, title = {Blindsight relies on a functional connection between hMT+ and the lateral geniculate nucleus, not the pulvinar}, author = {Sara Ajina and Holly Bridge}, doi = {10.1371/journal.pbio.2005769}, year = {2018}, date = {2018-01-01}, journal = {PLoS Biology}, volume = {16}, number = {7}, pages = {e2005769}, abstract = {When the primary visual cortex (V1) is damaged, the principal visual pathway is lost, causing a loss of vision in the opposite visual field. While conscious vision is impaired, patients can still respond to certain images; this is known as 'blindsight'. Recently, a direct anatomical connection between the lateral geniculate nucleus (LGN) and human motion area hMT+ has been implicated in blindsight. However, a functional connection between these structures has not been demonstrated. We quantified functional MRI responses to motion in 14 patients with unilateral V1 damage (with and without blindsight). Patients with blindsight showed significant activity and a preserved sensitivity to speed in motion area hMT+, which was absent in patients without blindsight. We then compared functional connectivity between motion area hMT+ and a number of structures implicated in blindsight, including the ventral pulvinar. Only patients with blindsight showed an intact functional connection with the LGN but not the other structures, supporting a specific functional role for the LGN in blindsight.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When the primary visual cortex (V1) is damaged, the principal visual pathway is lost, causing a loss of vision in the opposite visual field. While conscious vision is impaired, patients can still respond to certain images; this is known as 'blindsight'. Recently, a direct anatomical connection between the lateral geniculate nucleus (LGN) and human motion area hMT+ has been implicated in blindsight. However, a functional connection between these structures has not been demonstrated. We quantified functional MRI responses to motion in 14 patients with unilateral V1 damage (with and without blindsight). Patients with blindsight showed significant activity and a preserved sensitivity to speed in motion area hMT+, which was absent in patients without blindsight. We then compared functional connectivity between motion area hMT+ and a number of structures implicated in blindsight, including the ventral pulvinar. Only patients with blindsight showed an intact functional connection with the LGN but not the other structures, supporting a specific functional role for the LGN in blindsight. |
Sara Ajina; Miriam Pollard; Holly Bridge The superior colliculus and amygdala support evaluation of face trait in blindsight Journal Article Frontiers in Neurology, 11 , pp. 1–18, 2020. @article{Ajina2020, title = {The superior colliculus and amygdala support evaluation of face trait in blindsight}, author = {Sara Ajina and Miriam Pollard and Holly Bridge}, doi = {10.3389/fneur.2020.00769}, year = {2020}, date = {2020-01-01}, journal = {Frontiers in Neurology}, volume = {11}, pages = {1--18}, abstract = {Humans can respond rapidly to viewed expressions of fear, even in the absence of conscious awareness. This is demonstrated using visual masking paradigms in healthy individuals and in patients with cortical blindness due to damage to the primary visual cortex (V1) - so-called affective blindsight. Humans have also been shown to implicitly process facial expressions representing important social dimensions. Two major axes, dominance and trustworthiness, are proposed to characterize the social dimensions of face evaluation. The processing of both types of implicit stimuli is believed to occur via similar subcortical pathways involving the amygdala. However, we do not know whether unconscious processing of more subtle expressions of facial traits can occur in blindsight, and if so, how. To test this, we studied 13 patients with unilateral V1 damage and visual field loss. We assessed their ability to detect and discriminate faces that had been manipulated along two orthogonal axes of trustworthiness and dominance to generate five trait levels inside the blind visual field: dominant, submissive, trustworthy, untrustworthy, and neutral. We compared neural activity and functional connectivity in patients classified as blindsight positive or negative for these stimuli. We found that dominant faces were most likely to be detected above chance, with individuals demonstrating unique interactions between performance and face trait. Only patients with blindsight (n = 8) showed significant preference in the superior colliculus and amygdala for face traits in the blind visual field, and a critical functional connection between the amygdala and superior colliculus in the damaged hemisphere. We also found a significant correlation between behavioral performance and fMRI activity in the amygdala and lateral geniculate nucleus across all participants. Our findings confirm that affective blindsight involving the superior colliculus and amygdala extends to the processing of socially salient but emotionally neutral facial expressions when V1 is damaged. This pathway is distinct from that which supports motion blindsight, as both types of blindsight can exist in the absence of the other with corresponding patterns of residual connectivity.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Humans can respond rapidly to viewed expressions of fear, even in the absence of conscious awareness. This is demonstrated using visual masking paradigms in healthy individuals and in patients with cortical blindness due to damage to the primary visual cortex (V1) - so-called affective blindsight. Humans have also been shown to implicitly process facial expressions representing important social dimensions. Two major axes, dominance and trustworthiness, are proposed to characterize the social dimensions of face evaluation. The processing of both types of implicit stimuli is believed to occur via similar subcortical pathways involving the amygdala. 
However, we do not know whether unconscious processing of more subtle expressions of facial traits can occur in blindsight, and if so, how. To test this, we studied 13 patients with unilateral V1 damage and visual field loss. We assessed their ability to detect and discriminate faces that had been manipulated along two orthogonal axes of trustworthiness and dominance to generate five trait levels inside the blind visual field: dominant, submissive, trustworthy, untrustworthy, and neutral. We compared neural activity and functional connectivity in patients classified as blindsight positive or negative for these stimuli. We found that dominant faces were most likely to be detected above chance, with individuals demonstrating unique interactions between performance and face trait. Only patients with blindsight (n = 8) showed significant preference in the superior colliculus and amygdala for face traits in the blind visual field, and a critical functional connection between the amygdala and superior colliculus in the damaged hemisphere. We also found a significant correlation between behavioral performance and fMRI activity in the amygdala and lateral geniculate nucleus across all participants. Our findings confirm that affective blindsight involving the superior colliculus and amygdala extends to the processing of socially salient but emotionally neutral facial expressions when V1 is damaged. This pathway is distinct from that which supports motion blindsight, as both types of blindsight can exist in the absence of the other with corresponding patterns of residual connectivity. |
Başak Akdoğan; Fuat Balcı; Hedderik van Rijn Temporal expectation indexed by pupillary response Journal Article Timing & Time Perception, 4 (4), pp. 354–370, 2016. @article{Akdogan2016, title = {Temporal expectation indexed by pupillary response}, author = {Başak Akdoğan and Fuat Balcı and Hedderik van Rijn}, doi = {10.1163/22134468-00002075}, year = {2016}, date = {2016-01-01}, journal = {Timing & Time Perception}, volume = {4}, number = {4}, pages = {354--370}, abstract = {Forming temporal expectations plays an instrumental role for the optimization of behavior and allocation of attentional resources. Although the effects of temporal expectations on visual attention are well-established, the question of whether temporal predictions modulate the behavioral outputs of the autonomic nervous system such as the pupillary response remains unanswered. Therefore, this study aimed to obtain an online measure of pupil size while human participants were asked to differentiate between visual targets presented after varying time intervals since trial onset. Specifically, we manipulated temporal predictability in the presentation of target stimuli consisting of letters which appeared after either a short or long delay duration (1.5 vs. 3 s) in the majority of trials (75%) within different test blocks. In the remaining trials (25%), no target stimulus was present to investigate the trajectory of preparatory pupillary response under a low level of temporal uncertainty. The results revealed that the rate of preparatory pupillary response was contingent upon the time of target appearance such that pupils dilated at a higher rate when the targets were expected to appear after a shorter as compared to a longer delay period irrespective of target presence. The finding that pupil size can track temporal regularities and exhibit differential preparatory response between different delay conditions points to the existence of a distributed neural network subserving temporal information processing which is crucial for cognitive functioning and goal-directed behavior.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Forming temporal expectations plays an instrumental role for the optimization of behavior and allocation of attentional resources. Although the effects of temporal expectations on visual attention are well-established, the question of whether temporal predictions modulate the behavioral outputs of the autonomic nervous system such as the pupillary response remains unanswered. Therefore, this study aimed to obtain an online measure of pupil size while human participants were asked to differentiate between visual targets presented after varying time intervals since trial onset. Specifically, we manipulated temporal predictability in the presentation of target stimuli consisting of letters which appeared after either a short or long delay duration (1.5 vs. 3 s) in the majority of trials (75%) within different test blocks. In the remaining trials (25%), no target stimulus was present to investigate the trajectory of preparatory pupillary response under a low level of temporal uncertainty. The results revealed that the rate of preparatory pupillary response was contingent upon the time of target appearance such that pupils dilated at a higher rate when the targets were expected to appear after a shorter as compared to a longer delay period irrespective of target presence. 
The finding that pupil size can track temporal regularities and exhibit differential preparatory response between different delay conditions points to the existence of a distributed neural network subserving temporal information processing which is crucial for cognitive functioning and goal-directed behavior. |
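The dependent measure here, the rate of preparatory dilation, is the same kind of least-squares slope as in the sketch above, taken over the pre-target delay and then compared between expectation conditions. A hedged sketch (the analysis window and test are assumptions, not the authors' exact pipeline):

    import numpy as np
    from scipy.stats import ttest_rel

    def preparatory_rate(trace, sample_rate_hz):
        """Slope of pupil size across the pre-target delay (a.u. per second)."""
        t = np.arange(len(trace)) / sample_rate_hz
        return np.polyfit(t, trace, 1)[0]

    # rates_short, rates_long: per-subject mean dilation rates for blocks in
    # which the target was expected at 1.5 s vs. 3 s after trial onset.
    # t_val, p = ttest_rel(rates_short, rates_long)  # reported: short > long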
Ozgur E Akman; Richard A Clement; David S Broomhead; Sabira K Mannan; Ian Moorhead; Hugh R Wilson Probing bottom-up processing with multistable images Journal Article Journal of Eye Movement Research, 1 (3), pp. 1–7, 2009. @article{Akman2009, title = {Probing bottom-up processing with multistable images}, author = {Ozgur E Akman and Richard A Clement and David S Broomhead and Sabira K Mannan and Ian Moorhead and Hugh R Wilson}, year = {2009}, date = {2009-01-01}, journal = {Journal of Eye Movement Research}, volume = {1}, number = {3}, pages = {1--7}, abstract = {The selection of fixation targets involves a combination of top-down and bottom-up processing. The role of bottom-up processing can be enhanced by using multistable stimuli because their constantly changing appearance seems to depend predominantly on stimulus-driven factors. We used this approach to investigate whether visual processing models based on V1 need to be extended to incorporate specific computations attributed to V4. Eye movements of 8 subjects were recorded during free viewing of the Marroquin pattern in which illusory circles appear and disappear. Fixations were concentrated on features arranged in concentric rings within the pattern. Comparison with simulated fixation data demonstrated that the saliency of these features can be predicted with appropriate weighting of lateral connections in existing V1 models.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The selection of fixation targets involves a combination of top-down and bottom-up processing. The role of bottom-up processing can be enhanced by using multistable stimuli because their constantly changing appearance seems to depend predominantly on stimulus-driven factors. We used this approach to investigate whether visual processing models based on V1 need to be extended to incorporate specific computations attributed to V4. Eye movements of 8 subjects were recorded during free viewing of the Marroquin pattern in which illusory circles appear and disappear. Fixations were concentrated on features arranged in concentric rings within the pattern. Comparison with simulated fixation data demonstrated that the saliency of these features can be predicted with appropriate weighting of lateral connections in existing V1 models. |
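A standard way to score how well a model-derived saliency map predicts human fixations (one reasonable reading of the comparison performed here, not necessarily the authors' exact metric) is normalized scanpath saliency: z-score the map and average its values at the fixated pixels, so chance performance is 0.

    import numpy as np

    def normalized_scanpath_saliency(saliency_map, fixations):
        """Mean z-scored model salience at fixated pixels (NSS).

        saliency_map: 2-D array, e.g. from a V1 model with weighted
        lateral connections. fixations: iterable of (row, col) pixels.
        """
        z = (saliency_map - saliency_map.mean()) / saliency_map.std()
        return float(np.mean([z[r, c] for r, c in fixations]))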
Umair Akram; Jason G Ellis; Andriy Myachykov; Nicola L Barclay Preferential attention towards the eye-region amongst individuals with insomnia Journal Article Journal of Sleep Research, 26 (1), pp. 84–91, 2017. @article{Akram2017, title = {Preferential attention towards the eye-region amongst individuals with insomnia}, author = {Umair Akram and Jason G Ellis and Andriy Myachykov and Nicola L Barclay}, doi = {10.1111/jsr.12456}, year = {2017}, date = {2017-01-01}, journal = {Journal of Sleep Research}, volume = {26}, number = {1}, pages = {84--91}, abstract = {People with insomnia often perceive their own facial appearance as more tired compared with the appearance of others. Evidence also highlights the eye-region in projecting tiredness cues to perceivers, and tiredness judgements often rely on preferential attention towards this region. Using a novel eye-tracking paradigm, this study examined: (i) whether individuals with insomnia display preferential attention towards the eye-region, relative to nose and mouth regions, whilst observing faces compared with normal-sleepers; and (ii) whether an attentional bias towards the eye-region amongst individuals with insomnia is self-specific or general in nature. Twenty individuals with DSM-5 Insomnia Disorder and 20 normal-sleepers viewed 48 neutral facial photographs (24 of themselves, 24 of other people) for periods of 4000 ms. Eye movements were recorded using eye-tracking, and first fixation onset, first fixation duration and total gaze duration were examined for three interest-regions (eyes, nose, mouth). Significant group × interest-region interactions indicated that, regardless of the face presented, participants with insomnia were quicker to attend to, and spent more time observing, the eye-region relative to the nose and mouth regions compared with normal-sleepers. However, no group × face × interest-region interactions were established. Thus, whilst individuals with insomnia displayed preferential attention towards the eye-region in general, this effect was not accentuated during self-perception. Insomnia appears to be characterized by a general, rather than self-specific, attentional bias towards the eye-region. These findings contribute to our understanding of face perception in insomnia, and provide tentative support for cognitive models of insomnia demonstrating that individuals with insomnia monitor faces in general, with a specific focus around the eye-region, for cues associated with tiredness.}, keywords = {}, pubstate = {published}, tppubtype = {article} } People with insomnia often perceive their own facial appearance as more tired compared with the appearance of others. Evidence also highlights the eye-region in projecting tiredness cues to perceivers, and tiredness judgements often rely on preferential attention towards this region. Using a novel eye-tracking paradigm, this study examined: (i) whether individuals with insomnia display preferential attention towards the eye-region, relative to nose and mouth regions, whilst observing faces compared with normal-sleepers; and (ii) whether an attentional bias towards the eye-region amongst individuals with insomnia is self-specific or general in nature. Twenty individuals with DSM-5 Insomnia Disorder and 20 normal-sleepers viewed 48 neutral facial photographs (24 of themselves, 24 of other people) for periods of 4000 ms. 
Eye movements were recorded using eye-tracking, and first fixation onset, first fixation duration and total gaze duration were examined for three interest-regions (eyes, nose, mouth). Significant group × interest-region interactions indicated that, regardless of the face presented, participants with insomnia were quicker to attend to, and spent more time observing, the eye-region relative to the nose and mouth regions compared with normal-sleepers. However, no group × face × interest-region interactions were established. Thus, whilst individuals with insomnia displayed preferential attention towards the eye-region in general, this effect was not accentuated during self-perception. Insomnia appears to be characterized by a general, rather than self-specific, attentional bias towards the eye-region. These findings contribute to our understanding of face perception in insomnia, and provide tentative support for cognitive models of insomnia demonstrating that individuals with insomnia monitor faces in general, with a specific focus around the eye-region, for cues associated with tiredness. |
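All three gaze measures reported (first fixation onset, first fixation duration, total gaze duration) fall out of a time-ordered fixation list once each fixation is assigned to an interest region. A minimal sketch with illustrative data structures:

    def roi_measures(fixations, roi):
        """fixations: time-ordered dicts with 'onset', 'duration', 'region'.

        Returns (first fixation onset, first fixation duration, total gaze
        duration) for the given interest region, or None if never fixated.
        """
        hits = [f for f in fixations if f["region"] == roi]
        if not hits:
            return None
        total = sum(f["duration"] for f in hits)
        return hits[0]["onset"], hits[0]["duration"], total

    # e.g. compare roi_measures(trial, "eyes") against "nose" and "mouth"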
Andrea Alamia; Alexandre Zénon Statistical regularities attract attention when task-relevant Journal Article Frontiers in Human Neuroscience, 10 , pp. 1–10, 2016. @article{Alamia2016, title = {Statistical regularities attract attention when task-relevant}, author = {Andrea Alamia and Alexandre Zénon}, doi = {10.3389/fnhum.2016.00042}, year = {2016}, date = {2016-01-01}, journal = {Frontiers in Human Neuroscience}, volume = {10}, pages = {1--10}, abstract = {Visual attention seems essential for learning the statistical regularities in our environment, a process known as statistical learning. However, how attention is allocated when exploring a novel visual scene whose statistical structure is unknown remains unclear. In order to address this question, we investigated visual attention allocation during a task in which we manipulated the conditional probability of occurrence of colored stimuli, unbeknown to the subjects. Participants were instructed to detect a target colored dot among two dots moving along separate circular paths. We evaluated implicit statistical learning, i.e. the effect of color predictability on reaction times (RT), and recorded eye position concurrently. Attention allocation was indexed by comparing the Mahalanobis distance between the position, velocity and acceleration of the eyes and the 2 colored dots. We found that learning the conditional probabilities occurred very early during the course of the experiment as shown by the fact that, starting already from the first block, predictable stimuli were detected with shorter RT than unpredictable ones. In terms of attentional allocation, we found that the predictive stimulus attracted gaze only when it was informative about the occurrence of the target but not when it predicted the occurrence of a task-irrelevant stimulus. This suggests that attention allocation was influenced by regularities only when they were instrumental in performing the task. Moreover, we found that the attentional bias towards task-relevant predictive stimuli occurred at a very early stage of learning, concomitantly with the first effects of learning on RT. In conclusion, these results show that statistical regularities capture visual attention only after a few occurrences, provided these regularities are instrumental to perform the task.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual attention seems essential for learning the statistical regularities in our environment, a process known as statistical learning. However, how attention is allocated when exploring a novel visual scene whose statistical structure is unknown remains unclear. In order to address this question, we investigated visual attention allocation during a task in which we manipulated the conditional probability of occurrence of colored stimuli, unbeknown to the subjects. Participants were instructed to detect a target colored dot among two dots moving along separate circular paths. We evaluated implicit statistical learning, i.e. the effect of color predictability on reaction times (RT), and recorded eye position concurrently. Attention allocation was indexed by comparing the Mahalanobis distance between the position, velocity and acceleration of the eyes and the 2 colored dots. We found that learning the conditional probabilities occurred very early during the course of the experiment as shown by the fact that, starting already from the first block, predictable stimuli were detected with shorter RT than unpredictable ones. 
In terms of attentional allocation, we found that the predictive stimulus attracted gaze only when it was informative about the occurrence of the target but not when it predicted the occurrence of a task-irrelevant stimulus. This suggests that attention allocation was influenced by regularities only when they were instrumental in performing the task. Moreover, we found that the attentional bias towards task-relevant predictive stimuli occurred at a very early stage of learning, concomitantly with the first effects of learning on RT. In conclusion, these results show that statistical regularities capture visual attention only after a few occurrences, provided these regularities are instrumental to perform the task. |
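The attention index here is a Mahalanobis distance in a six-dimensional kinematic space (2-D position, velocity, acceleration) between the eye and each dot; the smaller the distance to a dot, the more closely gaze tracks it. A sketch assuming the covariance is estimated from the eye's own kinematic samples:

    import numpy as np
    from scipy.spatial.distance import mahalanobis

    def kinematic_distance(eye_state, dot_state, eye_samples):
        """Mahalanobis distance between eye and dot kinematics.

        eye_state, dot_state: length-6 vectors (x, y, vx, vy, ax, ay).
        eye_samples: (n_samples, 6) array used to estimate the covariance.
        """
        vi = np.linalg.inv(np.cov(eye_samples, rowvar=False))
        return mahalanobis(eye_state, dot_state, vi)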
Andrea Alamia; Oleg Solopchuk; Alexandre Zénon Strong conscious cues suppress preferential gaze allocation to unconscious cues Journal Article Frontiers in Human Neuroscience, 12 , pp. 1–9, 2018. @article{Alamia2018, title = {Strong conscious cues suppress preferential gaze allocation to unconscious cues}, author = {Andrea Alamia and Oleg Solopchuk and Alexandre Zénon}, doi = {10.3389/fnhum.2018.00427}, year = {2018}, date = {2018-01-01}, journal = {Frontiers in Human Neuroscience}, volume = {12}, pages = {1--9}, abstract = {Visual attention allows relevant information to be selected for further processing. Both conscious and unconscious visual stimuli can bias attentional allocation, but how these two types of visual information interact to guide attention remains unclear. In this study, we explored attentional allocation during a motion discrimination task with varied motion strength and unconscious associations between stimuli and cues. Participants were instructed to report the motion direction of two colored patches of dots. Unbeknown to participants, dot colors were sometimes informative of the correct response. We found that subjects learnt the associations between colors and motion direction but failed to report this association using the questionnaire filled at the end of the experiment, confirming that learning remained unconscious. The eye movement analyses revealed that allocation of attention to unconscious sources of information occurred mostly when motion coherence was low, indicating that unconscious cues influence attentional allocation only in the absence of strong conscious cues. All in all, our results reveal that conscious and unconscious sources of information interact with each other to influence attentional allocation and suggest a selection process that weights cues in proportion to their reliability.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual attention allows relevant information to be selected for further processing. Both conscious and unconscious visual stimuli can bias attentional allocation, but how these two types of visual information interact to guide attention remains unclear. In this study, we explored attentional allocation during a motion discrimination task with varied motion strength and unconscious associations between stimuli and cues. Participants were instructed to report the motion direction of two colored patches of dots. Unbeknown to participants, dot colors were sometimes informative of the correct response. We found that subjects learnt the associations between colors and motion direction but failed to report this association using the questionnaire filled at the end of the experiment, confirming that learning remained unconscious. The eye movement analyses revealed that allocation of attention to unconscious sources of information occurred mostly when motion coherence was low, indicating that unconscious cues influence attentional allocation only in the absence of strong conscious cues. All in all, our results reveal that conscious and unconscious sources of information interact with each other to influence attentional allocation and suggest a selection process that weights cues in proportion to their reliability. |
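The closing suggestion, that cues are weighted in proportion to their reliability, matches the standard reliability-weighted combination rule; in the usual notation (a textbook formulation, not an equation from the paper):

    $\hat{s} = \sum_i w_i \hat{s}_i, \qquad w_i = \frac{r_i}{\sum_j r_j}, \qquad r_i = \frac{1}{\sigma_i^2}$

where $\hat{s}_i$ is the estimate supplied by cue $i$ and $\sigma_i^2$ its variance. A strong (low-variance) conscious motion cue then drives the weight on the unconscious colour cue toward zero, which is the suppression pattern observed.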
Andrea Alamia; Rufin VanRullen; Emanuele Pasqualotto; André Mouraux; Alexandre Zenon Pupil-linked arousal responds to unconscious surprisal Journal Article Journal of Neuroscience, 39 (27), pp. 5369–5376, 2019. @article{Alamia2019, title = {Pupil-linked arousal responds to unconscious surprisal}, author = {Andrea Alamia and Rufin VanRullen and Emanuele Pasqualotto and André Mouraux and Alexandre Zenon}, doi = {10.1523/JNEUROSCI.3010-18.2019}, year = {2019}, date = {2019-01-01}, journal = {Journal of Neuroscience}, volume = {39}, number = {27}, pages = {5369--5376}, abstract = {Pupil size under constant illumination reflects brain arousal state, and dilates in response to novel information, or surprisal. Whether this response can be observed regardless of conscious perception is still unknown. In the present study, male and female adult humans performed an implicit learning task across a series of three experiments. We measured pupil and brain-evoked potentials to stimuli that violated transition statistics but were not relevant to the task. We found that pupil size dilated following these surprising events, in the absence of awareness of transition statistics, and only when attention was allocated to the stimulus. These pupil responses correlated with central potentials, evoking an anterior cingulate origin. Arousal response to surprisal outside the scope of conscious perception points to the fundamental relationship between arousal and information processing and indicates that pupil size can be used to track the progression of implicit learning.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Pupil size under constant illumination reflects brain arousal state, and dilates in response to novel information, or surprisal. Whether this response can be observed regardless of conscious perception is still unknown. In the present study, male and female adult humans performed an implicit learning task across a series of three experiments. We measured pupil and brain-evoked potentials to stimuli that violated transition statistics but were not relevant to the task. We found that pupil size dilated following these surprising events, in the absence of awareness of transition statistics, and only when attention was allocated to the stimulus. These pupil responses correlated with central potentials, evoking an anterior cingulate origin. Arousal response to surprisal outside the scope of conscious perception points to the fundamental relationship between arousal and information processing and indicates that pupil size can be used to track the progression of implicit learning. |
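Surprisal here has its usual information-theoretic meaning: the negative log probability of the current stimulus given the learned transition statistics. A minimal sketch (the state coding and transition matrix are illustrative):

    import numpy as np

    def surprisal_bits(transition_probs, prev_state, state):
        """Shannon surprisal of observing `state` after `prev_state`.

        transition_probs: (n_states, n_states) row-stochastic matrix of
        transition statistics; rare transitions yield high surprisal,
        the quantity that pupil dilation tracked in this study.
        """
        return float(-np.log2(transition_probs[prev_state, state]))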
Concetta F Alberti; Peter J Bex Binocular contrast summation and inhibition depends on spatial frequency, eccentricity and binocular disparity Journal Article Ophthalmic and Physiological Optics, 38 (5), pp. 525–537, 2018. @article{Alberti2018, title = {Binocular contrast summation and inhibition depends on spatial frequency, eccentricity and binocular disparity}, author = {Concetta F Alberti and Peter J Bex}, doi = {10.1111/opo.12581}, year = {2018}, date = {2018-01-01}, journal = {Ophthalmic and Physiological Optics}, volume = {38}, number = {5}, pages = {525--537}, abstract = {Purpose: When central vision is compromised, visually-guided behaviour becomes dependent on peripheral retina, often at a preferred retinal locus (PRL). Previous studies have examined adaptation to central vision loss with monocular 2D paradigms, whereas in real tasks, patients make binocular eye movements to targets of various sizes and depth in 3D environments. Methods: We therefore examined monocular and binocular contrast sensitivity functions with a 26-AFC (alternate forced choice) band-pass filtered letter identification task at 2° or 6° eccentricity in observers with simulated central vision loss. Binocular stimuli were presented in corresponding or non-corresponding stereoscopic retinal locations. Gaze-contingent scotomas (0.5° radius disks of pink noise) were simulated independently in each eye with a 1000 Hz eye tracker and 120 Hz dichoptic shutter glasses. Results: Contrast sensitivity was higher for binocular than monocular conditions, but only exceeded probability summation at low-mid spatial frequencies in corresponding retinal locations. At high spatial frequencies or non-corresponding retinal locations, binocular contrast sensitivity showed evidence of interocular suppression. Conclusions: These results suggest that binocular vision deficits may be underestimated by monocular vision tests and identify a method that can be used to select a PRL based on binocular contrast summation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: When central vision is compromised, visually-guided behaviour becomes dependent on peripheral retina, often at a preferred retinal locus (PRL). Previous studies have examined adaptation to central vision loss with monocular 2D paradigms, whereas in real tasks, patients make binocular eye movements to targets of various sizes and depth in 3D environments. Methods: We therefore examined monocular and binocular contrast sensitivity functions with a 26-AFC (alternate forced choice) band-pass filtered letter identification task at 2° or 6° eccentricity in observers with simulated central vision loss. Binocular stimuli were presented in corresponding or non-corresponding stereoscopic retinal locations. Gaze-contingent scotomas (0.5° radius disks of pink noise) were simulated independently in each eye with a 1000 Hz eye tracker and 120 Hz dichoptic shutter glasses. Results: Contrast sensitivity was higher for binocular than monocular conditions, but only exceeded probability summation at low-mid spatial frequencies in corresponding retinal locations. At high spatial frequencies or non-corresponding retinal locations, binocular contrast sensitivity showed evidence of interocular suppression. Conclusions: These results suggest that binocular vision deficits may be underestimated by monocular vision tests and identify a method that can be used to select a PRL based on binocular contrast summation. |
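The probability-summation benchmark referred to in the Results is the prediction that the two eyes provide independent chances to detect; in its simplest form (the paper may use a Quick-pooling variant, so treat this as the generic formulation):

    $P_{\mathrm{bin}} = 1 - (1 - P_L)(1 - P_R)$

Binocular performance above $P_{\mathrm{bin}}$ therefore indicates genuine neural summation of the two eyes' signals, while performance below the better monocular level indicates interocular suppression.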
Andrea Albonico; Manuela Malaspina; Emanuela Bricolo; Marialuisa Martelli; Roberta Daini Temporal dissociation between the focal and orientation components of spatial attention in central and peripheral vision Journal Article Acta Psychologica, 171 , pp. 85–92, 2016. @article{Albonico2016, title = {Temporal dissociation between the focal and orientation components of spatial attention in central and peripheral vision}, author = {Andrea Albonico and Manuela Malaspina and Emanuela Bricolo and Marialuisa Martelli and Roberta Daini}, doi = {10.1016/j.actpsy.2016.10.003}, year = {2016}, date = {2016-01-01}, journal = {Acta Psychologica}, volume = {171}, pages = {85--92}, publisher = {Elsevier B.V.}, abstract = {Selective attention, i.e. the ability to concentrate one's limited processing resources on one aspect of the environment, is a multifaceted concept that includes different processes like spatial attention and its subcomponents of orienting and focusing. Several studies, indeed, have shown that visual task performance is positively influenced not only by attracting attention to the target location (orientation component), but also by the adjustment of the size of the attentional window according to task demands (focal component). Nevertheless, the relative weight of the two components in central and peripheral vision has never been studied. We conducted two experiments to explore whether different components of spatial attention have different effects in central and peripheral vision. In order to do so, participants underwent either a detection (Experiment 1) or a discrimination (Experiment 2) task where different types of cues elicited different components of spatial attention: a red dot, a small square and a big square (an optimal stimulus for the orientation component, an optimal and a sub-optimal stimulus for the focal component respectively). Response times and cue-size effects indicated a stronger effect of the small square or of the dot in different conditions, suggesting the existence of a dissociation in terms of mechanisms between the focal and the orientation components of spatial attention. Specifically, we found that the orientation component was stronger in the periphery, while the focal component was noticeable only in central vision and characterized by an exogenous nature.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Selective attention, i.e. the ability to concentrate one's limited processing resources on one aspect of the environment, is a multifaceted concept that includes different processes like spatial attention and its subcomponents of orienting and focusing. Several studies, indeed, have shown that visual task performance is positively influenced not only by attracting attention to the target location (orientation component), but also by the adjustment of the size of the attentional window according to task demands (focal component). Nevertheless, the relative weight of the two components in central and peripheral vision has never been studied. We conducted two experiments to explore whether different components of spatial attention have different effects in central and peripheral vision. 
In order to do so, participants underwent either a detection (Experiment 1) or a discrimination (Experiment 2) task where different types of cues elicited different components of spatial attention: a red dot, a small square and a big square (an optimal stimulus for the orientation component, an optimal and a sub-optimal stimulus for the focal component respectively). Response times and cue-size effects indicated a stronger effect of the small square or of the dot in different conditions, suggesting the existence of a dissociation in terms of mechanisms between the focal and the orientation components of spatial attention. Specifically, we found that the orientation component was stronger in the periphery, while the focal component was noticeable only in central vision and characterized by an exogenous nature. |
Noor Z Al Dahhan; John R Kirby; Donald C Brien; Rina Gupta; Allyson Harrison; Douglas P Munoz Understanding the biological basis of dyslexia at a neural systems level Journal Article Brain Communications, pp. 1–16, 2020. @article{AlDahhan2020a, title = {Understanding the biological basis of dyslexia at a neural systems level}, author = {Noor Z {Al Dahhan} and John R Kirby and Donald C Brien and Rina Gupta and Allyson Harrison and Douglas P Munoz}, doi = {10.1093/braincomms/fcaa173}, year = {2020}, date = {2020-01-01}, journal = {Brain Communications}, pages = {1--16}, abstract = {We examined the naming speed performance of 18 typically achieving and 16 dyslexic adults while simultaneously recording eye movements, articulations and fMRI data. Naming speed tasks, which require participants to name a list of letters or objects, have been proposed as a proxy for reading and are thought to recruit similar reading networks in the left hemisphere of the brain as more complex reading tasks. We employed letter and object naming speed tasks, with task manipulations to make the stimuli more or less phonologically and/or visually similar. Compared to typically achieving readers, readers with dyslexia had a poorer behavioural naming speed task performance, longer fixation durations, more regressions and increased activation in areas of the reading network in the left hemisphere. Whereas increased network activation was positively associated with performance in dyslexics, it was negatively related to performance in typically achieving readers. Readers with dyslexia had greater bilateral activation and recruited additional regions involved with memory, namely the amygdala and hippocampus; in contrast, the typically achieving readers additionally activated the dorsolateral prefrontal cortex. Areas within the reading network were differentially activated by stimulus manipulations to the naming speed tasks. There was less efficient naming speed behavioural performance, longer fixation durations, more regressions and increased neural activity when letter stimuli were both phonologically and visually similar. Discussion focuses on the differences in activation within the reading network, how they are related to behavioural task differences, and how progress in furthering the understanding of the relationship between behavioural performance and brain activity can change the overall trajectories of children with reading difficulties by contributing to both early identification and remediation processes. 
Abbreviations: AC-PC = anterior commissure-posterior commissure plane; AG = angular gyrus; DLPFC = dorsolateral prefrontal cortex; FEF = frontal eye fields; FG = fusiform gyrus; fMRI = functional magnetic resonance imaging; IFG = inferior frontal gyrus; LC = letters control NS task; MFG = middle frontal gyrus; MOG = middle occipital gyrus; MP-RAGE = magnetization-prepared rapid gradient-echo; MTG = middle temporal gyrus; NS = naming speed; OC = object control NS task; OPS = phonologically similar object NS task; PEF = parietal eye field; PS = phonologically similar NS task; RFX GLM = random-effects multi-subject general linear model; ROI = regions of interest; SEF = supplementary eye field; SMG = supramarginal gyrus; STG = superior temporal gyrus; VS = visually similar NS task; VPS = visually and phonologically similar NS task.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We examined the naming speed performance of 18 typically achieving and 16 dyslexic adults while simultaneously recording eye movements, articulations and fMRI data. Naming speed tasks, which require participants to name a list of letters or objects, have been proposed as a proxy for reading and are thought to recruit similar reading networks in the left hemisphere of the brain as more complex reading tasks. We employed letter and object naming speed tasks, with task manipulations to make the stimuli more or less phonologically and/or visually similar. Compared to typically achieving readers, readers with dyslexia had a poorer behavioural naming speed task performance, longer fixation durations, more regressions and increased activation in areas of the reading network in the left hemisphere. Whereas increased network activation was positively associated with performance in dyslexics, it was negatively related to performance in typically achieving readers. Readers with dyslexia had greater bilateral activation and recruited additional regions involved with memory, namely the amygdala and hippocampus; in contrast, the typically achieving readers additionally activated the dorsolateral prefrontal cortex. Areas within the reading network were differentially activated by stimulus manipulations to the naming speed tasks. There was less efficient naming speed behavioural performance, longer fixation durations, more regressions and increased neural activity when letter stimuli were both phonologically and visually similar. Discussion focuses on the differences in activation within the reading network, how they are related to behavioural task differences, and how progress in furthering the understanding of the relationship between behavioural performance and brain activity can change the overall trajectories of children with reading difficulties by contributing to both early identification and remediation processes. 
Abbreviations: AC-PC = anterior commissure-posterior commissure plane; AG = angular gyrus; DLPFC = dorsolateral prefrontal cortex; FEF = frontal eye fields; FG = fusiform gyrus; fMRI = functional magnetic resonance imaging; IFG = inferior frontal gyrus; LC = letters control NS task; MFG = middle frontal gyrus; MOG = middle occipital gyrus; MP-RAGE = magnetization-prepared rapid gradient-echo; MTG = middle temporal gyrus; NS = naming speed; OC = object control NS task; OPS = phonologically similar object NS task; PEF = parietal eye field; PS = phonologically similar NS task; RFX GLM = random-effects multi-subject general linear model; ROI = regions of interest; SEF = supplementary eye field; SMG = supramarginal gyrus; STG = superior temporal gyrus; VS = visually similar NS task; VPS = visually and phonologically similar NS task. |
Francesca Ales; Luciano Giromini; Alessandro Zennaro Complexity and cognitive engagement in the Rorschach task: An eye-tracking study Journal Article Journal of Personality Assessment, 102 (4), pp. 538–550, 2020. @article{Ales2020, title = {Complexity and cognitive engagement in the Rorschach task: An eye-tracking study}, author = {Francesca Ales and Luciano Giromini and Alessandro Zennaro}, doi = {10.1080/00223891.2019.1575227}, year = {2020}, date = {2020-01-01}, journal = {Journal of Personality Assessment}, volume = {102}, number = {4}, pages = {538--550}, publisher = {Routledge}, abstract = {This study investigated whether complexity and the other related Rorschach Performance Assessment System (R-PAS) variables in the engagement and cognitive processing domain would associate with eye-tracking measures reflecting increased cognitive engagement and effort while visually scanning the Rorschach inkblots. A nonclinical sample of 71 adult volunteers were administered the Rorschach task while their eye movements were recorded using an eye tracker. Then, the average duration of fixations, the average number of fixations, the average amplitude of saccades, and the average maximum pupil size recorded during the response phase (RP) of the Rorschach administration were correlated with protocol-level R-PAS variables located in the engagement and cognitive processing domain. As expected, complexity correlated, with a large effect size (r = .526, p < .01), with the number of fixations occurring during the RP of Rorschach administration. Some other variables related to complexity (e.g., Synthesis, Sy) also produced similar associations. The other eye-tracking variables under examination, however, produced weak or nonsignificant correlations.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study investigated whether complexity and the other related Rorschach Performance Assessment System (R-PAS) variables in the engagement and cognitive processing domain would associate with eye-tracking measures reflecting increased cognitive engagement and effort while visually scanning the Rorschach inkblots. A nonclinical sample of 71 adult volunteers were administered the Rorschach task while their eye movements were recorded using an eye tracker. Then, the average duration of fixations, the average number of fixations, the average amplitude of saccades, and the average maximum pupil size recorded during the response phase (RP) of the Rorschach administration were correlated with protocol-level R-PAS variables located in the engagement and cognitive processing domain. As expected, complexity correlated, with a large effect size (r = .526, p < .01), with the number of fixations occurring during the RP of Rorschach administration. Some other variables related to complexity (e.g., Synthesis, Sy) also produced similar associations. The other eye-tracking variables under examination, however, produced weak or nonsignificant correlations. |
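All four predictors are protocol-level aggregates over the response phase, computed once fixations and saccades have been segmented. A sketch with illustrative field names:

    import numpy as np

    def rp_eye_summaries(fixations, saccades, pupil_samples):
        """Protocol-level eye-tracking predictors for the response phase."""
        return {
            "mean_fixation_duration": float(np.mean([f["duration"] for f in fixations])),
            "n_fixations": len(fixations),
            "mean_saccade_amplitude": float(np.mean([s["amplitude"] for s in saccades])),
            "max_pupil_size": float(np.max(pupil_samples)),
        }

Each summary, aggregated across a participant's responses, can then be correlated with protocol-level R-PAS scores such as Complexity.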
Stepan Aleshin; Gergo Ziman; Ilona Kovács; Jochen Braun Perceptual reversals in binocular rivalry: Improved detection from OKN Journal Article Journal of Vision, 19 (3), pp. 1–18, 2019. @article{Aleshin2019, title = {Perceptual reversals in binocular rivalry: Improved detection from OKN}, author = {Stepan Aleshin and Gergo Ziman and Ilona Kovács and Jochen Braun}, doi = {10.1167/19.3.5}, year = {2019}, date = {2019-01-01}, journal = {Journal of Vision}, volume = {19}, number = {3}, pages = {1--18}, abstract = {When binocular rivalry is induced by opponent motion displays, perceptual reversals are often associated with changed oculomotor behavior (Frässle, Sommer, Jansen, Naber, & Einhäuser, 2014; Fujiwara et al., 2017). Specifically, the direction of smooth pursuit phases in optokinetic nystagmus typically corresponds to the direction of motion that dominates perceptual appearance at any given time. Here we report an improved analysis that continuously estimates perceived motion in terms of "cumulative smooth pursuit." In essence, smooth pursuit segments are identified, interpolated where necessary, and joined probabilistically into a continuous record of cumulative smooth pursuit (i.e., probability of eye position disregarding blinks, saccades, signal losses, and artefacts). The analysis is fully automated and robust in healthy, developmental, and patient populations. To validate reliability, we compare volitional reports of perceptual reversals in rivalry displays, and of physical reversals in nonrivalrous control displays. Cumulative smooth pursuit detects physical reversals and estimates eye velocity more accurately than existing methods do (Frässle et al., 2014). It also appears to distinguish dominant and transitional perceptual states, detecting changes with a precision of ± 100 ms. We conclude that cumulative smooth pursuit significantly improves the monitoring of binocular rivalry by means of recording optokinetic nystagmus.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When binocular rivalry is induced by opponent motion displays, perceptual reversals are often associated with changed oculomotor behavior (Frässle, Sommer, Jansen, Naber, & Einhäuser, 2014; Fujiwara et al., 2017). Specifically, the direction of smooth pursuit phases in optokinetic nystagmus typically corresponds to the direction of motion that dominates perceptual appearance at any given time. Here we report an improved analysis that continuously estimates perceived motion in terms of "cumulative smooth pursuit." In essence, smooth pursuit segments are identified, interpolated where necessary, and joined probabilistically into a continuous record of cumulative smooth pursuit (i.e., probability of eye position disregarding blinks, saccades, signal losses, and artefacts). The analysis is fully automated and robust in healthy, developmental, and patient populations. To validate reliability, we compare volitional reports of perceptual reversals in rivalry displays, and of physical reversals in nonrivalrous control displays. Cumulative smooth pursuit detects physical reversals and estimates eye velocity more accurately than existing methods do (Frässle et al., 2014). It also appears to distinguish dominant and transitional perceptual states, detecting changes with a precision of ± 100 ms. We conclude that cumulative smooth pursuit significantly improves the monitoring of binocular rivalry by means of recording optokinetic nystagmus. |
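Although the published method is probabilistic, its core can be sketched deterministically: classify samples as pursuit by speed, convert pursuit samples to displacement, and accumulate across blinks and saccades instead of resetting. A simplified sketch (the velocity thresholds are illustrative, not the paper's parameters):

    import numpy as np

    def cumulative_smooth_pursuit(x, sample_rate_hz, v_min=1.0, v_max=30.0):
        """Cumulative horizontal pursuit displacement from gaze samples.

        x: 1-D horizontal gaze position in degrees, NaN for blinks/losses.
        Samples whose speed lies in [v_min, v_max] deg/s count as pursuit;
        saccades and missing data contribute zero, so the running sum
        reflects only smooth-pursuit motion, whose sign tracks the
        currently dominant motion direction.
        """
        v = np.gradient(x) * sample_rate_hz  # deg/s; NaN where x is NaN
        pursuit = (np.abs(v) >= v_min) & (np.abs(v) <= v_max)
        dx = np.where(pursuit, v / sample_rate_hz, 0.0)
        return np.cumsum(dx)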
Robert G Alexander; Gregory J Zelinsky Visual similarity effects in categorical search Journal Article Journal of Vision, 11 (8), pp. 1–15, 2011. @article{Alexander2011, title = {Visual similarity effects in categorical search}, author = {Robert G Alexander and Gregory J Zelinsky}, doi = {10.1167/11.8.9}, year = {2011}, date = {2011-01-01}, journal = {Journal of Vision}, volume = {11}, number = {8}, pages = {1--15}, abstract = {We asked how visual similarity relationships affect search guidance to categorically defined targets (no visual preview). Experiment 1 used a web-based task to collect visual similarity rankings between two target categories, teddy bears and butterflies, and random-category objects, from which we created search displays in Experiment 2 having either high-similarity distractors, low-similarity distractors, or "mixed" displays with high-, medium-, and low-similarity distractors. Analysis of target-absent trials revealed faster manual responses and fewer fixated distractors on low-similarity displays compared to high-similarity displays. On mixed displays, first fixations were more frequent on high-similarity distractors (bear = 49%; butterfly = 58%) than on low-similarity distractors (bear = 9%; butterfly = 12%). Experiment 3 used the same high/low/mixed conditions, but now these conditions were created using similarity estimates from a computer vision model that ranked objects in terms of color, texture, and shape similarity. The same patterns were found, suggesting that categorical search can indeed be guided by purely visual similarity. Experiment 4 compared cases where the model and human rankings differed and when they agreed. We found that similarity effects were best predicted by cases where the two sets of rankings agreed, suggesting that both human visual similarity rankings and the computer vision model captured features important for guiding search to categorical targets.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We asked how visual similarity relationships affect search guidance to categorically defined targets (no visual preview). Experiment 1 used a web-based task to collect visual similarity rankings between two target categories, teddy bears and butterflies, and random-category objects, from which we created search displays in Experiment 2 having either high-similarity distractors, low-similarity distractors, or "mixed" displays with high-, medium-, and low-similarity distractors. Analysis of target-absent trials revealed faster manual responses and fewer fixated distractors on low-similarity displays compared to high-similarity displays. On mixed displays, first fixations were more frequent on high-similarity distractors (bear = 49%; butterfly = 58%) than on low-similarity distractors (bear = 9%; butterfly = 12%). Experiment 3 used the same high/low/mixed conditions, but now these conditions were created using similarity estimates from a computer vision model that ranked objects in terms of color, texture, and shape similarity. The same patterns were found, suggesting that categorical search can indeed be guided by purely visual similarity. Experiment 4 compared cases where the model and human rankings differed and when they agreed. We found that similarity effects were best predicted by cases where the two sets of rankings agreed, suggesting that both human visual similarity rankings and the computer vision model captured features important for guiding search to categorical targets. |
Robert G Alexander; Gregory J Zelinsky Effects of part-based similarity on visual search: The Frankenbear experiment Journal Article Vision Research, 54 , pp. 20–30, 2012. @article{Alexander2012, title = {Effects of part-based similarity on visual search: The Frankenbear experiment}, author = {Robert G Alexander and Gregory J Zelinsky}, doi = {10.1016/j.visres.2011.12.004}, year = {2012}, date = {2012-01-01}, journal = {Vision Research}, volume = {54}, pages = {20--30}, abstract = {Do the target-distractor and distractor-distractor similarity relationships known to exist for simple stimuli extend to real-world objects, and are these effects expressed in search guidance or target verification? Parts of photorealistic distractors were replaced with target parts to create four levels of target-distractor similarity under heterogeneous and homogeneous conditions. We found that increasing target-distractor similarity and decreasing distractor-distractor similarity impaired search guidance and target verification, but that target-distractor similarity and heterogeneity/homogeneity interacted only in measures of guidance; distractor homogeneity lessens effects of target-distractor similarity by causing gaze to fixate the target sooner, not by speeding target detection following its fixation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Do the target-distractor and distractor-distractor similarity relationships known to exist for simple stimuli extend to real-world objects, and are these effects expressed in search guidance or target verification? Parts of photorealistic distractors were replaced with target parts to create four levels of target-distractor similarity under heterogeneous and homogeneous conditions. We found that increasing target-distractor similarity and decreasing distractor-distractor similarity impaired search guidance and target verification, but that target-distractor similarity and heterogeneity/homogeneity interacted only in measures of guidance; distractor homogeneity lessens effects of target-distractor similarity by causing gaze to fixate the target sooner, not by speeding target detection following its fixation. |
Robert G Alexander; Joseph Schmidt; Gregory J Zelinsky Are summary statistics enough? Evidence for the importance of shape in guiding visual search Journal Article Visual Cognition, 22 (3), pp. 595–609, 2014. @article{Alexander2014, title = {Are summary statistics enough? Evidence for the importance of shape in guiding visual search}, author = {Robert G Alexander and Joseph Schmidt and Gregory J Zelinsky}, doi = {10.1080/13506285.2014.890989}, year = {2014}, date = {2014-01-01}, journal = {Visual Cognition}, volume = {22}, number = {3}, pages = {595--609}, abstract = {Peripheral vision outside the focus of attention may rely on summary statistics. We used a gaze-contingent paradigm to directly test this assumption by asking whether search performance differed between targets and statistically-matched visualizations of the same targets. Four-object search displays included one statistically-matched object that was replaced by an unaltered version of the object during the first eye movement. Targets were designated by previews, which were never altered. Two types of statistically-matched objects were tested: One that maintained global shape and one that did not. Differences in guidance were found between targets and statistically-matched objects when shape was not preserved, suggesting that they were not informationally equivalent. Responses were also slower after target fixation when shape was not preserved, suggesting an extrafoveal processing of the target that again used shape information. We conclude that summary statistics must include some global shape information to approximate the peripheral information used during search.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Peripheral vision outside the focus of attention may rely on summary statistics. We used a gaze-contingent paradigm to directly test this assumption by asking whether search performance differed between targets and statistically-matched visualizations of the same targets. Four-object search displays included one statistically-matched object that was replaced by an unaltered version of the object during the first eye movement. Targets were designated by previews, which were never altered. Two types of statistically-matched objects were tested: One that maintained global shape and one that did not. Differences in guidance were found between targets and statistically-matched objects when shape was not preserved, suggesting that they were not informationally equivalent. Responses were also slower after target fixation when shape was not preserved, suggesting an extrafoveal processing of the target that again used shape information. We conclude that summary statistics must include some global shape information to approximate the peripheral information used during search. |
Robert G Alexander; Gregory J Zelinsky Occluded information is restored at preview but not during visual search Journal Article Journal of Vision, 18 (11), pp. 1–16, 2018. @article{Alexander2018, title = {Occluded information is restored at preview but not during visual search}, author = {Robert G Alexander and Gregory J Zelinsky}, doi = {10.1167/18.11.4}, year = {2018}, date = {2018-01-01}, journal = {Journal of Vision}, volume = {18}, number = {11}, pages = {1--16}, abstract = {Objects often appear with some amount of occlusion. We fill in missing information using local shape features even before attending to those objects—a process called amodal completion. Here we explore the possibility that knowledge about common realistic objects can be used to "restore" missing information even in cases where amodal completion is not expected. We systematically varied whether visual search targets were occluded or not, both at preview and in search displays. Button-press responses were longest when the preview was unoccluded and the target was occluded in the search display. This pattern is consistent with a target-verification process that uses the features visible at preview but does not restore missing information in the search display. However, visual search guidance was weakest whenever the target was occluded in the search display, regardless of whether it was occluded at preview. This pattern suggests that information missing during the preview was restored and used to guide search, thereby resulting in a feature mismatch and poor guidance. If this process were preattentive, as with amodal completion, we should have found roughly equivalent search guidance across all conditions because the target would always be unoccluded or restored, resulting in no mismatch. We conclude that realistic objects are restored behind occluders during search target preview, even in situations not prone to amodal completion, and this restoration does not occur preattentively during search.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objects often appear with some amount of occlusion. We fill in missing information using local shape features even before attending to those objects—a process called amodal completion. Here we explore the possibility that knowledge about common realistic objects can be used to "restore" missing information even in cases where amodal completion is not expected. We systematically varied whether visual search targets were occluded or not, both at preview and in search displays. Button-press responses were longest when the preview was unoccluded and the target was occluded in the search display. This pattern is consistent with a target-verification process that uses the features visible at preview but does not restore missing information in the search display. However, visual search guidance was weakest whenever the target was occluded in the search display, regardless of whether it was occluded at preview. This pattern suggests that information missing during the preview was restored and used to guide search, thereby resulting in a feature mismatch and poor guidance. If this process were preattentive, as with amodal completion, we should have found roughly equivalent search guidance across all conditions because the target would always be unoccluded or restored, resulting in no mismatch. 
We conclude that realistic objects are restored behind occluders during search target preview, even in situations not prone to amodal completion, and this restoration does not occur preattentively during search. |
Robert G Alexander; Roxanna J Nahvi; Gregory J Zelinsky Specifying the precision of guiding features for visual search Journal Article Journal of Experimental Psychology: Human Perception and Performance, 45 (9), pp. 1248–1264, 2019. @article{Alexander2019, title = {Specifying the precision of guiding features for visual search}, author = {Robert G Alexander and Roxanna J Nahvi and Gregory J Zelinsky}, doi = {10.1037/xhp0000668}, year = {2019}, date = {2019-01-01}, journal = {Journal of Experimental Psychology: Human Perception and Performance}, volume = {45}, number = {9}, pages = {1248--1264}, abstract = {Visual search is the task of finding things with uncertain locations. Despite decades of research, the features that guide visual search remain poorly specified, especially in realistic contexts. This study tested the role of two features, shape and orientation, both in the presence and absence of hue information. We conducted five experiments to describe preview-target mismatch effects, decreases in performance caused by differences between the image of the target as it appears in the preview and as it appears in the actual search display. These mismatch effects provide direct measures of feature importance, with larger performance decrements expected for more important features. Contrary to previous conclusions, our data suggest that shape and orientation only guide visual search when color is not available. By varying the probability of mismatch in each feature dimension, we also show that these patterns of feature guidance do not change with the probability that the previewed feature will be invalid. We conclude that the target representations used to guide visual search are much less precise than previously believed, with participants encoding and using color and little else.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual search is the task of finding things with uncertain locations. Despite decades of research, the features that guide visual search remain poorly specified, especially in realistic contexts. This study tested the role of two features, shape and orientation, both in the presence and absence of hue information. We conducted five experiments to describe preview-target mismatch effects, decreases in performance caused by differences between the image of the target as it appears in the preview and as it appears in the actual search display. These mismatch effects provide direct measures of feature importance, with larger performance decrements expected for more important features. Contrary to previous conclusions, our data suggest that shape and orientation only guide visual search when color is not available. By varying the probability of mismatch in each feature dimension, we also show that these patterns of feature guidance do not change with the probability that the previewed feature will be invalid. We conclude that the target representations used to guide visual search are much less precise than previously believed, with participants encoding and using color and little else. |
Defne Alfandari; Artem V Belopolsky; Christian N L Olivers Eye movements reveal learning and information-seeking in attentional template acquisition Journal Article Visual Cognition, 27 (5-8), pp. 467–486, 2019. @article{Alfandari2019, title = {Eye movements reveal learning and information-seeking in attentional template acquisition}, author = {Defne Alfandari and Artem V Belopolsky and Christian N L Olivers}, doi = {10.1080/13506285.2019.1636918}, year = {2019}, date = {2019-01-01}, journal = {Visual Cognition}, volume = {27}, number = {5-8}, pages = {467--486}, publisher = {Taylor & Francis}, abstract = {Visual attention serves to select relevant visual information. However, observers often first need to find out what is relevant. Little is known about this information-seeking process and how it affects attention. We employed a cued visual search task in combination with eye tracking to investigate which oculomotor measures reflect the acquisition of information for a subsequent task. A cue indicated which target to look for in a following search display. Cue-target combinations were repeated several times, enabling learning of the target. We found that reductions in cue fixation times and saccade size provided stable indices of learning. Despite the learning, participants continued to attend to repeated cues. Several factors contribute to people attending to information they already know: First, the information value provided by the cue continues to drive attention. Second, even in the absence of information value, attention continues to be directed to cue features that previously signalled relevant information. Third, the decision to attend to a known cue depends on cognitive effort. We propose that this combination of information value, previous relevance, and effort is best captured within an information-seeking framework, and that oculomotor parameters provide a useful proxy for uncovering these factors and their interactions.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual attention serves to select relevant visual information. However, observers often first need to find out what is relevant. Little is known about this information-seeking process and how it affects attention. We employed a cued visual search task in combination with eye tracking to investigate which oculomotor measures reflect the acquisition of information for a subsequent task. A cue indicated which target to look for in a following search display. Cue-target combinations were repeated several times, enabling learning of the target. We found that reductions in cue fixation times and saccade size provided stable indices of learning. Despite the learning, participants continued to attend to repeated cues. Several factors contribute to people attending to information they already know: First, the information value provided by the cue continues to drive attention. Second, even in the absence of information value, attention continues to be directed to cue features that previously signalled relevant information. Third, the decision to attend to a known cue depends on cognitive effort. We propose that this combination of information value, previous relevance, and effort is best captured within an information-seeking framework, and that oculomotor parameters provide a useful proxy for uncovering these factors and their interactions. |
Sara Alhanbali; Piers Dawes; Rebecca E Millman; Kevin J Munro Measures of listening effort are multidimensional Journal Article Ear and Hearing, 40 (5), pp. 1084–1097, 2019. @article{Alhanbali2019, title = {Measures of listening effort are multidimensional}, author = {Sara Alhanbali and Piers Dawes and Rebecca E Millman and Kevin J Munro}, doi = {10.1097/AUD.0000000000000697}, year = {2019}, date = {2019-01-01}, journal = {Ear and Hearing}, volume = {40}, number = {5}, pages = {1084--1097}, abstract = {OBJECTIVES: Listening effort can be defined as the cognitive resources required to perform a listening task. The literature on listening effort is as confusing as it is voluminous: measures of listening effort rarely correlate with each other and sometimes result in contradictory findings. Here, we directly compared simultaneously recorded multimodal measures of listening effort. After establishing the reliability of the measures, we investigated validity by quantifying correlations between measures and then grouping related measures through factor analysis. DESIGN: One hundred and sixteen participants with audiometric thresholds ranging from normal to severe hearing loss took part in the study (age range: 55 to 85 years old, 50.3% male). We simultaneously measured pupil size, electroencephalographic alpha power, skin conductance, and self-report listening effort. One self-report measure of fatigue was also included. The signal to noise ratio (SNR) was adjusted at 71% criterion performance using sequences of 3 digits. The main listening task involved correct recall of a random digit from a sequence of six presented at a SNR where performance was around 82 to 93%. Test-retest reliability of the measures was established by retesting 30 participants 7 days after the initial session. RESULTS: With the exception of skin conductance and the self-report measure of fatigue, intraclass correlation coefficients (ICC) revealed good test-retest reliability (minimum ICC: 0.71). Weak or nonsignificant correlations were identified between measures. Factor analysis, using only the reliable measures, revealed four underlying dimensions: factor 1 included SNR, hearing level, baseline alpha power, and performance accuracy; factor 2 included pupillometry; factor 3 included alpha power (during speech presentation and during retention); factor 4 included self-reported listening effort and baseline alpha power. CONCLUSIONS: The good ICC suggests that poor test reliability is not the reason for the lack of correlation between measures. We have demonstrated that measures traditionally used as indicators of listening effort tap into multiple underlying dimensions. We therefore propose that there is no "gold standard" measure of listening effort and that different measures of listening effort should not be used interchangeably. When choosing method(s) to measure listening effort, the nature of the task and aspects of increased listening demands that are of interest should be taken into account. The findings of this study provide a framework for understanding and interpreting listening effort measures.}, keywords = {}, pubstate = {published}, tppubtype = {article} } OBJECTIVES: Listening effort can be defined as the cognitive resources required to perform a listening task. The literature on listening effort is as confusing as it is voluminous: measures of listening effort rarely correlate with each other and sometimes result in contradictory findings. 
Here, we directly compared simultaneously recorded multimodal measures of listening effort. After establishing the reliability of the measures, we investigated validity by quantifying correlations between measures and then grouping related measures through factor analysis. DESIGN: One hundred and sixteen participants with audiometric thresholds ranging from normal to severe hearing loss took part in the study (age range: 55 to 85 years old, 50.3% male). We simultaneously measured pupil size, electroencephalographic alpha power, skin conductance, and self-report listening effort. One self-report measure of fatigue was also included. The signal to noise ratio (SNR) was adjusted at 71% criterion performance using sequences of 3 digits. The main listening task involved correct recall of a random digit from a sequence of six presented at a SNR where performance was around 82 to 93%. Test-retest reliability of the measures was established by retesting 30 participants 7 days after the initial session. RESULTS: With the exception of skin conductance and the self-report measure of fatigue, intraclass correlation coefficients (ICC) revealed good test-retest reliability (minimum ICC: 0.71). Weak or nonsignificant correlations were identified between measures. Factor analysis, using only the reliable measures, revealed four underlying dimensions: factor 1 included SNR, hearing level, baseline alpha power, and performance accuracy; factor 2 included pupillometry; factor 3 included alpha power (during speech presentation and during retention); factor 4 included self-reported listening effort and baseline alpha power. CONCLUSIONS: The good ICC suggests that poor test reliability is not the reason for the lack of correlation between measures. We have demonstrated that measures traditionally used as indicators of listening effort tap into multiple underlying dimensions. We therefore propose that there is no "gold standard" measure of listening effort and that different measures of listening effort should not be used interchangeably. When choosing method(s) to measure listening effort, the nature of the task and aspects of increased listening demands that are of interest should be taken into account. The findings of this study provide a framework for understanding and interpreting listening effort measures. |
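Two analyses carry this paper: test-retest reliability via intraclass correlations, and a factor analysis that groups the reliable measures into latent dimensions. A compact sketch of the factor-analysis step with scikit-learn follows (the data, measure names, and number of factors are invented placeholders; the authors' exact extraction and rotation choices are not reproduced here):

```python
# Sketch: group listening-effort measures into latent dimensions with
# factor analysis, in the spirit of Alhanbali et al. (2019). Data are simulated.
import numpy as np
from sklearn.decomposition import FactorAnalysis
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(1)
n = 116                                     # sample size reported in the study
latent = rng.normal(size=(n, 2))            # two hypothetical underlying dimensions
loadings = np.array([[0.9, 0.0],            # baseline pupil   -> factor 1
                     [0.8, 0.1],            # peak pupil       -> factor 1
                     [0.1, 0.9],            # alpha power      -> factor 2
                     [0.0, 0.8]])           # self-report      -> factor 2
X = latent @ loadings.T + 0.3 * rng.normal(size=(n, 4))

fa = FactorAnalysis(n_components=2, random_state=0)
fa.fit(StandardScaler().fit_transform(X))
print(np.round(fa.components_, 2))          # rows = factors, columns = measures
```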
Sara Alhanbali; Kevin J Munro; Piers Dawes; Peter J Carolan; Rebecca E Millman Dimensions of self-reported listening effort and fatigue on a digits-in-noise task, and association with baseline pupil size and performance accuracy Journal Article International Journal of Audiology, pp. 1–12, 2020. @article{Alhanbali2020, title = {Dimensions of self-reported listening effort and fatigue on a digits-in-noise task, and association with baseline pupil size and performance accuracy}, author = {Sara Alhanbali and Kevin J Munro and Piers Dawes and Peter J Carolan and Rebecca E Millman}, doi = {10.1080/14992027.2020.1853262}, year = {2020}, date = {2020-01-01}, journal = {International Journal of Audiology}, pages = {1--12}, publisher = {Taylor & Francis}, abstract = {Objective: Pupillometry is sensitive to cognitive resource allocation and has been used as a potential measure of listening-related effort and fatigue. We investigated associations between peak pupil diameter, pre-stimulus pupil diameter, performance on a listening task, and the dimensionality of self-reported outcomes (task-related listening effort and fatigue). Design: Pupillometry was recorded while participants performed a speech-in-noise task. Participants rated their experience of listening effort and fatigue using the NASA-Task Load Index (NASA-TLX) and the Visual Analogue Scale of Fatigue (VAS-F), respectively. The dimensionality of the NASA-TLX and the VAS-F was investigated using factor analysis. Study sample: 82 participants with either normal hearing or aided hearing impairment (age range: 55–85 years old, 43 male). Results: Hierarchical linear regression analyses suggested that pre-stimulus pupil diameter predicts a dimension of self-reported fatigue, which we interpreted as tiredness/drowsiness, and listening task performance when controlling for hearing level and age: Larger pre-stimulus pupil diameter was associated with less tiredness/drowsiness and better task performance. Conclusion: Pre-stimulus pupil diameter is a potential index of listening fatigue associated with speech processing in challenging listening conditions. To our knowledge, this is the first investigation of the associations between pre-stimulus pupil diameter and self-reported ratings of listening effort and fatigue.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objective: Pupillometry is sensitive to cognitive resource allocation and has been used as a potential measure of listening-related effort and fatigue. We investigated associations between peak pupil diameter, pre-stimulus pupil diameter, performance on a listening task, and the dimensionality of self-reported outcomes (task-related listening effort and fatigue). Design: Pupillometry was recorded while participants performed a speech-in-noise task. Participants rated their experience of listening effort and fatigue using the NASA-Task Load Index (NASA-TLX) and the Visual Analogue Scale of Fatigue (VAS-F), respectively. The dimensionality of the NASA-TLX and the VAS-F was investigated using factor analysis. Study sample: 82 participants with either normal hearing or aided hearing impairment (age range: 55–85 years old, 43 male). Results: Hierarchical linear regression analyses suggested that pre-stimulus pupil diameter predicts a dimension of self-reported fatigue, which we interpreted as tiredness/drowsiness, and listening task performance when controlling for hearing level and age: Larger pre-stimulus pupil diameter was associated with less tiredness/drowsiness and better task performance. Conclusion: Pre-stimulus pupil diameter is a potential index of listening fatigue associated with speech processing in challenging listening conditions. 
To our knowledge, this is the first investigation of the associations between pre-stimulus pupil diameter and self-reported ratings of listening effort and fatigue. |
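The hierarchical regression in this entry asks whether pre-stimulus pupil diameter adds predictive power after controlling for hearing level and age. A minimal two-step version with statsmodels is sketched below; the column names and simulated data are invented, and this is not the authors' analysis pipeline:

```python
# Sketch of a two-step hierarchical regression: enter control variables
# first, then test the R^2 increment from pre-stimulus pupil diameter.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(2)
n = 82                                       # sample size reported in the study
df = pd.DataFrame({
    "age": rng.uniform(55, 85, n),
    "hearing_level": rng.normal(30, 10, n),  # hypothetical dB HL
    "pupil": rng.normal(4.0, 0.5, n),        # pre-stimulus diameter (mm)
})
df["tiredness"] = 10 - 1.5 * df["pupil"] + 0.02 * df["age"] + rng.normal(0, 1, n)

step1 = smf.ols("tiredness ~ age + hearing_level", df).fit()
step2 = smf.ols("tiredness ~ age + hearing_level + pupil", df).fit()
print(f"R2 step 1: {step1.rsquared:.3f}, step 2: {step2.rsquared:.3f}")
print(step2.compare_f_test(step1))           # F-test on the R^2 increment
```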
Arjen Alink; Felix Euler; Nikolaus Kriegeskorte; Wolf Singer; Axel Kohler Auditory motion direction encoding in auditory cortex and high-level visual cortex Journal Article Human Brain Mapping, 33 (4), pp. 969–978, 2012. @article{Alink2012, title = {Auditory motion direction encoding in auditory cortex and high-level visual cortex}, author = {Arjen Alink and Felix Euler and Nikolaus Kriegeskorte and Wolf Singer and Axel Kohler}, doi = {10.1002/hbm.21263}, year = {2012}, date = {2012-01-01}, journal = {Human Brain Mapping}, volume = {33}, number = {4}, pages = {969--978}, abstract = {The aim of this functional magnetic resonance imaging (fMRI) study was to identify human brain areas that are sensitive to the direction of auditory motion. Such directional sensitivity was assessed in a hypothesis-free manner by analyzing fMRI response patterns across the entire brain volume using a spherical-searchlight approach. In addition, we assessed directional sensitivity in three predefined brain areas that have been associated with auditory motion perception in previous neuroimaging studies. These were the primary auditory cortex, the planum temporale and the visual motion complex (hMT/V5+). Our whole-brain analysis revealed that the direction of sound-source movement could be decoded from fMRI response patterns in the right auditory cortex and in a high-level visual area located in the right lateral occipital cortex. Our region-of-interest-based analysis showed that the decoding of the direction of auditory motion was most reliable with activation patterns of the left and right planum temporale. Auditory motion direction could not be decoded from activation patterns in hMT/V5+. These findings provide further evidence for the planum temporale playing a central role in supporting auditory motion perception. In addition, our findings suggest a cross-modal transfer of directional information to high-level visual cortex in healthy humans.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The aim of this functional magnetic resonance imaging (fMRI) study was to identify human brain areas that are sensitive to the direction of auditory motion. Such directional sensitivity was assessed in a hypothesis-free manner by analyzing fMRI response patterns across the entire brain volume using a spherical-searchlight approach. In addition, we assessed directional sensitivity in three predefined brain areas that have been associated with auditory motion perception in previous neuroimaging studies. These were the primary auditory cortex, the planum temporale and the visual motion complex (hMT/V5+). Our whole-brain analysis revealed that the direction of sound-source movement could be decoded from fMRI response patterns in the right auditory cortex and in a high-level visual area located in the right lateral occipital cortex. Our region-of-interest-based analysis showed that the decoding of the direction of auditory motion was most reliable with activation patterns of the left and right planum temporale. Auditory motion direction could not be decoded from activation patterns in hMT/V5+. These findings provide further evidence for the planum temporale playing a central role in supporting auditory motion perception. In addition, our findings suggest a cross-modal transfer of directional information to high-level visual cortex in healthy humans. |
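A spherical searchlight slides a small sphere across the brain and asks, at each location, whether a classifier can decode the condition (here, auditory motion direction) from the local response pattern. The toy version below runs on simulated voxel data and uses a KD-tree for the sphere neighbourhoods; the grid size, radius, and classifier are illustrative assumptions, not the authors' pipeline:

```python
# Toy spherical-searchlight decoding of motion direction from simulated
# voxel patterns, in the spirit of Alink et al. (2012).
import numpy as np
from scipy.spatial import cKDTree
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(3)
coords = np.argwhere(np.ones((8, 8, 8)))     # 512 voxels on a toy grid
n_trials, n_vox = 40, len(coords)
labels = np.repeat([0, 1], n_trials // 2)    # leftward vs rightward motion
data = rng.normal(size=(n_trials, n_vox))
data[labels == 1, :50] += 0.8                # make one region informative

tree = cKDTree(coords)
scores = np.zeros(n_vox)
for i, c in enumerate(coords):
    sphere = tree.query_ball_point(c, r=2.0) # voxels inside the searchlight
    scores[i] = cross_val_score(SVC(kernel="linear"),
                                data[:, sphere], labels, cv=5).mean()
print("peak decoding accuracy:", scores.max())
```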
Micah Allen; Darya Frank; Samuel D Schwarzkopf; Francesca Fardo; Joel S Winston; Tobias U Hauser; Geraint Rees Unexpected arousal modulates the influence of sensory noise on confidence Journal Article eLife, 5 , pp. 1–17, 2016. @article{Allen2016, title = {Unexpected arousal modulates the influence of sensory noise on confidence}, author = {Micah Allen and Darya Frank and Samuel D Schwarzkopf and Francesca Fardo and Joel S Winston and Tobias U Hauser and Geraint Rees}, doi = {10.7554/eLife.18103}, year = {2016}, date = {2016-01-01}, journal = {eLife}, volume = {5}, pages = {1--17}, abstract = {Human perception is invariably accompanied by a graded feeling of confidence that guides metacognitive awareness and decision-making. It is often assumed that this arises solely from the feed-forward encoding of the strength or precision of sensory inputs. In contrast, interoceptive inference models suggest that confidence reflects a weighted integration of sensory precision and expectations about internal states, such as arousal. Here we test this hypothesis using a novel psychophysical paradigm, in which unseen disgust-cues induced unexpected, unconscious arousal just before participants discriminated motion signals of variable precision. Across measures of perceptual bias, uncertainty, and physiological arousal we found that arousing disgust cues modulated the encoding of sensory noise. Furthermore, the degree to which trial-by-trial pupil fluctuations encoded this nonlinear interaction correlated with trial level confidence. Our results suggest that unexpected arousal regulates perceptual precision, such that subjective confidence reflects the integration of both external sensory and internal, embodied states.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Human perception is invariably accompanied by a graded feeling of confidence that guides metacognitive awareness and decision-making. It is often assumed that this arises solely from the feed-forward encoding of the strength or precision of sensory inputs. In contrast, interoceptive inference models suggest that confidence reflects a weighted integration of sensory precision and expectations about internal states, such as arousal. Here we test this hypothesis using a novel psychophysical paradigm, in which unseen disgust-cues induced unexpected, unconscious arousal just before participants discriminated motion signals of variable precision. Across measures of perceptual bias, uncertainty, and physiological arousal we found that arousing disgust cues modulated the encoding of sensory noise. Furthermore, the degree to which trial-by-trial pupil fluctuations encoded this nonlinear interaction correlated with trial level confidence. Our results suggest that unexpected arousal regulates perceptual precision, such that subjective confidence reflects the integration of both external sensory and internal, embodied states. |
Fredrik Allenmark; Zhuanghua Shi; Rasmus L Pistorius; Laura A Theisinger; Nikolaos Koutsouleris; Peter Falkai; Hermann J Müller; Christine M Falter-Wagner Acquisition and use of ‘priors' in autism: Typical in deciding where to look, atypical in deciding what is there Journal Article Journal of Autism and Developmental Disorders, pp. 1–15, 2020. @article{Allenmark2020, title = {Acquisition and use of ‘priors' in autism: Typical in deciding where to look, atypical in deciding what is there}, author = {Fredrik Allenmark and Zhuanghua Shi and Rasmus L Pistorius and Laura A Theisinger and Nikolaos Koutsouleris and Peter Falkai and Hermann J Müller and Christine M Falter-Wagner}, doi = {10.1007/s10803-020-04828-2}, year = {2020}, date = {2020-01-01}, journal = {Journal of Autism and Developmental Disorders}, pages = {1--15}, publisher = {Springer US}, abstract = {Individuals with Autism Spectrum Disorder (ASD) are thought to under-rely on prior knowledge in perceptual decision-making. This study examined whether this applies to decisions of attention allocation, of relevance for ‘predictive-coding' accounts of ASD. In a visual search task, a salient but task-irrelevant distractor appeared with higher probability in one display half. Individuals with ASD learned to avoid ‘attentional capture' by distractors in the probable region as effectively as control participants—indicating typical priors for deploying attention. However, capture by a ‘surprising' distractor at an unlikely location led to greatly slowed identification of a subsequent target at that location—indicating that individuals with ASD attempt to control surprise (unexpected attentional capture) by over-regulating parameters in post-selective decision-making.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Individuals with Autism Spectrum Disorder (ASD) are thought to under-rely on prior knowledge in perceptual decision-making. This study examined whether this applies to decisions of attention allocation, of relevance for ‘predictive-coding' accounts of ASD. In a visual search task, a salient but task-irrelevant distractor appeared with higher probability in one display half. Individuals with ASD learned to avoid ‘attentional capture' by distractors in the probable region as effectively as control participants—indicating typical priors for deploying attention. However, capture by a ‘surprising' distractor at an unlikely location led to greatly slowed identification of a subsequent target at that location—indicating that individuals with ASD attempt to control surprise (unexpected attentional capture) by over-regulating parameters in post-selective decision-making. |
Ava-Ann Allman; Chawki Benkelfat; France Durand; Igor Sibon; Alain Dagher; Marco Leyton; Glen B Baker; Gillian A O'Driscoll Effect of D-amphetamine on inhibition and motor planning as a function of baseline performance. Journal Article Psychopharmacology, 211 (4), pp. 423–33, 2010. @article{Allman2010, title = {Effect of D-amphetamine on inhibition and motor planning as a function of baseline performance.}, author = {Ava-Ann Allman and Chawki Benkelfat and France Durand and Igor Sibon and Alain Dagher and Marco Leyton and Glen B Baker and Gillian A O'Driscoll}, doi = {10.1007/s00213-010-1912-x}, year = {2010}, date = {2010-01-01}, journal = {Psychopharmacology}, volume = {211}, number = {4}, pages = {423--33}, abstract = {RATIONALE: Baseline performance has been reported to predict dopamine (DA) effects on working memory, following an inverted-U pattern. This pattern may hold true for other executive functions that are DA-sensitive. OBJECTIVES: The objective of this study is to investigate the effect of D-amphetamine, an indirect DA agonist, on two other putatively DA-sensitive executive functions, inhibition and motor planning, as a function of baseline performance. METHODS: Participants with no prior stimulant exposure participated in a double-blind crossover study of a single dose of 0.3 mg/kg, p.o. of D-amphetamine and placebo. Participants were divided into high and low groups, based on their performance on the antisaccade and predictive saccade tasks on the baseline day. Executive functions, mood states, heart rate and blood pressure were assessed before (T0) and after drug administration, at 1.5 (T1), 2.5 (T2) and 3.5 h (T3) post-drug. RESULTS: Antisaccade errors decreased with D-amphetamine irrespective of baseline performance (p = 0.025). For antisaccade latency, participants who generated short-latency antisaccades at baseline had longer latencies on D-amphetamine than placebo, while those with long-latency antisaccades at baseline had shorter latencies on D-amphetamine than placebo (drug x group}, keywords = {}, pubstate = {published}, tppubtype = {article} } RATIONALE: Baseline performance has been reported to predict dopamine (DA) effects on working memory, following an inverted-U pattern. This pattern may hold true for other executive functions that are DA-sensitive. OBJECTIVES: The objective of this study is to investigate the effect of D-amphetamine, an indirect DA agonist, on two other putatively DA-sensitive executive functions, inhibition and motor planning, as a function of baseline performance. METHODS: Participants with no prior stimulant exposure participated in a double-blind crossover study of a single dose of 0.3 mg/kg, p.o. of D-amphetamine and placebo. Participants were divided into high and low groups, based on their performance on the antisaccade and predictive saccade tasks on the baseline day. Executive functions, mood states, heart rate and blood pressure were assessed before (T0) and after drug administration, at 1.5 (T1), 2.5 (T2) and 3.5 h (T3) post-drug. RESULTS: Antisaccade errors decreased with D-amphetamine irrespective of baseline performance (p = 0.025). For antisaccade latency, participants who generated short-latency antisaccades at baseline had longer latencies on D-amphetamine than placebo, while those with long-latency antisaccades at baseline had shorter latencies on D-amphetamine than placebo (drug x group |
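The inverted-U logic in this entry is tested by splitting participants on baseline performance and looking for a crossover: participants fast at baseline slow down on drug, participants slow at baseline speed up. A toy illustration of that median-split interaction follows (simulated latencies and effect sizes; not the authors' analysis pipeline):

```python
# Sketch: median-split by baseline antisaccade latency, then compare the
# drug-placebo difference in the two groups (a crossover interaction).
import numpy as np

rng = np.random.default_rng(4)
n = 30
baseline = rng.normal(280, 40, n)            # hypothetical latencies (ms)
# Simulate pull toward an assumed optimum: drug moves latencies toward 280 ms.
on_drug = baseline + 0.5 * (280 - baseline) + rng.normal(0, 10, n)
placebo = baseline + rng.normal(0, 10, n)

fast = baseline < np.median(baseline)
effect = on_drug - placebo                   # positive = slower on drug
print("fast-at-baseline group: drug effect %+.1f ms" % effect[fast].mean())
print("slow-at-baseline group: drug effect %+.1f ms" % effect[~fast].mean())
```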
Ava-Ann Allman; Ulrich Ettinger; Ridha Joober; Gillian A O'Driscoll Effects of methylphenidate on basic and higher-order oculomotor functions Journal Article Journal of Psychopharmacology, 26 (11), pp. 1471–1479, 2012. @article{Allman2012, title = {Effects of methylphenidate on basic and higher-order oculomotor functions}, author = {Ava-Ann Allman and Ulrich Ettinger and Ridha Joober and Gillian A O'Driscoll}, doi = {10.1177/0269881112446531}, year = {2012}, date = {2012-01-01}, journal = {Journal of Psychopharmacology}, volume = {26}, number = {11}, pages = {1471--1479}, abstract = {Eye movements are sensitive indicators of pharmacological effects on sensorimotor and cognitive processing. Methylphenidate (MPH) is one of the most prescribed medications in psychiatry. It is increasingly used as a cognitive enhancer by healthy individuals. However, little is known of its effect on healthy cognition. Here we used oculomotor tests to evaluate the effects of MPH on basic oculomotor and executive functions. Twenty-nine males were given 20 mg of MPH orally in a double-blind placebo-controlled crossover design. Participants performed visually-guided saccades, sinusoidal smooth pursuit, predictive saccades and antisaccades one hour post-capsule administration. Heart rate and blood pressure were assessed prior to capsule administration, and again before and after task performance. Visually-guided saccade latency decreased with MPH (p < 0.004). Smooth pursuit gain increased on MPH (p < 0.001) and number of saccades during pursuit decreased (p < 0.001). Proportion of predictive saccades increased on MPH (p < 0.004), specifically in conditions with predictable timing. Peak velocity of predictive saccades increased with MPH (p < 0.01). Antisaccade errors and latency were unaffected. Physiological variables were also unaffected. The effects on visually-guided saccade latency and peak velocity are consistent with MPH effects on dopamine in basal ganglia. The improvements in predictive saccade conditions and smooth pursuit suggest effects on timing functions.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Eye movements are sensitive indicators of pharmacological effects on sensorimotor and cognitive processing. Methylphenidate (MPH) is one of the most prescribed medications in psychiatry. It is increasingly used as a cognitive enhancer by healthy individuals. However, little is known of its effect on healthy cognition. Here we used oculomotor tests to evaluate the effects of MPH on basic oculomotor and executive functions. Twenty-nine males were given 20 mg of MPH orally in a double-blind placebo-controlled crossover design. Participants performed visually-guided saccades, sinusoidal smooth pursuit, predictive saccades and antisaccades one hour post-capsule administration. Heart rate and blood pressure were assessed prior to capsule administration, and again before and after task performance. Visually-guided saccade latency decreased with MPH (p < 0.004). Smooth pursuit gain increased on MPH (p < 0.001) and number of saccades during pursuit decreased (p < 0.001). Proportion of predictive saccades increased on MPH (p < 0.004), specifically in conditions with predictable timing. Peak velocity of predictive saccades increased with MPH (p < 0.01). Antisaccade errors and latency were unaffected. Physiological variables were also unaffected. 
The effects on visually-guided saccade latency and peak velocity are consistent with MPH effects on dopamine in basal ganglia. The improvements in predictive saccade conditions and smooth pursuit suggest effects on timing functions. |
Albandri Alotaibi; Geoffrey Underwood; Alastair D Smith Cultural differences in attention: Eye movement evidence from a comparative visual search task Journal Article Consciousness and Cognition, 55 , pp. 254–265, 2017. @article{Alotaibi2017, title = {Cultural differences in attention: Eye movement evidence from a comparative visual search task}, author = {Albandri Alotaibi and Geoffrey Underwood and Alastair D Smith}, doi = {10.1016/j.concog.2017.09.002}, year = {2017}, date = {2017-01-01}, journal = {Consciousness and Cognition}, volume = {55}, pages = {254--265}, abstract = {Individual differences in visual attention have been linked to thinking style: analytic thinking (common in individualistic cultures) is thought to promote attention to detail and focus on the most important part of a scene, whereas holistic thinking (common in collectivist cultures) promotes attention to the global structure of a scene and the relationship between its parts. However, this theory is primarily based on relatively simple judgement tasks. We compared groups from Great Britain (an individualist culture) and Saudi Arabia (a collectivist culture) on a more complex comparative visual search task, using simple natural scenes. A higher overall number of fixations for Saudi participants, along with longer search times, indicated less efficient search behaviour than British participants. Furthermore, intra-group comparisons of scan-paths revealed less similarity within the Saudi group than within the British group. Together, these findings suggest that there is a positive relationship between an analytic cognitive style and controlled attention.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Individual differences in visual attention have been linked to thinking style: analytic thinking (common in individualistic cultures) is thought to promote attention to detail and focus on the most important part of a scene, whereas holistic thinking (common in collectivist cultures) promotes attention to the global structure of a scene and the relationship between its parts. However, this theory is primarily based on relatively simple judgement tasks. We compared groups from Great Britain (an individualist culture) and Saudi Arabia (a collectivist culture) on a more complex comparative visual search task, using simple natural scenes. A higher overall number of fixations for Saudi participants, along with longer search times, indicated less efficient search behaviour than British participants. Furthermore, intra-group comparisons of scan-paths revealed less similarity within the Saudi group than within the British group. Together, these findings suggest that there is a positive relationship between an analytic cognitive style and controlled attention. |
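Scan-path similarity between observers is commonly quantified by coding fixations as a string of region-of-interest labels and computing a normalized edit distance. The abstract does not name the metric used in this paper, so the Levenshtein-based sketch below is one standard choice, not necessarily the authors':

```python
# Sketch: string-edit similarity between two scan-paths, each coded as a
# sequence of region-of-interest (ROI) labels.
def levenshtein(a: str, b: str) -> int:
    """Classic dynamic-programming edit distance between two strings."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

def scanpath_similarity(a: str, b: str) -> float:
    """1.0 = identical ROI sequences, 0.0 = maximally different."""
    return 1.0 - levenshtein(a, b) / max(len(a), len(b))

# Two hypothetical viewers scanning ROIs labelled A-E:
print(scanpath_similarity("ABCDE", "ABDCE"))   # mostly similar orders
print(scanpath_similarity("ABCDE", "EDEDA"))   # very different orders
```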
Reem Alsadoon; Trude Heift Textual input enhancement for vowel blindness: A study with Arabic ESL learners Journal Article The Modern Language Journal, 99 (1), pp. 57–79, 2015. @article{Alsadoon2015, title = {Textual input enhancement for vowel blindness: A study with Arabic ESL learners}, author = {Reem Alsadoon and Trude Heift}, doi = {10.1111/modl.12188}, year = {2015}, date = {2015-01-01}, journal = {The Modern Language Journal}, volume = {99}, number = {1}, pages = {57--79}, abstract = {This study explores the impact of textual input enhancement on the noticing and intake of English vowels by Arabic L2 learners of English. Arabic L1 speakers are known to experience vowel blindness, commonly defined as a difficulty in the textual decoding and encoding of English vowels due to an insufficient decoding of the word form. Thirty beginner ESL learners participated in a training study during which the experimental group received textual input enhancement on English vowels. Students completed a pretest and an immediate and delayed posttest. An eye-tracker recorded students' eye fixations during the treatment phase. Results indicate that vowel blindness was significantly reduced for the experimental group who received vowel training in the form of textual input enhancement. This might be due to a longer focus on the target words as suggested by our eye-tracking data.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study explores the impact of textual input enhancement on the noticing and intake of English vowels by Arabic L2 learners of English. Arabic L1 speakers are known to experience vowel blindness, commonly defined as a difficulty in the textual decoding and encoding of English vowels due to an insufficient decoding of the word form. Thirty beginner ESL learners participated in a training study during which the experimental group received textual input enhancement on English vowels. Students completed a pretest and an immediate and delayed posttest. An eye-tracker recorded students' eye fixations during the treatment phase. Results indicate that vowel blindness was significantly reduced for the experimental group who received vowel training in the form of textual input enhancement. This might be due to a longer focus on the target words as suggested by our eye-tracking data. |
Christian F Altmann; Arne Deubelius; Zoe Kourtzi Shape saliency modulates contextual processing in the human lateral occipital complex Journal Article Journal of Cognitive Neuroscience, 16 (5), pp. 794–804, 2004. @article{Altmann2004, title = {Shape saliency modulates contextual processing in the human lateral occipital complex}, author = {Christian F Altmann and Arne Deubelius and Zoe Kourtzi}, doi = {10.1162/089892904970825}, year = {2004}, date = {2004-01-01}, journal = {Journal of Cognitive Neuroscience}, volume = {16}, number = {5}, pages = {794--804}, abstract = {Visual context influences our perception of target objects in natural scenes. However, little is known about the analysis of context information and its role in shape perception in the human brain. We investigated whether the human lateral occipital complex (LOC), known to be involved in the visual analysis of shapes, also processes information about the context of shapes within cluttered scenes. We employed an fMRI adaptation paradigm in which fMRI responses are lower for two identical than for two different stimuli presented consecutively. The stimuli consisted of closed target contours defined by aligned Gabor elements embedded in a background of randomly oriented Gabors. We measured fMRI adaptation in the LOC across changes in the context of the target shapes by manipulating the position and orientation of the background elements. No adaptation was observed across context changes when the background elements were presented in the same plane as the target elements. However, adaptation was observed when the grouping of the target elements was enhanced in a bottom-up (i.e., grouping by disparity or motion) or top-down (i.e., shape priming) manner and thus the saliency of the target shape increased. These findings suggest that the LOC processes information not only about shapes, but also about their context. This processing of context information in the LOC is modulated by figure-ground segmentation and grouping processes. That is, neural populations in the LOC encode context information when relevant to the perception of target shapes, but represent salient targets independent of context changes.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual context influences our perception of target objects in natural scenes. However, little is known about the analysis of context information and its role in shape perception in the human brain. We investigated whether the human lateral occipital complex (LOC), known to be involved in the visual analysis of shapes, also processes information about the context of shapes within cluttered scenes. We employed an fMRI adaptation paradigm in which fMRI responses are lower for two identical than for two different stimuli presented consecutively. The stimuli consisted of closed target contours defined by aligned Gabor elements embedded in a background of randomly oriented Gabors. We measured fMRI adaptation in the LOC across changes in the context of the target shapes by manipulating the position and orientation of the background elements. No adaptation was observed across context changes when the background elements were presented in the same plane as the target elements. However, adaptation was observed when the grouping of the target elements was enhanced in a bottom-up (i.e., grouping by disparity or motion) or top-down (i.e., shape priming) manner and thus the saliency of the target shape increased. 
These findings suggest that the LOC processes information not only about shapes, but also about their context. This processing of context information in the LOC is modulated by figure-ground segmentation and grouping processes. That is, neural populations in the LOC encode context information when relevant to the perception of target shapes, but represent salient targets independent of context changes. |
Kaoru Amano; Tsunehiro Takeda; Tomoki Haji; Masahiko Terao; Kazushi Maruya; Kenji Matsumoto; Ikuya Murakami; Shin'ya Nishida Human neural responses involved in spatial pooling of locally ambiguous motion signals Journal Article Journal of Neurophysiology, 107 (12), pp. 3493–3508, 2012. @article{Amano2012, title = {Human neural responses involved in spatial pooling of locally ambiguous motion signals}, author = {Kaoru Amano and Tsunehiro Takeda and Tomoki Haji and Masahiko Terao and Kazushi Maruya and Kenji Matsumoto and Ikuya Murakami and Shin'ya Nishida}, doi = {10.1152/jn.00821.2011}, year = {2012}, date = {2012-01-01}, journal = {Journal of Neurophysiology}, volume = {107}, number = {12}, pages = {3493--3508}, abstract = {Early visual motion signals are local and one-dimensional (1-D). For specification of global two-dimensional (2-D) motion vectors, the visual system should appropriately integrate these signals across orientation and space. Previous neurophysiological studies have suggested that this integration process consists of two computational steps (estimation of local 2-D motion vectors, followed by their spatial pooling), both being identified in the area MT. Psychophysical findings, however, suggest that under certain stimulus conditions, the human visual system can also compute mathematically correct global motion vectors from direct pooling of spatially distributed 1-D motion signals. To study the neural mechanisms responsible for this novel 1-D motion pooling, we conducted human magnetoencephalography (MEG) and functional MRI experiments using a global motion stimulus comprising multiple moving Gabors (global-Gabor motion). In the first experiment, we measured MEG and blood oxygen level-dependent responses while changing motion coherence of global-Gabor motion. In the second experiment, we investigated cortical responses correlated with direction-selective adaptation to the global 2-D motion, not to local 1-D motions. We found that human MT complex (hMT+) responses show both coherence dependency and direction selectivity to global motion based on 1-D pooling. The results provide the first evidence that hMT+ is the locus of 1-D motion pooling, as well as that of conventional 2-D motion pooling.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Early visual motion signals are local and one-dimensional (1-D). For specification of global two-dimensional (2-D) motion vectors, the visual system should appropriately integrate these signals across orientation and space. Previous neurophysiological studies have suggested that this integration process consists of two computational steps (estimation of local 2-D motion vectors, followed by their spatial pooling), both being identified in the area MT. Psychophysical findings, however, suggest that under certain stimulus conditions, the human visual system can also compute mathematically correct global motion vectors from direct pooling of spatially distributed 1-D motion signals. To study the neural mechanisms responsible for this novel 1-D motion pooling, we conducted human magnetoencephalography (MEG) and functional MRI experiments using a global motion stimulus comprising multiple moving Gabors (global-Gabor motion). In the first experiment, we measured MEG and blood oxygen level-dependent responses while changing motion coherence of global-Gabor motion. In the second experiment, we investigated cortical responses correlated with direction-selective adaptation to the global 2-D motion, not to local 1-D motions. 
We found that human MT complex (hMT+) responses show both coherence dependency and direction selectivity to global motion based on 1-D pooling. The results provide the first evidence that hMT+ is the locus of 1-D motion pooling, as well as that of conventional 2-D motion pooling. |
Ken-ichi Amemori; Satoko Amemori; Daniel J Gibson; Ann M Graybiel Striatal microstimulation induces persistent and repetitive negative decision-making predicted by striatal beta-band oscillation Journal Article Neuron, 99 (4), pp. 829–841, 2018. @article{Amemori2018, title = {Striatal microstimulation induces persistent and repetitive negative decision-making predicted by striatal beta-band oscillation}, author = {Ken-ichi Amemori and Satoko Amemori and Daniel J Gibson and Ann M Graybiel}, doi = {10.1016/j.neuron.2018.07.022}, year = {2018}, date = {2018-01-01}, journal = {Neuron}, volume = {99}, number = {4}, pages = {829--841}, publisher = {Elsevier Inc.}, abstract = {Persistent thoughts inducing irrationally pessimistic and repetitive decisions are often symptoms of mood and anxiety disorders. Regional neural hyper-activities have been associated with these disorders, but it remains unclear whether there is a specific brain region causally involved in these persistent valuations. Here, we identified potential sources of such persistent states by microstimulating the striatum of macaques performing a task by which we could quantitatively estimate their subjective pessimistic states using their choices to accept or reject conflicting offers. We found that this microstimulation induced irrationally repetitive choices with negative evaluations. Local field potentials recorded in the same microstimulation sessions exhibited modulations of beta-band oscillatory activity that paralleled the persistent negative states influencing repetitive decisions. These findings demonstrate that local striatal zones can causally affect subjective states influencing persistent negative valuation and that abnormal beta-band oscillations can be associated with persistency in valuation accompanied by an anxiety-like state.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Persistent thoughts inducing irrationally pessimistic and repetitive decisions are often symptoms of mood and anxiety disorders. Regional neural hyper-activities have been associated with these disorders, but it remains unclear whether there is a specific brain region causally involved in these persistent valuations. Here, we identified potential sources of such persistent states by microstimulating the striatum of macaques performing a task by which we could quantitatively estimate their subjective pessimistic states using their choices to accept or reject conflicting offers. We found that this microstimulation induced irrationally repetitive choices with negative evaluations. Local field potentials recorded in the same microstimulation sessions exhibited modulations of beta-band oscillatory activity that paralleled the persistent negative states influencing repetitive decisions. These findings demonstrate that local striatal zones can causally affect subjective states influencing persistent negative valuation and that abnormal beta-band oscillations can be associated with persistency in valuation accompanied by an anxiety-like state. |
Ilhame Ameqrane; Pierre Pouget; Nicolas Wattiez; Roger Carpenter; Marcus Missal Implicit and explicit timing in oculomotor control Journal Article PLoS ONE, 9 (4), pp. e93958, 2014. @article{Ameqrane2014, title = {Implicit and explicit timing in oculomotor control}, author = {Ilhame Ameqrane and Pierre Pouget and Nicolas Wattiez and Roger Carpenter and Marcus Missal}, doi = {10.1371/journal.pone.0093958}, year = {2014}, date = {2014-01-01}, journal = {PLoS ONE}, volume = {9}, number = {4}, pages = {e93958}, abstract = {The passage of time can be estimated either explicitly, e.g. before leaving home in the morning, or implicitly, e.g. when catching a flying ball. In the present study, the latency of saccadic eye movements was used to evaluate differences between implicit and explicit timing. Humans were required to make a saccade between a central and a peripheral position on a computer screen. The delay between the extinction of a central target and the appearance of an eccentric target was the independent variable that could take one out of four different values (400, 900, 1400 or 1900 ms). In target trials, the delay period lasted for one of the four durations randomly. At the end of the delay, a saccade was initiated by the appearance of an eccentric target. Cue&target trials were similar to target trials but the duration of the delay was visually cued. In probe trials, the duration of the upcoming delay was cued, but there was no eccentric target and subjects had to internally generate a saccade at the estimated end of the delay. In target and cue&target trials, the mean and variance of latency distributions decreased as delay duration increased. In cue&target trials latencies were shorter. In probe trials, the variance increased with increasing delay duration and scalar variability was observed. The major differences in saccadic latency distributions were observed between visually-guided (target and cue&target trials) and internally-generated saccades (probe trials). In target and cue&target trials the timing of the response was implicit. In probe trials, the timing of the response was internally-generated and explicitly based on the duration of the visual cue. Scalar timing was observed only during probe trials. This study supports the hypothesis that there is no ubiquitous timing system in the brain but independent timing processes active depending on task demands.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The passage of time can be estimated either explicitly, e.g. before leaving home in the morning, or implicitly, e.g. when catching a flying ball. In the present study, the latency of saccadic eye movements was used to evaluate differences between implicit and explicit timing. Humans were required to make a saccade between a central and a peripheral position on a computer screen. The delay between the extinction of a central target and the appearance of an eccentric target was the independent variable that could take one out of four different values (400, 900, 1400 or 1900 ms). In target trials, the delay period lasted for one of the four durations randomly. At the end of the delay, a saccade was initiated by the appearance of an eccentric target. Cue&target trials were similar to target trials but the duration of the delay was visually cued. In probe trials, the duration of the upcoming delay was cued, but there was no eccentric target and subjects had to internally generate a saccade at the estimated end of the delay. 
In target and cue&target trials, the mean and variance of latency distributions decreased as delay duration increased. In cue&target trials latencies were shorter. In probe trials, the variance increased with increasing delay duration and scalar variability was observed. The major differences in saccadic latency distributions were observed between visually-guided (target and cue&target trials) and internally-generated saccades (probe trials). In target and cue&target trials the timing of the response was implicit. In probe trials, the timing of the response was internally-generated and explicitly based on the duration of the visual cue. Scalar timing was observed only during probe trials. This study supports the hypothesis that there is no ubiquitous timing system in the brain but independent timing processes active depending on task demands. |
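Scalar variability, the signature observed only in probe trials, means that the standard deviation of an internally timed response grows in proportion to the timed interval, whereas visually triggered latencies carry roughly constant noise. A toy simulation of that contrast (the Weber fraction and latency parameters below are assumptions for illustration, not values from the paper):

```python
import numpy as np

rng = np.random.default_rng(1)
delays = [400, 900, 1400, 1900]  # ms, the four delay durations
weber = 0.15                     # assumed Weber fraction

for d in delays:
    # Internally generated saccades: timing noise scales with the interval
    probe_sd = rng.normal(d, weber * d, 10000).std()
    # Visually triggered saccades: roughly constant afferent/motor noise
    target_sd = rng.normal(d + 180, 30, 10000).std()
    print(d, round(probe_sd, 1), round(target_sd, 1))
```

The probe-trial column grows linearly with the delay while the target-trial column stays flat, mirroring the dissociation reported above.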
Roy Amit; Dekel Abeles; Marisa Carrasco; Shlomit Yuval-Greenberg Oculomotor inhibition reflects temporal expectations Journal Article NeuroImage, 184, pp. 279–292, 2019. @article{Amit2019a, title = {Oculomotor inhibition reflects temporal expectations}, author = {Roy Amit and Dekel Abeles and Marisa Carrasco and Shlomit Yuval-Greenberg}, doi = {10.1016/j.neuroimage.2018.09.026}, year = {2019}, date = {2019-01-01}, journal = {NeuroImage}, volume = {184}, pages = {279--292}, abstract = {The accurate extraction of signals out of noisy environments is a major challenge of the perceptual system. Forming temporal expectations and continuously matching them with perceptual input can facilitate this process. In humans, temporal expectations are typically assessed using behavioral measures, which provide only retrospective but no real-time estimates during target anticipation, or by using electrophysiological measures, which require extensive preprocessing and are difficult to interpret. Here we show a new correlate of temporal expectations based on oculomotor behavior. Observers performed an orientation-discrimination task on a central grating target, while their gaze position and EEG were monitored. In each trial, a cue preceded the target by a varying interval (“foreperiod”). In separate blocks, the cue was either predictive or non-predictive regarding the timing of the target. Results showed that saccades and blinks were inhibited more prior to an anticipated regular target than a less-anticipated irregular one. This consistent oculomotor inhibition effect enabled a trial-by-trial classification according to interval-regularity. Additionally, in the regular condition the slope of saccade-rate and drift were shallower for longer than shorter foreperiods, indicating their adjustment according to temporal expectations. Comparing the sensitivity of this oculomotor marker with those of other common predictability markers (e.g. alpha-suppression) showed that it is a sensitive marker for cue-related anticipation. In contrast, temporal changes in conditional probabilities (hazard-rate) modulated alpha-suppression more than cue-related anticipation. We conclude that pre-target oculomotor inhibition is a correlate of temporal predictions induced by cue-target associations, whereas alpha-suppression is more sensitive to conditional probabilities across time.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The accurate extraction of signals out of noisy environments is a major challenge of the perceptual system. Forming temporal expectations and continuously matching them with perceptual input can facilitate this process. In humans, temporal expectations are typically assessed using behavioral measures, which provide only retrospective but no real-time estimates during target anticipation, or by using electrophysiological measures, which require extensive preprocessing and are difficult to interpret. Here we show a new correlate of temporal expectations based on oculomotor behavior. Observers performed an orientation-discrimination task on a central grating target, while their gaze position and EEG were monitored. In each trial, a cue preceded the target by a varying interval (“foreperiod”). In separate blocks, the cue was either predictive or non-predictive regarding the timing of the target. Results showed that saccades and blinks were inhibited more prior to an anticipated regular target than a less-anticipated irregular one. 
This consistent oculomotor inhibition effect enabled a trial-by-trial classification according to interval-regularity. Additionally, in the regular condition the slope of saccade-rate and drift were shallower for longer than shorter foreperiods, indicating their adjustment according to temporal expectations. Comparing the sensitivity of this oculomotor marker with those of other common predictability markers (e.g. alpha-suppression) showed that it is a sensitive marker for cue-related anticipation. In contrast, temporal changes in conditional probabilities (hazard-rate) modulated alpha-suppression more than cue-related anticipation. We conclude that pre-target oculomotor inhibition is a correlate of temporal predictions induced by cue-target associations, whereas alpha-suppression is more sensitive to conditional probabilities across time. |
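The hazard rate invoked in the final contrast is straightforward to compute for a discrete set of foreperiods: it is the probability that the target appears at a given moment, conditioned on its not having appeared yet, which is why anticipation rises toward the last possible delay. A sketch with four equiprobable foreperiods (the values are illustrative):

```python
import numpy as np

foreperiods = [400, 900, 1400, 1900]   # ms, equiprobable
p = np.full(4, 0.25)

# Hazard: P(target now | target not yet), rising to 1 at the last delay
survival = 1 - np.concatenate(([0.0], np.cumsum(p)[:-1]))
hazard = p / survival
print({f: round(float(h), 2) for f, h in zip(foreperiods, hazard)})
# {400: 0.25, 900: 0.33, 1400: 0.5, 1900: 1.0}
```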
Roy Amit; Dekel Abeles; Shlomit Yuval-Greenberg Transient and sustained effects of stimulus properties on the generation of microsaccades Journal Article Journal of Vision, 19 (1), pp. 1–23, 2019. @article{Amit2019b, title = {Transient and sustained effects of stimulus properties on the generation of microsaccades}, author = {Roy Amit and Dekel Abeles and Shlomit Yuval-Greenberg}, doi = {10.1167/19.1.6}, year = {2019}, date = {2019-01-01}, journal = {Journal of Vision}, volume = {19}, number = {1}, pages = {1--23}, publisher = {The Association for Research in Vision and Ophthalmology}, abstract = {Saccades shift the gaze rapidly every few hundred milliseconds from one fixated location to the next, producing a flow of visual input into the visual system even in the absence of changes in the environment. During fixation, small saccades called microsaccades are produced 1–3 times per second, generating a flow of visual input. The characteristics of this visual flow are determined by the timings of the saccades and by the characteristics of the visual stimuli on which they are performed. Previous models of microsaccade generation have accounted for the effects of external stimulation on the production of microsaccades, but they have not considered the effects of the prolonged background stimulus on which microsaccades are performed. The effects of this stimulus on the process of microsaccade generation could be sustained, following its prolonged presentation, or transient, through the visual transients produced by the microsaccades themselves. In four experiments, we varied the properties of the constant displays and examined the resulting modulation of microsaccade properties: their sizes, their timings, and the correlations between properties of consecutive microsaccades. Findings show that displays of higher spatial frequency and contrast produce smaller microsaccades and longer minimal intervals between consecutive microsaccades; and smaller microsaccades are followed by smaller and delayed microsaccades. We explain these findings in light of previous models and suggest a conceptual model by which both sustained and transient effects of the stimulus have central roles in determining the generation of microsaccades.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Saccades shift the gaze rapidly every few hundred milliseconds from one fixated location to the next, producing a flow of visual input into the visual system even in the absence of changes in the environment. During fixation, small saccades called microsaccades are produced 1–3 times per second, generating a flow of visual input. The characteristics of this visual flow are determined by the timings of the saccades and by the characteristics of the visual stimuli on which they are performed. Previous models of microsaccade generation have accounted for the effects of external stimulation on the production of microsaccades, but they have not considered the effects of the prolonged background stimulus on which microsaccades are performed. The effects of this stimulus on the process of microsaccade generation could be sustained, following its prolonged presentation, or transient, through the visual transients produced by the microsaccades themselves. In four experiments, we varied the properties of the constant displays and examined the resulting modulation of microsaccade properties: their sizes, their timings, and the correlations between properties of consecutive microsaccades. 
Findings show that displays of higher spatial frequency and contrast produce smaller microsaccades and longer minimal intervals between consecutive microsaccades; and smaller microsaccades are followed by smaller and delayed microsaccades. We explain these findings in light of previous models and suggest a conceptual model by which both sustained and transient effects of the stimulus have central roles in determining the generation of microsaccades. |
Richard Amlôt; Robin Walker Are somatosensory saccades voluntary or reflexive? Journal Article Experimental Brain Research, 168 (4), pp. 557–565, 2006. @article{Amlot2006, title = {Are somatosensory saccades voluntary or reflexive?}, author = {Richard Aml{ô}t and Robin Walker}, doi = {10.1007/s00221-005-0116-9}, year = {2006}, date = {2006-01-01}, journal = {Experimental Brain Research}, volume = {168}, number = {4}, pages = {557--565}, abstract = {The present study examines whether the distinction between voluntary (endogenous) and reflexive (stimulus-elicited) saccades made in the visual modality can be applied to the somatosensory modality. The behavioural characteristics of putative reflexive pro-saccades and voluntary anti-saccades made to visual and somatosensory stimuli were examined. Both visual and somatosensory pro-saccades had much shorter latency than voluntary anti-saccades made in the direction opposite to a peripheral stimulus. Furthermore, erroneous pro-saccades were made towards both visual and somatosensory stimuli on approximately 11-13% of anti-saccade trials. The observed difference in pro- and anti-saccade latency and the presence of pro-saccade errors in the anti-saccade task indicates that a somatosensory stimulus can elicit a form of reflexive saccade comparable to pro-saccades made in the visual modality. It is proposed that a peripheral somatosensory stimulus can elicit a form of reflexive saccade and that somatosensory saccades do not depend exclusively on higher level endogenous control processes for their generation. However, a comparison of the underlying latency distributions and of peak-velocity profiles of saccades made to visual and somatosensory stimuli showed that this distinction may be less clearly defined for the somatosensory modality and that modality-specific differences (such as differences in neural conduction rates) in the underlying oculomotor structures involved in saccade target selection also need to be considered. It is further suggested that a broader conceptualisation of saccades and saccade programming beyond the simple voluntary and reflexive dichotomy, that takes into account the control processes involved in saccade generation for both modalities, may be required.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The present study examines whether the distinction between voluntary (endogenous) and reflexive (stimulus-elicited) saccades made in the visual modality can be applied to the somatosensory modality. The behavioural characteristics of putative reflexive pro-saccades and voluntary anti-saccades made to visual and somatosensory stimuli were examined. Both visual and somatosensory pro-saccades had much shorter latency than voluntary anti-saccades made in the direction opposite to a peripheral stimulus. Furthermore, erroneous pro-saccades were made towards both visual and somatosensory stimuli on approximately 11-13% of anti-saccade trials. The observed difference in pro- and anti-saccade latency and the presence of pro-saccade errors in the anti-saccade task indicates that a somatosensory stimulus can elicit a form of reflexive saccade comparable to pro-saccades made in the visual modality. It is proposed that a peripheral somatosensory stimulus can elicit a form of reflexive saccade and that somatosensory saccades do not depend exclusively on higher level endogenous control processes for their generation. 
However, a comparison of the underlying latency distributions and of peak-velocity profiles of saccades made to visual and somatosensory stimuli showed that this distinction may be less clearly defined for the somatosensory modality and that modality-specific differences (such as differences in neural conduction rates) in the underlying oculomotor structures involved in saccade target selection also need to be considered. It is further suggested that a broader conceptualisation of saccades and saccade programming beyond the simple voluntary and reflexive dichotomy, that takes into account the control processes involved in saccade generation for both modalities, may be required. |
Tatiana A Amor; Saulo D S Reis; Daniel Campos; Hans J Herrmann; José S Andrade Persistence in eye movement during visual search Journal Article Scientific Reports, 6, pp. 20815, 2016. @article{Amor2016, title = {Persistence in eye movement during visual search}, author = {Tatiana A Amor and Saulo D S Reis and Daniel Campos and Hans J Herrmann and José S Andrade}, doi = {10.1038/srep20815}, year = {2016}, date = {2016-01-01}, journal = {Scientific Reports}, volume = {6}, pages = {20815}, publisher = {Nature Publishing Group}, abstract = {As any cognitive task, visual search involves a number of underlying processes that cannot be directly observed and measured. In this way, the movement of the eyes certainly represents the most explicit and closest connection we can get to the inner mechanisms governing this cognitive activity. Here we show that the process of eye movement during visual search, consisting of sequences of fixations intercalated by saccades, exhibits distinctive persistent behaviors. Initially, by focusing on saccadic directions and intersaccadic angles, we disclose that the probability distributions of these measures show a clear preference of participants towards a reading-like mechanism (geometrical persistence), whose features and potential advantages for searching/foraging are discussed. We then perform a Multifractal Detrended Fluctuation Analysis (MF-DFA) over the time series of jump magnitudes in the eye trajectory and find that it exhibits a typical multifractal behavior arising from the sequential combination of saccades and fixations. By inspecting the time series composed of only fixational movements, our results reveal instead a monofractal behavior with a Hurst exponent, which indicates the presence of long-range power-law positive correlations (statistical persistence). We expect that our methodological approach can be adopted as a way to understand persistence and strategy-planning during visual search.}, keywords = {}, pubstate = {published}, tppubtype = {article} } As any cognitive task, visual search involves a number of underlying processes that cannot be directly observed and measured. In this way, the movement of the eyes certainly represents the most explicit and closest connection we can get to the inner mechanisms governing this cognitive activity. Here we show that the process of eye movement during visual search, consisting of sequences of fixations intercalated by saccades, exhibits distinctive persistent behaviors. Initially, by focusing on saccadic directions and intersaccadic angles, we disclose that the probability distributions of these measures show a clear preference of participants towards a reading-like mechanism (geometrical persistence), whose features and potential advantages for searching/foraging are discussed. We then perform a Multifractal Detrended Fluctuation Analysis (MF-DFA) over the time series of jump magnitudes in the eye trajectory and find that it exhibits a typical multifractal behavior arising from the sequential combination of saccades and fixations. By inspecting the time series composed of only fixational movements, our results reveal instead a monofractal behavior with a Hurst exponent, which indicates the presence of long-range power-law positive correlations (statistical persistence). We expect that our methodological approach can be adopted as a way to understand persistence and strategy-planning during visual search. |
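Detrended fluctuation analysis, the monofractal core of the MF-DFA used in this study, fits in a few lines: integrate the mean-removed series, detrend it within windows of increasing size, and read the scaling exponent off the slope of the log-log fluctuation curve; exponents above 0.5 indicate the long-range positive correlations (persistence) reported here. A rough first-order sketch (the multifractal variant generalizes the averaging over window residuals):

```python
import numpy as np

def dfa_exponent(x, scales):
    """First-order DFA: returns the scaling exponent of F(s) ~ s**alpha."""
    y = np.cumsum(x - x.mean())            # integrated profile
    F = []
    for s in scales:
        n = len(y) // s
        t = np.arange(s)
        ms = []
        for seg in y[:n * s].reshape(n, s):
            coef = np.polyfit(t, seg, 1)   # linear detrend per window
            ms.append(np.mean((seg - np.polyval(coef, t)) ** 2))
        F.append(np.sqrt(np.mean(ms)))
    return np.polyfit(np.log(scales), np.log(F), 1)[0]

rng = np.random.default_rng(2)
print(dfa_exponent(rng.normal(size=4096), [16, 32, 64, 128, 256]))
# ~0.5 for uncorrelated noise; persistent series give values above 0.5
```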
Tatiana A Amor; Mirko Luković; Hans J Herrmann; José S Andrade Influence of scene structure and content on visual search strategies Journal Article Journal of the Royal Society Interface, 14 (132), 2017. @article{Amor2017, title = {Influence of scene structure and content on visual search strategies}, author = {Tatiana A Amor and Mirko Lukovi{ć} and Hans J Herrmann and José S Andrade}, doi = {10.1098/rsif.2017.0406}, year = {2017}, date = {2017-01-01}, journal = {Journal of the Royal Society Interface}, volume = {14}, number = {132}, abstract = {When searching for a target within an image, our brain can adopt different strategies, but which one does it choose? This question can be answered by tracking the motion of the eye while it executes the task. Following many individuals performing various search tasks, we distinguish between two competing strategies. Motivated by these findings, we introduce a model that captures the interplay of the search strategies and allows us to create artificial eye-tracking trajectories, which could be compared with the experimental ones. Identifying the model parameters allows us to quantify the strategy employed in terms of ensemble averages, characterizing each experimental cohort. In this way, we can discern with high sensitivity the relation between the visual landscape and the average strategy, disclosing how small variations in the image induce changes in the strategy.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When searching for a target within an image, our brain can adopt different strategies, but which one does it choose? This question can be answered by tracking the motion of the eye while it executes the task. Following many individuals performing various search tasks, we distinguish between two competing strategies. Motivated by these findings, we introduce a model that captures the interplay of the search strategies and allows us to create artificial eye-tracking trajectories, which could be compared with the experimental ones. Identifying the model parameters allows us to quantify the strategy employed in terms of ensemble averages, characterizing each experimental cohort. In this way, we can discern with high sensitivity the relation between the visual landscape and the average strategy, disclosing how small variations in the image induce changes in the strategy. |
Lucía Amoruso; Agustín Ibáñez; Bruno Fonseca; Sebastián Gadea; Lucas Sedeño; Mariano Sigman; Adolfo M García; Ricardo Fraiman; Daniel Fraiman Variability in functional brain networks predicts expertise during action observation Journal Article NeuroImage, 146 , pp. 690–700, 2017. @article{Amoruso2017, title = {Variability in functional brain networks predicts expertise during action observation}, author = {Lucía Amoruso and Agustín Ibá{ñ}ez and Bruno Fonseca and Sebastián Gadea and Lucas Sede{ñ}o and Mariano Sigman and Adolfo M García and Ricardo Fraiman and Daniel Fraiman}, doi = {10.1016/j.neuroimage.2016.09.041}, year = {2017}, date = {2017-01-01}, journal = {NeuroImage}, volume = {146}, pages = {690--700}, publisher = {Elsevier}, abstract = {Observing an action performed by another individual activates, in the observer, similar circuits as those involved in the actual execution of that action. This activation is modulated by prior experience; indeed, sustained training in a particular motor domain leads to structural and functional changes in critical brain areas. Here, we capitalized on a novel graph-theory approach to electroencephalographic data (Fraiman et al., 2016) to test whether variability in functional brain networks implicated in Tango observation can discriminate between groups differing in their level of expertise. We found that experts and beginners significantly differed in the functional organization of task-relevant networks. Specifically, networks in expert Tango dancers exhibited less variability and a more robust functional architecture. Notably, these expertise-dependent effects were captured within networks derived from electrophysiological brain activity recorded in a very short time window (2 s). In brief, variability in the organization of task-related networks seems to be a highly sensitive indicator of long-lasting training effects. This finding opens new methodological and theoretical windows to explore the impact of domain-specific expertise on brain plasticity, while highlighting variability as a fruitful measure in neuroimaging research.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Observing an action performed by another individual activates, in the observer, similar circuits as those involved in the actual execution of that action. This activation is modulated by prior experience; indeed, sustained training in a particular motor domain leads to structural and functional changes in critical brain areas. Here, we capitalized on a novel graph-theory approach to electroencephalographic data (Fraiman et al., 2016) to test whether variability in functional brain networks implicated in Tango observation can discriminate between groups differing in their level of expertise. We found that experts and beginners significantly differed in the functional organization of task-relevant networks. Specifically, networks in expert Tango dancers exhibited less variability and a more robust functional architecture. Notably, these expertise-dependent effects were captured within networks derived from electrophysiological brain activity recorded in a very short time window (2 s). In brief, variability in the organization of task-related networks seems to be a highly sensitive indicator of long-lasting training effects. This finding opens new methodological and theoretical windows to explore the impact of domain-specific expertise on brain plasticity, while highlighting variability as a fruitful measure in neuroimaging research. |
Shu An; Weibin Mao; Sida Shang; Lili Kang The effects of post-stimulus elaboration, background valence, and item salience on the emotion-induced memory trade-off Journal Article Cognition and Emotion, pp. 1–14, 2020. @article{An2020c, title = {The effects of post-stimulus elaboration, background valence, and item salience on the emotion-induced memory trade-off}, author = {Shu An and Weibin Mao and Sida Shang and Lili Kang}, doi = {10.1080/02699931.2020.1797639}, year = {2020}, date = {2020-01-01}, journal = {Cognition and Emotion}, pages = {1--14}, publisher = {Taylor & Francis}, abstract = {The effect of emotion on memory often leads to the trade-off: enhanced memory for emotional items comes at the cost of memory for background information. Although this effect is usually attributed to overt attention during encoding, Steinmetz and Kensinger (2013) proposed that such an effect might also be related to post-stimulus elaboration. Based on previous different viewpoints, we used the directed forgetting paradigm to further explore the effect of post-stimulus elaboration on the memory trade-off. In the meantime, we also tested the roles of background valence and item salience (high salient items were placed in the centre of backgrounds while low salient items were placed in the periphery of backgrounds) in modulating the memory trade-off. Our results showed that there was a memory trade-off when backgrounds were neutral, whereas this was no longer the case when backgrounds were negative. This indicated the memory trade-off might be affected by background valence. Meanwhile, we found post-stimulus elaboration contributed to selective memory enhancement for backgrounds, while item salience enhanced item memory performance in the memory trade-off. These findings suggest the emotion-induced memory trade-off may be a complex memory effect, which can be influenced by different factors to varying degrees.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The effect of emotion on memory often leads to the trade-off: enhanced memory for emotional items comes at the cost of memory for background information. Although this effect is usually attributed to overt attention during encoding, Steinmetz and Kensinger (2013) proposed that such an effect might also be related to post-stimulus elaboration. Based on previous different viewpoints, we used the directed forgetting paradigm to further explore the effect of post-stimulus elaboration on the memory trade-off. In the meantime, we also tested the roles of background valence and item salience (high salient items were placed in the centre of backgrounds while low salient items were placed in the periphery of backgrounds) in modulating the memory trade-off. Our results showed that there was a memory trade-off when backgrounds were neutral, whereas this was no longer the case when backgrounds were negative. This indicated the memory trade-off might be affected by background valence. Meanwhile, we found post-stimulus elaboration contributed to selective memory enhancement for backgrounds, while item salience enhanced item memory performance in the memory trade-off. These findings suggest the emotion-induced memory trade-off may be a complex memory effect, which can be influenced by different factors to varying degrees. |
E J Anderson; Sabira K Mannan; Masud Husain; Geraint Rees; Petroc Sumner; Dominic J Mort; Donald McRobbie; Christopher Kennard Involvement of prefrontal cortex in visual search Journal Article Experimental Brain Research, 180 (2), pp. 289–302, 2007. @article{Anderson2007, title = {Involvement of prefrontal cortex in visual search}, author = {E J Anderson and Sabira K Mannan and Masud Husain and Geraint Rees and Petroc Sumner and Dominic J Mort and Donald McRobbie and Christopher Kennard}, doi = {10.1007/s00221-007-0860-0}, year = {2007}, date = {2007-01-01}, journal = {Experimental Brain Research}, volume = {180}, number = {2}, pages = {289--302}, abstract = {Visual search for target items embedded within a set of distracting items has consistently been shown to engage regions of occipital and parietal cortex, but the contribution of different regions of prefrontal cortex remains unclear. Here, we used fMRI to compare brain activity in 12 healthy participants performing efficient and inefficient search tasks in which target discriminability and the number of distractor items were manipulated. Matched baseline conditions were incorporated to control for visual and motor components of the tasks, allowing cortical activity associated with each type of search to be isolated. Region of interest analysis was applied to critical regions of prefrontal cortex to determine whether their involvement was common to both efficient and inefficient search, or unique to inefficient search alone. We found regions of the inferior and middle frontal cortex were only active during inefficient search, whereas an area in the superior frontal cortex (in the region of FEF) was active for both efficient and inefficient search. Thus, regions of ventral as well as dorsal prefrontal cortex are recruited during inefficient search, and we propose that this activity is related to processes that guide, control and monitor the allocation of selective attention.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual search for target items embedded within a set of distracting items has consistently been shown to engage regions of occipital and parietal cortex, but the contribution of different regions of prefrontal cortex remains unclear. Here, we used fMRI to compare brain activity in 12 healthy participants performing efficient and inefficient search tasks in which target discriminability and the number of distractor items were manipulated. Matched baseline conditions were incorporated to control for visual and motor components of the tasks, allowing cortical activity associated with each type of search to be isolated. Region of interest analysis was applied to critical regions of prefrontal cortex to determine whether their involvement was common to both efficient and inefficient search, or unique to inefficient search alone. We found regions of the inferior and middle frontal cortex were only active during inefficient search, whereas an area in the superior frontal cortex (in the region of FEF) was active for both efficient and inefficient search. Thus, regions of ventral as well as dorsal prefrontal cortex are recruited during inefficient search, and we propose that this activity is related to processes that guide, control and monitor the allocation of selective attention. |
Elaine J Anderson; Sabira K Mannan; Geraint Rees; Petroc Sumner; Christopher Kennard A role for spatial and nonspatial working memory processes in visual search Journal Article Experimental Psychology, 55 (5), pp. 301–312, 2008. @article{Anderson2008bb, title = {A role for spatial and nonspatial working memory processes in visual search}, author = {Elaine J Anderson and Sabira K Mannan and Geraint Rees and Petroc Sumner and Christopher Kennard}, doi = {10.1027/1618-3169.55.5.301}, year = {2008}, date = {2008-01-01}, journal = {Experimental Psychology}, volume = {55}, number = {5}, pages = {301--312}, abstract = {Searching a cluttered visual scene for a specific item of interest can take several seconds to perform if the target item is difficult to discriminate from surrounding items. Whether working memory processes are utilized to guide the path of attentional selection during such searches remains under debate. Previous studies have found evidence to support a role for spatial working memory in inefficient search, but the role of nonspatial working memory remains unclear. Here, we directly compared the role of spatial and nonspatial working memory for both an efficient and inefficient search task. In Experiment 1, we used a dual-task paradigm to investigate the effect of performing visual search within the retention interval of a spatial working memory task. Importantly, by incorporating two working memory loads (low and high) we were able to make comparisons between dual-task conditions, rather than between dual-task and single-task conditions. This design allows any interference effects observed to be attributed to changes in memory load, rather than to nonspecific effects related to "dual-task" performance. We found that the efficiency of the inefficient search task declined as spatial memory load increased, but that the efficient search task remained efficient. These results suggest that spatial memory plays an important role in inefficient but not efficient search. In Experiment 2, participants performed the same visual search tasks within the retention interval of visually matched spatial and verbal working memory tasks. Critically, we found comparable dual-task interference between inefficient search and both the spatial and nonspatial working memory tasks, indicating that inefficient search recruits working memory processes common to both domains.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Searching a cluttered visual scene for a specific item of interest can take several seconds to perform if the target item is difficult to discriminate from surrounding items. Whether working memory processes are utilized to guide the path of attentional selection during such searches remains under debate. Previous studies have found evidence to support a role for spatial working memory in inefficient search, but the role of nonspatial working memory remains unclear. Here, we directly compared the role of spatial and nonspatial working memory for both an efficient and inefficient search task. In Experiment 1, we used a dual-task paradigm to investigate the effect of performing visual search within the retention interval of a spatial working memory task. Importantly, by incorporating two working memory loads (low and high) we were able to make comparisons between dual-task conditions, rather than between dual-task and single-task conditions. 
This design allows any interference effects observed to be attributed to changes in memory load, rather than to nonspecific effects related to "dual-task" performance. We found that the efficiency of the inefficient search task declined as spatial memory load increased, but that the efficient search task remained efficient. These results suggest that spatial memory plays an important role in inefficient but not efficient search. In Experiment 2, participants performed the same visual search tasks within the retention interval of visually matched spatial and verbal working memory tasks. Critically, we found comparable dual-task interference between inefficient search and both the spatial and nonspatial working memory tasks, indicating that inefficient search recruits working memory processes common to both domains. |
Elaine J Anderson; Sabira K Mannan; Geraint Rees; Petroc Sumner; Christopher Kennard Overlapping functional anatomy for working memory and visual search Journal Article Experimental Brain Research, 200 (1), pp. 91–107, 2010. @article{Anderson2010, title = {Overlapping functional anatomy for working memory and visual search}, author = {Elaine J Anderson and Sabira K Mannan and Geraint Rees and Petroc Sumner and Christopher Kennard}, doi = {10.1007/s00221-009-2000-5}, year = {2010}, date = {2010-01-01}, journal = {Experimental Brain Research}, volume = {200}, number = {1}, pages = {91--107}, abstract = {Recent behavioural findings using dual-task paradigms demonstrate the importance of both spatial and non-spatial working memory processes in inefficient visual search (Anderson et al. in Exp Psychol 55:301-312, 2008). Here, using functional magnetic resonance imaging (fMRI), we sought to determine whether brain areas recruited during visual search are also involved in working memory. Using visually matched spatial and non-spatial working memory tasks, we confirmed previous behavioural findings that show significant dual-task interference effects occur when inefficient visual search is performed concurrently with either working memory task. Furthermore, we find considerable overlap in the cortical network activated by inefficient search and both working memory tasks. Our findings suggest that the interference effects observed behaviourally may have arisen from competition for cortical processes subserved by these overlapping regions. Drawing on previous findings (Anderson et al. in Exp Brain Res 180:289-302, 2007), we propose that the most likely anatomical locus for these interference effects is the inferior and middle frontal cortex of the right hemisphere. These areas are associated with attentional selection from memory as well as manipulation of information in memory, and we propose that the visual search and working memory tasks used here compete for common processing resources underlying these mechanisms.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recent behavioural findings using dual-task paradigms demonstrate the importance of both spatial and non-spatial working memory processes in inefficient visual search (Anderson et al. in Exp Psychol 55:301-312, 2008). Here, using functional magnetic resonance imaging (fMRI), we sought to determine whether brain areas recruited during visual search are also involved in working memory. Using visually matched spatial and non-spatial working memory tasks, we confirmed previous behavioural findings that show significant dual-task interference effects occur when inefficient visual search is performed concurrently with either working memory task. Furthermore, we find considerable overlap in the cortical network activated by inefficient search and both working memory tasks. Our findings suggest that the interference effects observed behaviourally may have arisen from competition for cortical processes subserved by these overlapping regions. Drawing on previous findings (Anderson et al. in Exp Brain Res 180:289-302, 2007), we propose that the most likely anatomical locus for these interference effects is the inferior and middle frontal cortex of the right hemisphere. These areas are associated with attentional selection from memory as well as manipulation of information in memory, and we propose that the visual search and working memory tasks used here compete for common processing resources underlying these mechanisms. |
Brian A Anderson; Steven Yantis Value-driven attentional and oculomotor capture during goal-directed, unconstrained viewing Journal Article Attention, Perception, and Psychophysics, 74, pp. 1644–1653, 2012. @article{Anderson2012, title = {Value-driven attentional and oculomotor capture during goal-directed, unconstrained viewing}, author = {Brian A Anderson and Steven Yantis}, doi = {10.3758/s13414-012-0348-2}, year = {2012}, date = {2012-01-01}, journal = {Attention, Perception, and Psychophysics}, volume = {74}, pages = {1644--1653}, abstract = {Covert shifts of attention precede and direct overt eye movements to stimuli that are task relevant or physically salient. A growing body of evidence suggests that the learned value of perceptual stimuli strongly influences their attentional priority. For example, previously rewarded but otherwise irrelevant and inconspicuous stimuli capture covert attention involuntarily. It is unknown, however, whether stimuli also draw eye movements involuntarily as a consequence of their reward history. Here, we show that previously rewarded but currently task-irrelevant stimuli capture both attention and the eyes. Value-driven oculomotor capture was observed during unconstrained viewing, when neither eye movements nor fixations were required, and was strongly related to individual differences in visual working memory capacity. The appearance of a reward-associated stimulus came to evoke pupil dilation over the course of training, which provides physiological evidence that the stimuli that elicit value-driven capture come to serve as reward-predictive cues. These findings reveal a close coupling of value-driven attentional capture and eye movements that has broad implications for theories of attention and reward learning.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Covert shifts of attention precede and direct overt eye movements to stimuli that are task relevant or physically salient. A growing body of evidence suggests that the learned value of perceptual stimuli strongly influences their attentional priority. For example, previously rewarded but otherwise irrelevant and inconspicuous stimuli capture covert attention involuntarily. It is unknown, however, whether stimuli also draw eye movements involuntarily as a consequence of their reward history. Here, we show that previously rewarded but currently task-irrelevant stimuli capture both attention and the eyes. Value-driven oculomotor capture was observed during unconstrained viewing, when neither eye movements nor fixations were required, and was strongly related to individual differences in visual working memory capacity. The appearance of a reward-associated stimulus came to evoke pupil dilation over the course of training, which provides physiological evidence that the stimuli that elicit value-driven capture come to serve as reward-predictive cues. These findings reveal a close coupling of value-driven attentional capture and eye movements that has broad implications for theories of attention and reward learning. |
Nicola C Anderson; Walter F Bischof; Kaitlin E W Laidlaw; Evan F Risko; Alan Kingstone Recurrence quantification analysis of eye movements Journal Article Behavior Research Methods, 45 , pp. 842–856, 2013. @article{Anderson2013, title = {Recurrence quantification analysis of eye movements}, author = {Nicola C Anderson and Walter F Bischof and Kaitlin E W Laidlaw and Evan F Risko and Alan Kingstone}, doi = {10.3758/s13428-012-0299-5}, year = {2013}, date = {2013-01-01}, journal = {Behavior Research Methods}, volume = {45}, pages = {842--856}, abstract = {Recurrence quantification analysis (RQA) has been successfully used for describing dynamic systems that are too complex to be characterized adequately by standard methods in time series analysis. More recently, RQA has been used for analyzing the coordination of gaze patterns between cooperating individuals. Here, we extend RQA to the characterization of fixation sequences, and we show that the global and local temporal characteristics of fixation sequences can be captured by a small number of RQA measures that have a clear interpretation in this context. We applied RQA to the analysis of a study in which observers looked at different scenes under natural or gaze-contingent viewing conditions, and we found large differences in the RQA measures between the viewing conditions, indicating that RQA is a powerful new tool for the analysis of the temporal patterns of eye movement behavior.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recurrence quantification analysis (RQA) has been successfully used for describing dynamic systems that are too complex to be characterized adequately by standard methods in time series analysis. More recently, RQA has been used for analyzing the coordination of gaze patterns between cooperating individuals. Here, we extend RQA to the characterization of fixation sequences, and we show that the global and local temporal characteristics of fixation sequences can be captured by a small number of RQA measures that have a clear interpretation in this context. We applied RQA to the analysis of a study in which observers looked at different scenes under natural or gaze-contingent viewing conditions, and we found large differences in the RQA measures between the viewing conditions, indicating that RQA is a powerful new tool for the analysis of the temporal patterns of eye movement behavior. |
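At its core, fixation-based RQA starts from a recurrence matrix: fixations i and j "recur" if they land within some radius of one another, and global measures such as the recurrence rate are simple statistics of that matrix. A minimal sketch (the radius, scene size, and random fixations are arbitrary illustrations; the paper defines further measures, e.g. determinism and laminarity, on the same matrix):

```python
import numpy as np

def recurrence_matrix(fixations, radius):
    """Binary matrix: 1 where two fixations fall within `radius` pixels."""
    d = np.linalg.norm(fixations[:, None, :] - fixations[None, :, :], axis=-1)
    return (d <= radius).astype(int)

rng = np.random.default_rng(3)
fix = rng.uniform(0, 1024, size=(50, 2))   # 50 fixations on a 1024-px scene
R = recurrence_matrix(fix, radius=64.0)

# Recurrence rate: percentage of fixation pairs that re-visit a location
n = len(fix)
upper = np.triu_indices(n, k=1)
print(f"recurrence rate: {100 * R[upper].mean():.1f}%")
```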
Giles M Anderson; Glyn W Humphreys Top-down expectancy versus bottom-up guidance in search for known color-form conjunctions Journal Article Attention, Perception, and Psychophysics, 77 (8), pp. 2622–2639, 2015. @article{Anderson2015, title = {Top-down expectancy versus bottom-up guidance in search for known color-form conjunctions}, author = {Giles M Anderson and Glyn W Humphreys}, doi = {10.3758/s13414-015-0960-z}, year = {2015}, date = {2015-11-01}, journal = {Attention, Perception, and Psychophysics}, volume = {77}, number = {8}, pages = {2622--2639}, publisher = {Springer US}, abstract = {We assessed the effects of pairing a target object with its familiar color on eye movements in visual search, under conditions where the familiar color could or could not be predicted. In Experiment 1 participants searched for a yellow- or purple-colored corn target amongst aubergine distractors, half of which were yellow and half purple. Search was more efficient when the color of the target was familiar and early eye movements more likely to be directed to targets carrying a familiar color than an unfamiliar color. Experiment 2 introduced cues which predicted the target color at 80% validity. Cue validity did not affect whether early fixations were to the target. Invalid cues, however, disrupted search efficiency for targets in an unfamiliar color whilst there was little cost to search efficiency for targets in their familiar color. These results generalized across items with different colors (Experiment 3). The data are consistent with early processes in selection being automatically modulated in a bottom-up manner to targets in their familiar color, even when expectancies are set for other colors.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We assessed the effects of pairing a target object with its familiar color on eye movements in visual search, under conditions where the familiar color could or could not be predicted. In Experiment 1 participants searched for a yellow- or purple-colored corn target amongst aubergine distractors, half of which were yellow and half purple. Search was more efficient when the color of the target was familiar and early eye movements more likely to be directed to targets carrying a familiar color than an unfamiliar color. Experiment 2 introduced cues which predicted the target color at 80% validity. Cue validity did not affect whether early fixations were to the target. Invalid cues, however, disrupted search efficiency for targets in an unfamiliar color whilst there was little cost to search efficiency for targets in their familiar color. These results generalized across items with different colors (Experiment 3). The data are consistent with early processes in selection being automatically modulated in a bottom-up manner to targets in their familiar color, even when expectancies are set for other colors. |
Nicola C Anderson; Fraser Anderson; Alan Kingstone; Walter F Bischof A comparison of scanpath comparison methods Journal Article Behavior Research Methods, 47 (4), pp. 1377–1392, 2015. @article{Anderson2015a, title = {A comparison of scanpath comparison methods}, author = {Nicola C Anderson and Fraser Anderson and Alan Kingstone and Walter F Bischof}, doi = {10.3758/s13428-014-0550-3}, year = {2015}, date = {2015-01-01}, journal = {Behavior Research Methods}, volume = {47}, number = {4}, pages = {1377--1392}, abstract = {Interest has flourished in studying both the spatial and temporal aspects of eye movement behavior. This has sparked the development of a large number of new methods to compare scanpaths. In the present work, we present a detailed overview of common scanpath comparison measures. Each of these measures was developed to solve a specific problem, but quantifies different aspects of scanpath behavior and requires different data-processing techniques. To understand these differences, we applied each scanpath comparison method to data from an encoding and recognition experiment and compared their ability to reveal scanpath similarities within and between individuals looking at natural scenes. Results are discussed in terms of the unique aspects of scanpath behavior that the different methods quantify. We conclude by making recommendations for choosing an appropriate scanpath comparison measure.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Interest has flourished in studying both the spatial and temporal aspects of eye movement behavior. This has sparked the development of a large number of new methods to compare scanpaths. In the present work, we present a detailed overview of common scanpath comparison measures. Each of these measures was developed to solve a specific problem, but quantifies different aspects of scanpath behavior and requires different data-processing techniques. To understand these differences, we applied each scanpath comparison method to data from an encoding and recognition experiment and compared their ability to reveal scanpath similarities within and between individuals looking at natural scenes. Results are discussed in terms of the unique aspects of scanpath behavior that the different methods quantify. We conclude by making recommendations for choosing an appropriate scanpath comparison measure. |
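One family of measures reviewed in this literature is string-edit (Levenshtein) distance: each fixation is coded by the region of interest it lands in, and the distance is the number of insertions, deletions, and substitutions needed to turn one coded sequence into the other. A compact sketch (the AOI coding is assumed, and normalization conventions differ between methods):

```python
def levenshtein(a, b):
    """Edit distance between two AOI-label sequences."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

# Two scanpaths coded over grid-cell AOIs
s1, s2 = "AABCD", "ABCCD"
d = levenshtein(s1, s2)
print(d, 1 - d / max(len(s1), len(s2)))  # distance 2, similarity 0.6
```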
Nicola C Anderson; Eduard Ort; Wouter Kruijne; Martijn Meeter; Mieke Donk It depends on when you look at it: Salience influences eye movements in natural scene viewing and search early in time Journal Article Journal of Vision, 15 (5), pp. 1–22, 2015. @article{Anderson2015b, title = {It depends on when you look at it: Salience influences eye movements in natural scene viewing and search early in time}, author = {Nicola C Anderson and Eduard Ort and Wouter Kruijne and Martijn Meeter and Mieke Donk}, doi = {10.1167/15.5.9}, year = {2015}, date = {2015-01-01}, journal = {Journal of Vision}, volume = {15}, number = {5}, pages = {1--22}, abstract = {It is generally accepted that salience affects eye movements in simple artificially created search displays. However, no such consensus exists for eye movements in natural scenes, with several reports arguing that it is mostly high-level cognitive factors that control oculomotor behavior in natural scenes. Here, we manipulate the salience distribution across images by decreasing or increasing the contrast in a gradient across the image. We recorded eye movements in an encoding task (Experiment 1) and a visual search task (Experiment 2) and analyzed the relationship between the latency of fixations and subsequent saccade targeting throughout scene viewing. We find that short-latency first saccades are more likely to land on a region of the image with high salience than long-latency and subsequent saccades in both the encoding and visual search tasks. This implies that salience indeed influences oculomotor behavior in natural scenes, albeit on a different timescale than previously reported. We discuss our findings in relation to current theories of saccade control in natural scenes.}, keywords = {}, pubstate = {published}, tppubtype = {article} } It is generally accepted that salience affects eye movements in simple artificially created search displays. However, no such consensus exists for eye movements in natural scenes, with several reports arguing that it is mostly high-level cognitive factors that control oculomotor behavior in natural scenes. Here, we manipulate the salience distribution across images by decreasing or increasing the contrast in a gradient across the image. We recorded eye movements in an encoding task (Experiment 1) and a visual search task (Experiment 2) and analyzed the relationship between the latency of fixations and subsequent saccade targeting throughout scene viewing. We find that short-latency first saccades are more likely to land on a region of the image with high salience than long-latency and subsequent saccades in both the encoding and visual search tasks. This implies that salience indeed influences oculomotor behavior in natural scenes, albeit on a different timescale than previously reported. We discuss our findings in relation to current theories of saccade control in natural scenes. |