Zhi-Lei Zhang; Christopher R L Cantor; Clifton M Schor: Perisaccadic stereo depth with zero retinal disparity. Journal Article. Current Biology, 20 (13), pp. 1176–1181, 2010. doi: 10.1016/j.cub.2010.04.060
When an object is viewed binocularly, unequal perspective projections of the two eyes' half images (binocular disparity) provide a cue for the sensation of stereo depth. For almost 200 years, binocular disparity has remained synonymous with retinal disparity [1], which is computed by subtracting the distance of each half image from its respective fovea [2]. However, binocular disparity could also be coded in headcentric instead of retinal coordinates, by combining eye position and retinal image position in each eye and representing disparity as differences between visual directions of half images relative to the head [3]. Although these two disparity-coding schemes suggest very different neural mechanisms, both offer identical predictions for stereopsis in almost every viewing condition, making it difficult to empirically distinguish between them. We designed a novel stimulus that uses perisaccadic spatial distortion [4] to generate inconsistency between headcentric and retinal disparity. Foveal half images flashed asynchronously just before a horizontal saccade have zero retinal disparity, yet they produce a sensation of depth consistent with a nonzero headcentric disparity. Furthermore, this headcentric disparity can cancel and reverse the perceived depth stimulated with nonzero retinal disparity. This is the first demonstration that a coding scheme other than retinal disparity has a role in human stereopsis.
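The contrast between the two coding schemes reduces to whether eye position enters the disparity computation. A minimal sketch of that contrast, assuming signed angles in degrees along the horizontal meridian (values invented for illustration, not the authors' code):

```python
# A minimal sketch (illustrative, not the authors' code) contrasting the two
# disparity-coding schemes in the abstract. All quantities are signed angles
# in degrees along the horizontal meridian.

def retinal_disparity(left_ecc, right_ecc):
    """Retinal coding: subtract each half image's distance from its own
    fovea (the fovea is 0 deg eccentricity) and difference the two."""
    return left_ecc - right_ecc

def headcentric_disparity(left_ecc, right_ecc, left_eye_pos, right_eye_pos):
    """Headcentric coding: each half image's visual direction relative to the
    head is eye position + retinal eccentricity; disparity is the difference
    between the two visual directions."""
    return (left_eye_pos + left_ecc) - (right_eye_pos + right_ecc)

# Perisaccadic stimulus: both half images land on the fovea (zero retinal
# disparity), but the registered eye-position signal differs between the two
# asynchronous flashes, so headcentric disparity is nonzero (values invented).
print(retinal_disparity(0.0, 0.0))                  # 0.0 -> no depth predicted
print(headcentric_disparity(0.0, 0.0, 10.0, 12.0))  # -2.0 -> depth predicted
```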
Yang Zhang; Ming Zhang: Spatial working memory load impairs manual but not saccadic inhibition of return. Journal Article. Vision Research, 51 (1), pp. 147–153, 2011.
Although spatial working memory has been shown to play a central role in manual IOR (Castel, Pratt, & Craik, 2003), it remains unclear whether spatial working memory is involved in saccadic IOR. The present study sought to address this question by using a dual-task paradigm, in which the participants performed an IOR task while keeping a set of locations in spatial working memory. While manual IOR was eliminated, saccadic IOR was not affected by spatial working memory load. These findings suggest that saccadic IOR does not rely on spatial working memory to process inhibitory tagging.
Hang Zhang; Camille Morvan; Louis Alexandre Etezad-Heydari; Laurence T Maloney: Very slow search and reach: Failure to maximize expected gain in an eye-hand coordination task. Journal Article. PLoS Computational Biology, 8 (10), pp. e1002718, 2012. doi: 10.1371/journal.pcbi.1002718
We examined an eye-hand coordination task where optimal visual search and hand movement strategies were inter-related. Observers were asked to find and touch a target among five distractors on a touch screen. Their reward for touching the target was reduced by an amount proportional to how long they took to locate and reach to it. Coordinating the eye and the hand appropriately would markedly reduce the search-reach time. Using statistical decision theory we derived the sequence of interrelated eye and hand movements that would maximize expected gain and we predicted how hand movements should change as the eye gathered further information about target location. We recorded human observers' eye movements and hand movements and compared them with the optimal strategy that would have maximized expected gain. We found that most observers failed to adopt the optimal search-reach strategy. We analyze and describe the strategies they did adopt.
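The gain structure described here (reward reduced in proportion to total search-plus-reach time) can be written as a one-line expected-gain function. A hypothetical sketch with made-up payoffs, not the authors' derivation:

```python
# Expected gain under the abstract's payoff scheme: hit probability times
# reward, minus a penalty that grows linearly with elapsed time. All numbers
# below are invented for illustration.
def expected_gain(p_correct, reward, time_cost_per_s, total_time_s):
    """Expected gain for a strategy that hits the target with probability
    p_correct and takes total_time_s seconds overall."""
    return p_correct * reward - time_cost_per_s * total_time_s

# Comparing two hypothetical strategies: reach early (fast, less certain)
# vs wait for more fixations (slow, more certain).
print(expected_gain(0.80, reward=10.0, time_cost_per_s=2.0, total_time_s=1.5))  # 5.0
print(expected_gain(0.95, reward=10.0, time_cost_per_s=2.0, total_time_s=3.0))  # 3.5
```

Under these made-up numbers the faster, less certain strategy wins; the optimal strategy in the paper trades off exactly these two quantities as the eye gathers information.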
Jun-Yun Zhang; Gong-Liang Zhang; Lei Liu; Cong Yu: Whole report uncovers correctly identified but incorrectly placed target information under visual crowding. Journal Article. Journal of Vision, 12 (7), pp. 1–11, 2012. doi: 10.1167/12.7.5
Multiletter identification studies often find correctly identified letters being reported in wrong positions. However, how position uncertainty impacts crowding in peripheral vision is not fully understood. The observation of a flanker being reported as the central target cannot be taken as unequivocal evidence for position misperception because the observers could be biased to report a more identifiable flanker when failing to identify the central target. In addition, it has never been reported whether a correctly identified central target can be perceived at a flanker position under crowding. Empirical investigation into this possibility holds the key to demonstrating letter-level position uncertainty in crowding, because the position errors of the least identifiable central target cannot be attributed to response bias. We asked normally-sighted observers to report either the central target of a trigram (partial report) or all three characters (whole report). The results showed that, for radially arranged trigrams, the rate of reporting the central target regardless of the reported position in the whole report was significantly higher than the partial report rate, and the extra target reports mostly ended up in flanker positions. Error analysis indicated that target-flanker position swapping and misalignment (lateral shift of the target and one flanker) underlay this target misplacement. Our results thus establish target misplacement as a source of crowding errors and ascertain the role of letter-level position uncertainty in crowding.
En Zhang; Gong Liang Zhang; Wu Li: Spatiotopic perceptual learning mediated by retinotopic processing and attentional remapping. Journal Article. European Journal of Neuroscience, 38 (12), pp. 3758–3767, 2013. doi: 10.1111/ejn.12379
Visual processing takes place in both retinotopic and spatiotopic frames of reference. Whereas visual perceptual learning is usually specific to the trained retinotopic location, our recent study has shown spatiotopic specificity of learning in motion direction discrimination. To explore the mechanisms underlying spatiotopic processing and learning, and to examine whether similar mechanisms also exist in visual form processing, we trained human subjects to discriminate an orientation difference between two successively displayed stimuli, with a gaze shift in between to manipulate their positional relation in the spatiotopic frame of reference without changing their retinal locations. Training resulted in better orientation discriminability for the trained than for the untrained spatial relation of the two stimuli. This learning-induced spatiotopic preference was seen only at the trained retinal location and orientation, suggesting experience-dependent spatiotopic form processing directly based on a retinotopic map. Moreover, a similar but weaker learning-induced spatiotopic preference was still present even if the first stimulus was rendered irrelevant to the orientation discrimination task by having the subjects judge the orientation of the second stimulus relative to its mean orientation in a block of trials. However, if the first stimulus was absent, and thus no attention was captured before the gaze shift, the learning produced no significant spatiotopic preference, suggesting an important role of attentional remapping in spatiotopic processing and learning. Taken together, our results suggest that spatiotopic visual representation can be mediated by interactions between retinotopic processing and attentional remapping, and can be modified by perceptual training.
Ruyuan Zhang; Oh-Sang Kwon; Duje Tadin: Illusory movement of stationary stimuli in the visual periphery: Evidence for a strong centrifugal prior in motion processing. Journal Article. Journal of Neuroscience, 33 (10), pp. 4415–4423, 2013. doi: 10.1523/JNEUROSCI.4744-12.2013
Visual input is remarkably diverse. Certain sensory inputs are more probable than others, mirroring statistical regularities of the visual environment. The visual system exploits many of these regularities, resulting, on average, in better inferences about visual stimuli. However, by incorporating prior knowledge into perceptual decisions, visual processing can also result in perceptions that do not match sensory inputs. Such perceptual biases can often reveal unique insights into underlying mechanisms and computations. For example, a prior assumption that objects move slowly can explain a wide range of motion phenomena. The prior on slow speed is usually rationalized by its match with visual input, which typically includes stationary or slow-moving objects. However, this only holds for foveal and parafoveal stimulation. The visual periphery tends to be exposed to faster motions, which are biased toward centrifugal directions. Thus, if prior assumptions derive from experience, peripheral motion processing should be biased toward centrifugal speeds. Here, in experiments with human participants, we support this hypothesis and report a novel visual illusion where stationary objects in the visual periphery are perceived as moving centrifugally, while objects moving as fast as 7°/s toward the fovea are perceived as stationary. These behavioral results were quantitatively explained by a Bayesian observer that has a strong centrifugal prior. This prior is consistent with both the prevalence of centrifugal motions in the visual periphery and a centrifugal bias of direction tuning in cortical area MT, supporting the notion that visual processing mirrors its input statistics.
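The Bayesian observer mentioned in the abstract combines a noisy speed measurement with a prior over speeds. Under the common simplifying assumption of a Gaussian likelihood and a Gaussian prior (the parameters below are invented for illustration, not the paper's fitted values), the MAP estimate has a closed form:

```python
# A minimal sketch of a Bayesian speed estimator with Gaussian likelihood and
# Gaussian prior: the posterior peak is the precision-weighted average of the
# measurement and the prior mean. All numbers are illustrative.

def map_speed(measured, sigma_like, prior_mean, sigma_prior):
    """MAP speed estimate for Gaussian likelihood x Gaussian prior."""
    w = sigma_prior**2 / (sigma_prior**2 + sigma_like**2)
    return w * measured + (1 - w) * prior_mean

# Signed speed: positive = centrifugal (away from the fovea). With a prior
# centered on a centrifugal speed, a stationary stimulus (0 deg/s) is "seen"
# as drifting centrifugally, while a centripetal stimulus near -7 deg/s can
# be estimated as roughly stationary.
print(map_speed(0.0, sigma_like=4.0, prior_mean=8.0, sigma_prior=4.0))   # 4.0
print(map_speed(-7.0, sigma_like=4.0, prior_mean=8.0, sigma_prior=4.0))  # 0.5
```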
Luming Zhang; Yue Gao; Rongrong Ji; Yingjie Xia; Qionghai Dai; Xuelong Li: Actively learning human gaze shifting paths for semantics-aware photo cropping. Journal Article. IEEE Transactions on Image Processing, 23 (5), pp. 2235–2245, 2014. doi: 10.1109/TIP.2014.2311658
Photo cropping is a widely used tool in the printing industry, photography, and cinematography. Conventional cropping models suffer from the following three challenges. First, the deemphasized role of semantic contents, which are many times more important than low-level features in photo aesthetics. Second, the absence of a sequential ordering in the existing models. In contrast, humans look at semantically important regions sequentially when viewing a photo. Third, the difficulty of leveraging inputs from multiple users. Experience from multiple users is particularly critical in cropping as photo assessment is quite a subjective task. To address these challenges, this paper proposes semantics-aware photo cropping, which crops a photo by simulating the process of humans sequentially perceiving semantically important regions of a photo. We first project the local features (graphlets in this paper) onto the semantic space, which is constructed based on the category information of the training photos. An efficient learning algorithm is then derived to sequentially select semantically representative graphlets of a photo, and the selecting process can be interpreted by a path, which simulates humans actively perceiving semantics in a photo. Furthermore, we learn a prior distribution of such active graphlet paths from training photos that are marked as aesthetically pleasing by multiple users. The learned priors enforce the corresponding active graphlet path of a test photo to be maximally similar to those from the training photos. Experimental results show that: 1) the active graphlet path accurately predicts human gaze shifting, and thus is more indicative for photo aesthetics than conventional saliency maps, and 2) the cropped photos produced by our approach outperform its competitors in both qualitative and quantitative comparisons.
Jiedong Zhang; Jia Liu; Yaoda Xu: Neural decoding reveals impaired face configural processing in the right fusiform face area of individuals with developmental prosopagnosia. Journal Article. Journal of Neuroscience, 35 (4), pp. 1539–1548, 2015. doi: 10.1523/JNEUROSCI.2646-14.2015
Most of human daily social interactions rely on the ability to successfully recognize faces. Yet ∼2% of the human population suffers from face blindness without any acquired brain damage [this is also known as developmental prosopagnosia (DP) or congenital prosopagnosia]. Despite the presence of severe behavioral face recognition deficits, surprisingly, a majority of DP individuals exhibit normal face selectivity in the right fusiform face area (FFA), a key brain region involved in face configural processing. This finding, together with evidence showing impairments downstream from the right FFA in DP individuals, has led some to argue that perhaps the right FFA is largely intact in DP individuals. Using fMRI multivoxel pattern analysis, here we report the discovery of a neural impairment in the right FFA of DP individuals that may play a critical role in mediating their face-processing deficits. In seven individuals with DP, we discovered that, despite the right FFA's preference for faces and its ability to decode the different face parts, it exhibited impaired face configural decoding and did not contain distinct neural response patterns for the intact and the scrambled face configurations. This abnormality was not present throughout the ventral visual cortex, as normal neural decoding was found in an adjacent object-processing region. To our knowledge, this is the first direct neural evidence showing impaired face configural processing in the right FFA in individuals with DP. The discovery of this neural impairment provides a new clue to our understanding of the neural basis of DP.
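The multivoxel pattern analysis logic is: a region "decodes" a distinction if a cross-validated classifier can separate the voxel response patterns evoked by the two conditions above chance. A toy sketch with simulated patterns, assuming scikit-learn (not the authors' pipeline):

```python
# A toy MVPA sketch on simulated data: does a linear classifier separate the
# voxel patterns for intact vs scrambled face configurations above chance?
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(1)
n_trials, n_voxels = 40, 100
patterns = rng.standard_normal((n_trials, n_voxels))  # simulated ROI patterns
labels = np.repeat([0, 1], n_trials // 2)             # 0 = intact, 1 = scrambled
patterns[labels == 1] += 0.3                          # injected decodable signal

acc = cross_val_score(LinearSVC(dual=False), patterns, labels, cv=5).mean()
print(acc)  # above 0.5 implies the ROI carries configural information
```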
Yan Zhang; Xiaochuan Pan; Rubin Wang; Masamichi Sakagami: Functional connectivity between prefrontal cortex and striatum estimated by phase locking value. Journal Article. Cognitive Neurodynamics, 10 (3), pp. 245–254, 2016. doi: 10.1007/s11571-016-9376-2
The interplay between the prefrontal cortex (PFC) and striatum has an important role in cognitive processes. To investigate interactive functions between the two areas in reward processing, we recorded local field potentials (LFPs) simultaneously from the two areas of two monkeys performing a reward prediction task (large reward vs small reward). The power of the LFPs was calculated in three frequency bands: the beta band (15–29 Hz), the low gamma band (30–49 Hz), and the high gamma band (50–100 Hz). We found that both the PFC and striatum encoded the reward information in the beta band. The reward information was also found in the high gamma band in the PFC, not in the striatum. We further calculated the phase-locking value (PLV) between two LFP signals to measure the phase synchrony between the PFC and striatum. Significant differences were found between PLVs in different task periods and in different frequency bands. The PLVs in the small reward condition were significantly higher than those in the large reward condition in the beta band. In contrast, the PLVs in the high gamma band were stronger in large reward trials than in small reward trials. These results suggested that the functional connectivity between the PFC and striatum depended on the task periods and reward conditions. The beta synchrony between the PFC and striatum may regulate behavioral outputs of the monkeys in the small reward condition.
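The phase-locking value used here has a standard definition: band-pass both signals, extract instantaneous phases via the Hilbert transform, and take the magnitude of the mean resultant vector of the phase differences (1 = perfect locking, 0 = no locking). A minimal sketch under those standard choices (filter order and the synthetic test data are arbitrary, not the authors' pipeline):

```python
# A minimal PLV sketch for two signals in one frequency band.
import numpy as np
from scipy.signal import butter, filtfilt, hilbert

def plv(x, y, fs, band):
    low, high = band
    b, a = butter(4, [low / (fs / 2), high / (fs / 2)], btype="band")
    phase_x = np.angle(hilbert(filtfilt(b, a, x)))
    phase_y = np.angle(hilbert(filtfilt(b, a, y)))
    return np.abs(np.mean(np.exp(1j * (phase_x - phase_y))))

# Illustrative use on fake data, with the beta band (15-29 Hz) from the abstract:
fs = 1000.0
t = np.arange(0, 2.0, 1 / fs)
x = np.sin(2 * np.pi * 20 * t) + 0.5 * np.random.randn(t.size)
y = np.sin(2 * np.pi * 20 * t + 1.0) + 0.5 * np.random.randn(t.size)  # fixed lag
print(plv(x, y, fs, (15, 29)))  # high (near 1) for a consistent phase lag
```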
Xuemeng Zhang; Shuaiyu Chen; Hong Chen; Yan Gu; Wenjian Xu: General and food-specific inhibitory control as moderators of the effects of the impulsive systems on food choices. Journal Article. Frontiers in Psychology, 8, pp. 1–8, 2017. doi: 10.3389/fpsyg.2017.00802
The present study aimed to extend the application of the reflective-impulsive model to restrained eating and explore the effect of automatic attention (impulsive system) on food choices. Furthermore, we examined the moderating effects of general inhibitory control (G-IC) and food-specific inhibitory control (F-IC) on successful and unsuccessful restrained eaters (US-REs). Automatic attention was measured using the EyeLink 1000, which tracked eye movements during the process of making food choices, and G-IC and F-IC were measured using the Stop-Signal Task. The results showed that food choices were related to automatic attention and that G-IC and F-IC moderated the predictive relationship between automatic attention and food choices. Furthermore, among successful restrained eaters (S-REs), automatic attention to high caloric foods did not predict food choices, regardless of whether G-IC or F-IC was high or low. Whereas food choice was positively correlated with automatic attention among US-REs with poor F-IC, this pattern was not observed in those with poor G-IC. In conclusion, the S-REs had more effective self-management skills and their food choices were affected less by automatic attention and inhibitory control. Unsuccessful restrained eating was associated with poor F-IC (not G-IC) and greater automatic attention to high caloric foods. Thus, clinical interventions should focus on enhancing F-IC, not G-IC, and on reducing automatic attention to high caloric foods.
Yan Zhang; Xiaoying Wang; Juan Wang; Lili Zhang; Yu Xiang: Patterns of eye movements when observers judge female facial attractiveness. Journal Article. Frontiers in Psychology, 8, pp. 1909, 2017. doi: 10.3389/fpsyg.2017.01909
The purpose of the present study is to explore the fixation patterns underlying explicit judgments of facial attractiveness and to infer which features are used in such judgments. Facial attractiveness is of high importance for human interaction and social behavior. Behavioral studies on the perceptual cues for female facial attractiveness suggested three potentially important features: averageness, symmetry, and sexual dimorphism. However, none of these studies explained which regions of stimulus images influence observers' judgments. Therefore, the present research recorded the eye movements of 24 male observers and 19 female observers as they rated a set of 30 photographs of female faces for attractiveness. Results demonstrated the following: (1) Fixation is longer and more frequent on the noses of female faces than on their eyes and mouths (no difference exists between the eyes and the mouth); (2) the average pupil diameter at the nose region is bigger than that at the eyes and mouth (no difference exists between the eyes and the mouth); (3) male participants made significantly more fixations than female participants; (4) observers first fixate on the eyes and mouth (no difference exists between the eyes and the mouth) before fixating on the nose area. In general, participants attend predominantly to the nose to form attractiveness judgments. The results of this study add a new dimension to the existing literature on judgment of facial attractiveness. The major contribution of the present study is the finding that the area of the nose is vital in the judgment of facial attractiveness. This finding establishes a contribution of partial processing to female facial attractiveness judgments during eye tracking.
Yan Zhang; Yu Xiang; Ying Guo; Lili Zhang: Beauty-related perceptual bias: Who captures the mind of the beholder? Journal Article. Brain and Behavior, 8 (5), pp. 1–7, 2018. doi: 10.1002/brb3.945
Introduction: To explore beauty-related perceptual bias and answer the question: Who can capture the mind of the beholder? Many studies have explored the specificity of human faces through ERP or other ways, and the materials they used are general human faces and other objects. Therefore, we want to further explore the difference between attractive faces and beautiful objects such as flowers. Methods: We recorded the eye movements of 22 male observers and 23 female observers using a standard two-alternative forced choice. Results: (1) The attractive faces were looked at longer and more often in comparison with the beautiful flowers; (2) female participants made more fixations than male participants; and (3) the participants watched the beautiful flowers first, followed by the attractive faces, but there was no significant difference in the first fixation duration between the beautiful flowers and the attractive faces. Conclusions: The data in this study may suggest that people prefer attractive faces to beautiful flowers.
Jun-Yun Zhang; Cong Yu: Vernier learning with short- and long-staircase training and its transfer to a new location with double training. Journal Article. Journal of Vision, 18 (13), pp. 1–8, 2018. doi: 10.1167/18.13.8
We previously demonstrated that perceptual learning of Vernier discrimination, when paired with orientation learning at the same retinal location, can transfer completely to untrained locations (Wang, Zhang, Klein, Levi, & Yu, 2014; Zhang, Wang, Klein, Levi, & Yu, 2011). However, Hung and Seitz (2014) reported that the transfer is possible only when Vernier is trained with short staircases, but not with very long staircases. Here we ran two experiments to examine Hung and Seitz's conclusions. The first experiment confirmed the transfer effects with short-staircase Vernier training in both our study and Hung and Seitz's. The second experiment revealed that long-staircase training only produced very fast learning at the beginning of the pretraining session, but with no further learning afterward. Moreover, the learning and transfer effects differed insignificantly with a small effect size, making it difficult to support Hung and Seitz's claim that learning with long-staircase training cannot transfer to an untrained retinal location.
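For readers unfamiliar with staircase training, a generic adaptive staircase of the kind being compared (a 3-down-1-up rule; the rule, step size, and trial counts here are illustrative, not those of either study) looks like this:

```python
# A generic 3-down-1-up staircase sketch: shrink the Vernier offset after 3
# consecutive correct responses, grow it after any error. "Short" vs "long"
# staircase training differs mainly in n_trials.
import random

def run_staircase(respond, start=10.0, step=1.25, n_trials=40):
    """respond(offset) -> True/False; returns the list of offsets visited."""
    offset, correct_run, history = start, 0, []
    for _ in range(n_trials):
        history.append(offset)
        if respond(offset):
            correct_run += 1
            if correct_run == 3:                 # 3 correct in a row: harder
                offset, correct_run = offset / step, 0
        else:                                    # any error: easier
            offset, correct_run = offset * step, 0
    return history

# e.g. a simulated observer whose accuracy grows with offset size:
print(run_staircase(lambda o: random.random() < min(0.99, o / 8.0))[:10])
```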
Mengmi Zhang; Jiashi Feng; Keng Teck Ma; Joo Hwee Lim; Qi Zhao; Gabriel Kreiman: Finding any Waldo with zero-shot invariant and efficient visual search. Journal Article. Nature Communications, 9, pp. 3730, 2018. doi: 10.1038/s41467-018-06217-x
Searching for a target object in a cluttered scene constitutes a fundamental challenge in daily vision. Visual search must be selective enough to discriminate the target from distractors, invariant to changes in the appearance of the target, efficient to avoid exhaustive exploration of the image, and must generalize to locate novel target objects with zero-shot training. Previous work on visual search has focused on searching for perfect matches of a target after extensive category-specific training. Here, we show for the first time that humans can efficiently and invariantly search for natural objects in complex scenes. To gain insight into the mechanisms that guide visual search, we propose a biologically inspired computational model that can locate targets without exhaustive sampling and which can generalize to novel objects. The model provides an approximation to the mechanisms integrating bottom-up and top-down signals during search in natural scenes.
Bao Zhang; Shuhui Liu; Mattia Doro; Giovanni Galfano: Attentional guidance from multiple working memory representations: Evidence from eye movements. Journal Article. Scientific Reports, 8, pp. 13876, 2018. doi: 10.1038/s41598-018-32144-4
Recent studies have shown that the representation of an item in visual working memory (VWM) can bias the deployment of attention to stimuli in the visual scene possessing the same features. When multiple item representations are simultaneously held in VWM, whether these representations, especially those held in a non-prioritized or accessory status, are able to bias attention is still controversial. In the present study we adopted an eye tracking technique to shed light on this issue. In particular, we implemented a manipulation aimed at prioritizing one of the VWM representations to an active status, and tested whether attention could be guided by both the prioritized and the accessory representations when they reappeared as distractors in a visual search task. Notably, in Experiment 1, an analysis of first fixation proportion (FFP) revealed that both the prioritized and the accessory representations were able to capture attention, suggesting a significant attentional guidance effect. However, such an effect was not present in manual response times (RTs). Most critically, in Experiment 2, we used a more robust experimental design controlling for different factors that might have played a role in shaping these findings. The results showed evidence for attentional guidance from the accessory representation in both manual RTs and FFPs. Interestingly, FFPs showed a stronger attentional bias for the prioritized representation than for the accessory representation across experiments. The overall findings suggest that multiple VWM representations, even the accessory representation, can simultaneously interact with visual attention.
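First fixation proportion (FFP) is simply the share of trials whose first fixation after display onset lands on the memory-matching item. A toy sketch with an assumed per-trial encoding (labels are invented, not the authors' data format):

```python
# FFP from a list of per-trial first-fixation ROI labels, e.g. "match" when
# the first fixation lands on the memory-matching distractor.
def first_fixation_proportion(trials):
    """trials: per-trial first-fixation labels; empty/None = no fixation."""
    valid = [t for t in trials if t]          # drop trials with no fixation
    return sum(lab == "match" for lab in valid) / len(valid)

print(first_fixation_proportion(["match", "nonmatch", "match", "target"]))  # 0.5
```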
Xilin Zhang; Nicole Mlynaryk; Sara Ahmed; Shruti Japee; Leslie G Ungerleider: The role of inferior frontal junction in controlling the spatially global effect of feature-based attention in human visual areas. Journal Article. PLoS Biology, 16 (6), pp. e2005399, 2018. doi: 10.1371/journal.pbio.2005399
Feature-based attention has a spatially global effect, i.e., responses to stimuli that share features with an attended stimulus are enhanced not only at the attended location but throughout the visual field. However, how feature-based attention modulates cortical neural responses at unattended locations remains unclear. Here we used functional magnetic resonance imaging (fMRI) to examine this issue as human participants performed motion- (Experiment 1) and color- (Experiment 2) based attention tasks. Results indicated that, in both experiments, the respective visual processing areas (middle temporal area [MT+] for motion and V4 for color) as well as early visual, parietal, and prefrontal areas all showed the classic feature-based attention effect, with neural responses to the unattended stimulus significantly elevated when it shared the same feature with the attended stimulus. Effective connectivity analysis using dynamic causal modeling (DCM) showed that this spatially global effect in the respective visual processing areas (MT+ for motion and V4 for color), intraparietal sulcus (IPS), frontal eye field (FEF), medial frontal gyrus (mFG), and primary visual cortex (V1) was derived by feedback from the inferior frontal junction (IFJ). Complementary effective connectivity analysis using Granger causality modeling (GCM) confirmed that, in both experiments, the node with the highest outflow and netflow degree was IFJ, which was thus considered to be the source of the network. These results indicate a source for the spatially global effect of feature-based attention in the human prefrontal cortex.
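The outflow-degree idea behind the GCM analysis can be illustrated with pairwise Granger tests: a node that Granger-causes many others while being caused by few is treated as the source. A sketch assuming statsmodels and synthetic signals (not the authors' pipeline):

```python
# Pairwise Granger causality sketch: grangercausalitytests checks whether the
# second column of its input Granger-causes the first.
import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests

def granger_p(source, sink, maxlag=2):
    """Smallest p-value across lags for 'source Granger-causes sink'."""
    res = grangercausalitytests(np.column_stack([sink, source]), maxlag,
                                verbose=False)
    return min(r[0]["ssr_ftest"][1] for r in res.values())

rng = np.random.default_rng(0)
ifj = rng.standard_normal(500)                          # hypothetical source
v1 = np.roll(ifj, 1) + 0.5 * rng.standard_normal(500)   # follows ifj at lag 1
print(granger_p(ifj, v1) < 0.05)   # True: ifj -> v1 (contributes to outflow)
print(granger_p(v1, ifj) < 0.05)   # expected False: no reverse influence
```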
Felicia Zhang; Lauren L Emberson: Opposing timing constraints severely limit the use of pupillometry to investigate visual statistical learning. Journal Article. Frontiers in Psychology, 10, pp. 1–15, 2019. doi: 10.3389/fpsyg.2019.01792
The majority of visual statistical learning (VSL) research uses only offline measures, collected after the familiarization phase (i.e., learning) has occurred. Offline measures have revealed a lot about the extent of statistical learning (SL) but less is known about the learning mechanisms that support VSL. Studies have shown that prediction can be a potential learning mechanism for VSL, but it is difficult to examine the role of prediction in VSL using offline measures alone. Pupil diameter is a promising online measure to index prediction in VSL because it can be collected during learning, requires no overt action or task, and can be used in a wide range of populations (e.g., infants and adults). Furthermore, pupil diameter has already been used to investigate processes that are part of prediction, such as prediction error and updating. While the properties of pupil diameter have the potential to powerfully expand studies in VSL, through a series of three experiments we find that the two are not compatible with each other. Our results revealed that pupil diameter, used to index prediction, is not related to offline measures of learning. We also found that pupil differences that appear to be a result of prediction are actually a result of where we chose to baseline instead. Ultimately, we conclude that the fast-paced nature of VSL paradigms makes them incompatible with the slow nature of pupil change. Therefore, our findings suggest pupillometry should not be used to investigate learning mechanisms in fast-paced VSL tasks.
Jinxiao Zhang; Antoni B Chan; Esther Y Y Lau; Janet H Hsiao Individuals with insomnia misrecognize angry faces as fearful faces while missing the eyes: An eye-tracking study Journal Article Sleep, 42 (2), pp. zsy220, 2019. @article{Zhang2019d, title = {Individuals with insomnia misrecognize angry faces as fearful faces while missing the eyes: An eye-tracking study}, author = {Jinxiao Zhang and Antoni B Chan and Esther Y Y Lau and Janet H Hsiao}, doi = {10.1093/sleep/zsy220}, year = {2019}, date = {2019-01-01}, journal = {Sleep}, volume = {42}, number = {2}, pages = {zsy220}, abstract = {Individuals with insomnia have been found to have disturbed perception of facial expressions. Through eye movement examinations, here we test the hypothesis that this effect is due to impaired visual attention functions for retrieving diagnostic features in facial expression judgments. Twenty-three individuals with insomnia symptoms and 23 controls without insomnia completed a task to categorize happy, sad, fearful, and angry facial expressions. The participants with insomnia were less accurate in recognizing angry faces and misidentified them as fearful faces more often than the controls. A hidden Markov modeling approach for eye movement data analysis revealed that when viewing facial expressions, more individuals with insomnia adopted a nose-mouth eye movement pattern focusing on the vertical face midline while more controls adopted an eyes-mouth pattern preferentially attending to lateral features, particularly the two eyes. As previous studies found that the primary diagnostic feature for recognizing angry faces is the eyes while the diagnostic features for other facial expressions involve the mouth region, missing the eye region may contribute to specific difficulties in recognizing angry facial expressions, consistent with our behavioral finding in participants with insomnia symptoms. Taken together, the findings suggest that impaired information selection through visual attention control may be related to the compromised emotion perception in individuals with insomnia.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Individuals with insomnia have been found to have disturbed perception of facial expressions. Through eye movement examinations, here we test the hypothesis that this effect is due to impaired visual attention functions for retrieving diagnostic features in facial expression judgments. Twenty-three individuals with insomnia symptoms and 23 controls without insomnia completed a task to categorize happy, sad, fearful, and angry facial expressions. The participants with insomnia were less accurate in recognizing angry faces and misidentified them as fearful faces more often than the controls. A hidden Markov modeling approach for eye movement data analysis revealed that when viewing facial expressions, more individuals with insomnia adopted a nose-mouth eye movement pattern focusing on the vertical face midline while more controls adopted an eyes-mouth pattern preferentially attending to lateral features, particularly the two eyes. As previous studies found that the primary diagnostic feature for recognizing angry faces is the eyes while the diagnostic features for other facial expressions involve the mouth region, missing the eye region may contribute to specific difficulties in recognizing angry facial expressions, consistent with our behavioral finding in participants with insomnia symptoms. 
Taken together, the findings suggest that impaired information selection through visual attention control may be related to the compromised emotion perception in individuals with insomnia. |
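For readers curious about the hidden Markov modeling approach mentioned above, the sketch below (our illustration, not the authors' analysis code) fits a Gaussian HMM to concatenated fixation coordinates using the hmmlearn package; the hidden states act as data-driven regions of interest (e.g., eyes, nose, mouth), and the transition matrix summarizes a viewer's scan pattern.

import numpy as np
from hmmlearn.hmm import GaussianHMM

# Each trial is a sequence of fixation (x, y) coordinates in screen pixels;
# random placeholder data stands in for real eye-tracking recordings.
rng = np.random.default_rng(0)
trials = [rng.uniform(0, 600, size=(12, 2)) for _ in range(40)]

X = np.vstack(trials)               # concatenated observations
lengths = [len(t) for t in trials]  # per-trial sequence lengths

# Three hidden states as candidate face regions of interest.
model = GaussianHMM(n_components=3, covariance_type="full", n_iter=100)
model.fit(X, lengths)

print(model.means_)     # state centers: where fixations cluster
print(model.transmat_)  # transition probabilities between regions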
Xiaoxian Zhang; Wanlu Fu; Licheng Xue; Jing Zhao; Zhiguo Wang Children with mathematical learning difficulties are sluggish in disengaging attention Journal Article Frontiers in Psychology, 10 , pp. 1–9, 2019. @article{Zhang2019f, title = {Children with mathematical learning difficulties are sluggish in disengaging attention}, author = {Xiaoxian Zhang and Wanlu Fu and Licheng Xue and Jing Zhao and Zhiguo Wang}, doi = {10.3389/fpsyg.2019.00932}, year = {2019}, date = {2019-01-01}, journal = {Frontiers in Psychology}, volume = {10}, pages = {1--9}, abstract = {Mathematical learning difficulties (MLD) refer to a variety of deficits in math skills, typically pertaining to the domains of arithmetic and problem solving. The present study examined the time course of attentional orienting in MLD children with a spatial cueing task, by parametrically manipulating the cue-target onset asynchrony (CTOA). The results of Experiment 1 revealed that, in contrast to typically developing children, the inhibitory aftereffect of attentional orienting, frequently referred to as inhibition of return (IOR), was not observed in the MLD children, even at the longest CTOA tested (800 ms). However, robust early facilitation effects were observed in the MLD children, suggesting that they have difficulties in attentional disengagement rather than attentional engagement. In a second experiment, a secondary cue was introduced to the cueing task to encourage attentional disengagement, and IOR effects were observed in the MLD children. Taken together, the present experiments indicate that MLD children are sluggish in disengaging spatial attention.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Mathematical learning difficulties (MLD) refer to a variety of deficits in math skills, typically pertaining to the domains of arithmetic and problem solving. The present study examined the time course of attentional orienting in MLD children with a spatial cueing task, by parametrically manipulating the cue-target onset asynchrony (CTOA). The results of Experiment 1 revealed that, in contrast to typically developing children, the inhibitory aftereffect of attentional orienting, frequently referred to as inhibition of return (IOR), was not observed in the MLD children, even at the longest CTOA tested (800 ms). However, robust early facilitation effects were observed in the MLD children, suggesting that they have difficulties in attentional disengagement rather than attentional engagement. In a second experiment, a secondary cue was introduced to the cueing task to encourage attentional disengagement, and IOR effects were observed in the MLD children. Taken together, the present experiments indicate that MLD children are sluggish in disengaging spatial attention. |
Kaining Zhang; Charles D Chen; Ilya E Monosov Novelty, salience, and surprise timing are signaled by neurons in the basal forebrain Journal Article Current Biology, 29 (1), pp. 134–142, 2019. @article{Zhang2019g, title = {Novelty, salience, and surprise timing are signaled by neurons in the basal forebrain}, author = {Kaining Zhang and Charles D Chen and Ilya E Monosov}, doi = {10.1016/j.cub.2018.11.012}, year = {2019}, date = {2019-01-01}, journal = {Current Biology}, volume = {29}, number = {1}, pages = {134--142}, publisher = {Elsevier Ltd.}, abstract = {The basal forebrain (BF) is a principal source of modulation of the neocortex [1–6] and is thought to regulate cognitive functions such as attention, motivation, and learning by broadcasting information about salience [2, 3, 5, 7–19]. However, events can be salient for multiple reasons—such as novelty, surprise, or reward prediction errors [20–24]—and to date, precisely which salience-related information the BF broadcasts is unclear. Here, we report that the primate BF contains at least two types of neurons that often process salient events in distinct manners: one with phasic burst responses to cues predicting salient events and one with ramping activity anticipating such events. Bursting neurons respond to cues that convey predictions about the magnitude, probability, and timing of primary reinforcements. They also burst to the reinforcement itself, particularly when it is unexpected. However, they do not have a selective response to reinforcement omission (the unexpected absence of an event). Thus, bursting neurons do not convey value-prediction errors but do signal surprise associated with external events. Indeed, they are not limited to processing primary reinforcement: they discriminate fully expected novel visual objects from familiar objects and respond to object-sequence violations. In contrast, ramping neurons predict the timing of many salient, novel, and surprising events. Their ramping activity is highly sensitive to the subjects' confidence in event timing and on average encodes the subjects' surprise after unexpected events occur. These data suggest that the primate BF contains mechanisms to anticipate the timing of a diverse set of important external events (via ramping activity) and to rapidly deploy cognitive resources when these events occur (via short latency bursting).}, keywords = {}, pubstate = {published}, tppubtype = {article} } The basal forebrain (BF) is a principal source of modulation of the neocortex [1–6] and is thought to regulate cognitive functions such as attention, motivation, and learning by broadcasting information about salience [2, 3, 5, 7–19]. However, events can be salient for multiple reasons—such as novelty, surprise, or reward prediction errors [20–24]—and to date, precisely which salience-related information the BF broadcasts is unclear. Here, we report that the primate BF contains at least two types of neurons that often process salient events in distinct manners: one with phasic burst responses to cues predicting salient events and one with ramping activity anticipating such events. Bursting neurons respond to cues that convey predictions about the magnitude, probability, and timing of primary reinforcements. They also burst to the reinforcement itself, particularly when it is unexpected. However, they do not have a selective response to reinforcement omission (the unexpected absence of an event). 
Thus, bursting neurons do not convey value-prediction errors but do signal surprise associated with external events. Indeed, they are not limited to processing primary reinforcement: they discriminate fully expected novel visual objects from familiar objects and respond to object-sequence violations. In contrast, ramping neurons predict the timing of many salient, novel, and surprising events. Their ramping activity is highly sensitive to the subjects' confidence in event timing and on average encodes the subjects' surprise after unexpected events occur. These data suggest that the primate BF contains mechanisms to anticipate the timing of a diverse set of important external events (via ramping activity) and to rapidly deploy cognitive resources when these events occur (via short latency bursting). |
Felicia Zhang; Sagi Jaffe-Dax; Robert C Wilson; Lauren L Emberson Prediction in infants and adults: A pupillometry study Journal Article Developmental Science, 22 (4), pp. 1–9, 2019. @article{Zhang2019h, title = {Prediction in infants and adults: A pupillometry study}, author = {Felicia Zhang and Sagi Jaffe-Dax and Robert C Wilson and Lauren L Emberson}, doi = {10.1111/desc.12780}, year = {2019}, date = {2019-12-01}, journal = {Developmental Science}, volume = {22}, number = {4}, pages = {1--9}, publisher = {John Wiley & Sons, Ltd}, abstract = {Adults use both bottom-up sensory inputs and top-down signals to generate predictions about future sensory inputs. Infants have also been shown to make predictions with simple stimuli and recent work has suggested top-down processing is available early in infancy. However, it is unknown whether this indicates that top-down prediction is an ability that is continuous across the lifespan or whether an infant's ability to predict is different from an adult's, qualitatively or quantitatively. We employed pupillometry to provide a direct comparison of prediction abilities across these disparate age groups. Pupil dilation response (PDR) was measured in 6-month-olds and adults as they completed an identical implicit learning task designed to help learn associations between sounds and pictures. We found significantly larger PDR for visual omission trials (i.e. trials that violated participants' predictions without the presentation of new stimuli to control for bottom-up signals) compared to visual present trials (i.e. trials that confirmed participants' predictions) in both age groups. Furthermore, a computational learning model that is closely linked to prediction error (Rescorla-Wagner model) demonstrated similar learning trajectories suggesting a continuity of predictive capacity and learning across the two age groups.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Adults use both bottom-up sensory inputs and top-down signals to generate predictions about future sensory inputs. Infants have also been shown to make predictions with simple stimuli and recent work has suggested top-down processing is available early in infancy. However, it is unknown whether this indicates that top-down prediction is an ability that is continuous across the lifespan or whether an infant's ability to predict is different from an adult's, qualitatively or quantitatively. We employed pupillometry to provide a direct comparison of prediction abilities across these disparate age groups. Pupil dilation response (PDR) was measured in 6-month-olds and adults as they completed an identical implicit learning task designed to help learn associations between sounds and pictures. We found significantly larger PDR for visual omission trials (i.e. trials that violated participants' predictions without the presentation of new stimuli to control for bottom-up signals) compared to visual present trials (i.e. trials that confirmed participants' predictions) in both age groups. Furthermore, a computational learning model that is closely linked to prediction error (Rescorla-Wagner model) demonstrated similar learning trajectories suggesting a continuity of predictive capacity and learning across the two age groups. |
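As context for the learning model named above: the Rescorla-Wagner rule updates associative strength by a fraction of the prediction error. In a standard textbook formulation (notation ours, not necessarily the paper's exact parameterization):

\[ V_{t+1} = V_t + \alpha \beta \,(\lambda - V_t) \]

where \(V_t\) is the current cue-outcome association, \(\lambda\) is the maximum strength the outcome supports, \(\alpha\) and \(\beta\) are salience (learning-rate) parameters, and the prediction error \(\lambda - V_t\) is the quantity that pupil dilation is proposed to track.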
Xuemeng Zhang; Yijun Luo; Yong Liu; Chao Yang; Hong Chen Lack of conflict during food choice is associated with the failure of restrained eating Journal Article Eating Behaviors, 34 , pp. 1–8, 2019. @article{Zhang2019i, title = {Lack of conflict during food choice is associated with the failure of restrained eating}, author = {Xuemeng Zhang and Yijun Luo and Yong Liu and Chao Yang and Hong Chen}, doi = {10.1016/j.eatbeh.2019.101309}, year = {2019}, date = {2019-01-01}, journal = {Eating Behaviors}, volume = {34}, pages = {1--8}, abstract = {Restrained eaters tend to sustain a restriction in caloric intake to lose or maintain body weight; however, only a few restrained eaters can achieve the goal of restricting their caloric intake to lose or maintain body weight. Those who are effective restrained eaters habitually adhere to their intentions to avoid eating certain palatable foods, whereas those who are ineffective restrained eaters are generally unable to translate their intentions into behavior. To restrain eating regardless of temptation, an individual must first identify potential conflicts between achieving restrained eating and temptation to eat. Regarding food selections, the association between a lack of conflict between temptation, eating enjoyment, and weight loss or maintenance goals and the failure of restriction of caloric intake remains unknown. The present study used an eye-tracking technique to assess the degree of conflict experienced by effective and ineffective restrained eaters during food choice. Participants were required to choose between pairs of high- and low-calorie foods. The results showed that choosing the low-calorie food was associated with the experience of more conflict, measured by longer response times and more gaze switches, than choosing the high-calorie food. Ineffective restrained eaters experienced less conflict, exhibiting shorter response times and fewer gaze switches, than did effective restrained eaters, which suggests that a failure to restrain eating might be associated with a lack of experience of conflict.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Restrained eaters tend to sustain a restriction in caloric intake to lose or maintain body weight; however, only a few restrained eaters can achieve the goal of restricting their caloric intake to lose or maintain body weight. Those who are effective restrained eaters habitually adhere to their intentions to avoid eating certain palatable foods, whereas those who are ineffective restrained eaters are generally unable to translate their intentions into behavior. To restrain eating regardless of temptation, an individual must first identify potential conflicts between achieving restrained eating and temptation to eat. Regarding food selections, the association between a lack of conflict between temptation, eating enjoyment, and weight loss or maintenance goals and the failure of restriction of caloric intake remains unknown. The present study used an eye-tracking technique to assess the degree of conflict experienced by effective and ineffective restrained eaters during food choice. Participants were required to choose between pairs of high- and low-calorie foods. The results showed that choosing the low-calorie food was associated with the experience of more conflict, measured by longer response times and more gaze switches, than choosing the high-calorie food. 
Ineffective restrained eaters experienced less conflict, exhibiting shorter response times and fewer gaze switches, than did effective restrained eaters, which suggests that a failure to restrain eating might be associated with a lack of experience of conflict. |
Bao Zhang; Shuhui Liu; Cenlou Hu; Ziwen Luo; Sai Huang; Jie Sui Enhanced memory-driven attentional capture in action video game players Journal Article Computers in Human Behavior, 107 , pp. 1–7, 2020. @article{Zhang2020a, title = {Enhanced memory-driven attentional capture in action video game players}, author = {Bao Zhang and Shuhui Liu and Cenlou Hu and Ziwen Luo and Sai Huang and Jie Sui}, doi = {10.1016/j.chb.2020.106271}, year = {2020}, date = {2020-01-01}, journal = {Computers in Human Behavior}, volume = {107}, pages = {1--7}, publisher = {Elsevier Ltd}, abstract = {Action video game players (AVGPs) have been shown to have an enhanced cognitive control ability to reduce stimulus-driven attentional capture (e.g., from an exogenous salient distractor) compared with non-action video game players (NVGPs). Here we examined whether these benefits could extend to the memory-driven attentional capture (i.e., working memory (WM) representations bias visual attention toward a matching distractor). AVGPs and NVGPs were instructed to complete a visual search task while actively maintaining 1, 2 or 4 items in WM. There was a robust advantage to the memory-driven attentional capture in reaction time and first eye movement fixation in the AVGPs compared to the NVGPs when they had to maintain one item in WM. Moreover, the effect of memory-driven attentional capture was maintained in the AVGPs when the WM load was increased, but it was eliminated in the NVGPs. The results suggest that AVGPs may devote more attentional resources to sustaining the cognitive control rather than to suppressing the attentional capture driven by the active WM representations.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Action video game players (AVGPs) have been shown to have an enhanced cognitive control ability to reduce stimulus-driven attentional capture (e.g., from an exogenous salient distractor) compared with non-action video game players (NVGPs). Here we examined whether these benefits could extend to the memory-driven attentional capture (i.e., working memory (WM) representations bias visual attention toward a matching distractor). AVGPs and NVGPs were instructed to complete a visual search task while actively maintaining 1, 2 or 4 items in WM. There was a robust advantage to the memory-driven attentional capture in reaction time and first eye movement fixation in the AVGPs compared to the NVGPs when they had to maintain one item in WM. Moreover, the effect of memory-driven attentional capture was maintained in the AVGPs when the WM load was increased, but it was eliminated in the NVGPs. The results suggest that AVGPs may devote more attentional resources to sustaining the cognitive control rather than to suppressing the attentional capture driven by the active WM representations. |
Hanshu Zhang; Joseph W Houpt Exaggerated prevalence effect with the explicit prevalence information: The description-experience gap in visual search Journal Article Attention, Perception, and Psychophysics, 82 (7), pp. 3340–3356, 2020. @article{Zhang2020b, title = {Exaggerated prevalence effect with the explicit prevalence information: The description-experience gap in visual search}, author = {Hanshu Zhang and Joseph W Houpt}, doi = {10.3758/s13414-020-02045-8}, year = {2020}, date = {2020-01-01}, journal = {Attention, Perception, and Psychophysics}, volume = {82}, number = {7}, pages = {3340--3356}, publisher = {Attention, Perception, & Psychophysics}, abstract = {Despite the increasing focus on target prevalence in visual search research, few papers have thoroughly examined the effect of how target prevalence is communicated. Findings in the judgment and decision-making literature have demonstrated that people behave differently depending on whether probabilistic information is made explicit or learned through experience, hence there is potential for a similar difference when communicating prevalence in visual search. Our current research examined how visual search changes depending on whether the target prevalence information was explicitly given to observers or they learned the prevalence through experience, with additional manipulations of target reward and salience. We found that when the target prevalence was low, learning prevalence from experience resulted in more target-present responses and longer search times before quitting compared to when observers were explicitly informed of the target probability. The discrepancy narrowed with increased prevalence and reversed in the high target prevalence condition. Eye-tracking results indicated that search with experience consistently resulted in longer fixation durations, with the largest difference in low-prevalence conditions. Longer search times were primarily due to observers revisiting more items. Our work addresses the importance of exploring the influence of probability communication in future prevalence visual search studies.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Despite the increasing focus on target prevalence in visual search research, few papers have thoroughly examined the effect of how target prevalence is communicated. Findings in the judgment and decision-making literature have demonstrated that people behave differently depending on whether probabilistic information is made explicit or learned through experience, hence there is potential for a similar difference when communicating prevalence in visual search. Our current research examined how visual search changes depending on whether the target prevalence information was explicitly given to observers or they learned the prevalence through experience, with additional manipulations of target reward and salience. We found that when the target prevalence was low, learning prevalence from experience resulted in more target-present responses and longer search times before quitting compared to when observers were explicitly informed of the target probability. The discrepancy narrowed with increased prevalence and reversed in the high target prevalence condition. Eye-tracking results indicated that search with experience consistently resulted in longer fixation durations, with the largest difference in low-prevalence conditions. Longer search times were primarily due to observers revisiting more items. Our work addresses the importance of exploring the influence of probability communication in future prevalence visual search studies. |
Hui Zhang; Ping Wang; Tinghu Kang Aesthetic experience of field cognitive style in the appreciation of cursive and running scripts: An eye movement study Journal Article Art and Design Review, 8 , pp. 215–227, 2020. @article{Zhang2020c, title = {Aesthetic experience of field cognitive style in the appreciation of cursive and running scripts: An eye movement study}, author = {Hui Zhang and Ping Wang and Tinghu Kang}, doi = {10.4236/adr.2020.84017}, year = {2020}, date = {2020-01-01}, journal = {Art and Design Review}, volume = {8}, pages = {215--227}, abstract = {This study compares the characteristics of the aesthetic experience of different cognitive styles in calligraphy style. The study used a cursive script and running script as experimental materials and the EyeLink 1000 Plus eye tracker to record eye movements while viewing calligraphy. The results showed that, in the overall analysis, there were differences in the field cognitive style in total fixation counts, saccade amplitude, and saccade counts, and differences in the calligraphic style in total fixation counts and saccade counts. Further local analysis found significant differences in the field cognitive style in mean pupil diameter, fixation counts, and regression in count, and that there were differences in fixation counts and regression in count in the calligraphic style, as well as interactions with the area of interest. The results indicate that the field cognitive style is characterized by different aesthetic experiences in calligraphy appreciation and that there are aesthetic preferences in calligraphy style.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study compares the characteristics of the aesthetic experience of different cognitive styles in calligraphy style. The study used a cursive script and running script as experimental materials and the EyeLink 1000 Plus eye tracker to record eye movements while viewing calligraphy. The results showed that, in the overall analysis, there were differences in the field cognitive style in total fixation counts, saccade amplitude, and saccade counts, and differences in the calligraphic style in total fixation counts and saccade counts. Further local analysis found significant differences in the field cognitive style in mean pupil diameter, fixation counts, and regression in count, and that there were differences in fixation counts and regression in count in the calligraphic style, as well as interactions with the area of interest. The results indicate that the field cognitive style is characterized by different aesthetic experiences in calligraphy appreciation and that there are aesthetic preferences in calligraphy style. |
Xinru Zhang; Zhongling Pi; Chenyu Li; Weiping Hu Intrinsic motivation enhances online group creativity via promoting members' effort, not interaction Journal Article British Journal of Educational Technology, pp. 1–13, 2020. @article{Zhang2020eb, title = {Intrinsic motivation enhances online group creativity via promoting members' effort, not interaction}, author = {Xinru Zhang and Zhongling Pi and Chenyu Li and Weiping Hu}, doi = {10.1111/bjet.13045}, year = {2020}, date = {2020-01-01}, journal = {British Journal of Educational Technology}, pages = {1--13}, abstract = {Intrinsic motivation is seen as the principal source of vitality in educational settings. This study examined whether intrinsic motivation promoted online group creativity and tested a cognitive mechanism that might explain this effect. University students (N = 72; 61 women) who volunteered to participate were asked to fulfill a creative task with a peer using online software. The peer was actually a fake participant who was programmed to send prepared answers in sequence. Ratings of creativity (fluency, flexibility and originality) and eye movement data (focus on own vs. peer's ideas on the screen) were used to compare students who were induced to have high intrinsic motivation and those induced to have low intrinsic motivation. Results showed that compared to participants with low intrinsic motivation, those with high intrinsic motivation showed higher fluency and flexibility on the creative task and spent a larger percentage of time looking at their own ideas on the screen. The two groups did not differ in how much they looked at the peer's ideas. In addition, students' percentage dwell time on their own ideas mediated the beneficial effect of intrinsic motivation on idea fluency. These results suggest that although intrinsic motivation could enhance the fluency of creative ideas in an online group, it does not necessarily promote interaction among group members. Given the importance of interaction in online group settings, findings of this study suggest that in addition to enhancing intrinsic motivation, other measures should be taken to promote the interaction behavior in online groups. Practitioner Notes: What is already known about this topic: The generation of creative ideas in group settings calls for both individual effort and cognitive stimulation from other members. Intrinsic motivation has been shown to foster creativity in face-to-face groups, which is primarily due to the promotion of individual effort. In online group settings, students' creativity tends to rely on intrinsic motivation because the extrinsic motivation typically provided by teachers' supervision and peer pressure in face-to-face settings is minimized online. What this paper adds: Creative performance in online groups benefits from intrinsic motivation. Intrinsic motivation promotes creativity through an individual's own cognitive effort instead of interaction among members. Implications for practice and/or policy: Improving students' intrinsic motivation is an effective way to promote creativity in online groups. Teachers should take additional steps to encourage students to interact more with each other in online groups.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Intrinsic motivation is seen as the principal source of vitality in educational settings. This study examined whether intrinsic motivation promoted online group creativity and tested a cognitive mechanism that might explain this effect. University students (N = 72; 61 women) who volunteered to participate were asked to fulfill a creative task with a peer using online software. The peer was actually a fake participant who was programmed to send prepared answers in sequence. Ratings of creativity (fluency, flexibility and originality) and eye movement data (focus on own vs. peer's ideas on the screen) were used to compare students who were induced to have high intrinsic motivation and those induced to have low intrinsic motivation. Results showed that compared to participants with low intrinsic motivation, those with high intrinsic motivation showed higher fluency and flexibility on the creative task and spent a larger percentage of time looking at their own ideas on the screen. The two groups did not differ in how much they looked at the peer's ideas. In addition, students' percentage dwell time on their own ideas mediated the beneficial effect of intrinsic motivation on idea fluency. These results suggest that although intrinsic motivation could enhance the fluency of creative ideas in an online group, it does not necessarily promote interaction among group members. Given the importance of interaction in online group settings, findings of this study suggest that in addition to enhancing intrinsic motivation, other measures should be taken to promote the interaction behavior in online groups. Practitioner Notes: What is already known about this topic: The generation of creative ideas in group settings calls for both individual effort and cognitive stimulation from other members. Intrinsic motivation has been shown to foster creativity in face-to-face groups, which is primarily due to the promotion of individual effort. In online group settings, students' creativity tends to rely on intrinsic motivation because the extrinsic motivation typically provided by teachers' supervision and peer pressure in face-to-face settings is minimized online. What this paper adds: Creative performance in online groups benefits from intrinsic motivation. Intrinsic motivation promotes creativity through an individual's own cognitive effort instead of interaction among members. Implications for practice and/or policy: Improving students' intrinsic motivation is an effective way to promote creativity in online groups. Teachers should take additional steps to encourage students to interact more with each other in online groups. |
Y Zhang; Q Yuan Effect of the combination of biofeedback and sequential psychotherapy on the cognitive function of trauma patients based on the fusion of set theory model Journal Article Indian Journal of Pharmaceutical Sciences, 82 , pp. 32–40, 2020. @article{Zhang2020f, title = {Effect of the combination of biofeedback and sequential psychotherapy on the cognitive function of trauma patients based on the fusion of set theory model}, author = {Y Zhang and Q Yuan}, doi = {10.36468/pharmaceutical-sciences.spl.78}, year = {2020}, date = {2020-01-01}, journal = {Indian Journal of Pharmaceutical Sciences}, volume = {82}, pages = {32--40}, abstract = {This study took a special group of trauma patients as research subjects and proposed a method, based on the fusion of the set theory model, for analysing the effect of the combination of biofeedback and sequential psychotherapy on the cognitive function of these patients. The occurrence and development of post-traumatic stress disorder and its relation to cognitive function are investigated. The set theory model is used to survey the effect of the combination of biofeedback and sequential psychotherapy on patients with post-traumatic stress disorder, describing the occurrence, development, change trajectory and time-course characteristics of the disorder. The set theory model was also employed to investigate the cognitive development characteristics of these trauma patients, and through it the psychological-behavioral mechanism underlying the occurrence and development of post-traumatic stress disorder is revealed. The combination of biofeedback and sequential psychotherapy is used to investigate the effect of post-traumatic stress disorder on the cognitive function of the trauma patients. The results of this study could provide scientific advice for the placement and psychological assistance of trauma patients, a scientific basis for targeted psychological intervention and overall planning of such interventions, and scientific, objective indicators and methods for the diagnosis and assessment of traumatic psychology interventions in the future.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study took a special group of trauma patients as research subjects and proposed a method, based on the fusion of the set theory model, for analysing the effect of the combination of biofeedback and sequential psychotherapy on the cognitive function of these patients. The occurrence and development of post-traumatic stress disorder and its relation to cognitive function are investigated. The set theory model is used to survey the effect of the combination of biofeedback and sequential psychotherapy on patients with post-traumatic stress disorder, describing the occurrence, development, change trajectory and time-course characteristics of the disorder. The set theory model was also employed to investigate the cognitive development characteristics of these trauma patients, and through it the psychological-behavioral mechanism underlying the occurrence and development of post-traumatic stress disorder is revealed. The combination of biofeedback and sequential psychotherapy is used to investigate the effect of post-traumatic stress disorder on the cognitive function of the trauma patients. The results of this study could provide scientific advice for the placement and psychological assistance of trauma patients, a scientific basis for targeted psychological intervention and overall planning of such interventions, and scientific, objective indicators and methods for the diagnosis and assessment of traumatic psychology interventions in the future. |
Han Zhang; Chuyan Qu; Kevin F Miller; Kai S Cortina Missing the joke: Reduced rereading of garden-path jokes during mind-wandering Journal Article Journal of Experimental Psychology: Learning, Memory, and Cognition, 46 (4), pp. 638–648, 2020. @article{Zhang2020g, title = {Missing the joke: Reduced rereading of garden-path jokes during mind-wandering}, author = {Han Zhang and Chuyan Qu and Kevin F Miller and Kai S Cortina}, doi = {10.1037/xlm0000745}, year = {2020}, date = {2020-01-01}, journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, volume = {46}, number = {4}, pages = {638--648}, abstract = {Mind-wandering (i.e., thoughts irrelevant to the current task) occurs frequently during reading. The current study examined whether mind-wandering was associated with reduced rereading when the reader read the so-called garden-path jokes. In a garden-path joke, the reader's initial interpretation is violated by the final punchline, and the violation creates a semantic incongruity that needs to be resolved (e.g., "My girlfriend has read so many negative things about smoking. Therefore, she decided to quit reading."). Rereading text prior to the punchline can help resolve the incongruity. In a main study and a preregistered replication, participants read jokes and nonfunny controls embedded in filler texts and responded to thought probes that assessed intentional and unintentional mind-wandering. Results were consistent across the two studies: When the reader was not mind-wandering, jokes elicited more rereading (from the punchline) than the nonfunny controls did, and had a recall advantage over the nonfunny controls. During mind-wandering, however, the additional eye movement processing and the recall advantage of jokes were generally reduced. These results show that mind-wandering is associated with reduced rereading, which is important for resolving higher-level comprehension difficulties.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Mind-wandering (i.e., thoughts irrelevant to the current task) occurs frequently during reading. The current study examined whether mind-wandering was associated with reduced rereading when the reader read the so-called garden-path jokes. In a garden-path joke, the reader's initial interpretation is violated by the final punchline, and the violation creates a semantic incongruity that needs to be resolved (e.g., "My girlfriend has read so many negative things about smoking. Therefore, she decided to quit reading."). Rereading text prior to the punchline can help resolve the incongruity. In a main study and a preregistered replication, participants read jokes and nonfunny controls embedded in filler texts and responded to thought probes that assessed intentional and unintentional mind-wandering. Results were consistent across the two studies: When the reader was not mind-wandering, jokes elicited more rereading (from the punchline) than the nonfunny controls did, and had a recall advantage over the nonfunny controls. During mind-wandering, however, the additional eye movement processing and the recall advantage of jokes were generally reduced. These results show that mind-wandering is associated with reduced rereading, which is important for resolving higher-level comprehension difficulties. |
Gu Zhao; Qiang Liu; Jun Jiao; Peiling Zhou; Hong Li; Hong-jin Sun Dual-state modulation of the contextual cueing effect: Evidence from eye movement recordings Journal Article Journal of Vision, 12 (6), pp. 11–11, 2012. @article{Zhao2012, title = {Dual-state modulation of the contextual cueing effect: Evidence from eye movement recordings}, author = {Gu Zhao and Qiang Liu and Jun Jiao and Peiling Zhou and Hong Li and Hong-jin Sun}, doi = {10.1167/12.6.11}, year = {2012}, date = {2012-01-01}, journal = {Journal of Vision}, volume = {12}, number = {6}, pages = {11--11}, abstract = {Repeated configurations of random elements induce better search performance than displays of novel random configurations. The mechanism of this contextual cueing effect has been investigated through the use of the RT × Set Size function. There are divergent views on whether the contextual cueing effect is driven by attentional guidance or facilitation of initial perceptual processing or response selection. To explore this question, we used eye movement recording in this study, which offers information about the substages of the search task. The results suggest that the contextual cueing effect is driven mainly by attentional guidance, with facilitation of response selection also playing a role.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Repeated configurations of random elements induce better search performance than displays of novel random configurations. The mechanism of this contextual cueing effect has been investigated through the use of the RT × Set Size function. There are divergent views on whether the contextual cueing effect is driven by attentional guidance or facilitation of initial perceptual processing or response selection. To explore this question, we used eye movement recording in this study, which offers information about the substages of the search task. The results suggest that the contextual cueing effect is driven mainly by attentional guidance, with facilitation of response selection also playing a role. |
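The RT × Set Size logic invoked in this abstract can be made explicit with the standard linear decomposition of visual search times (a textbook idealization, not necessarily the authors' exact analysis):

\[ \mathrm{RT}(N) = a + b\,N \]

where \(N\) is the set size. Attentional guidance by a repeated context predicts a shallower slope \(b\) (fewer items inspected before the target is found), whereas facilitation of initial perceptual processing or response selection predicts a smaller intercept \(a\) with the slope unchanged.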
Jingjing Zhao; Yonghui Wang; Donglai Liu; Liang Zhao; Peng Liu Strength of object representation: its key role in object-based attention for determining the competition result between Gestalt and top-down objects Journal Article Attention, Perception, and Psychophysics, 77 (7), pp. 2284–2292, 2015. @article{Zhao2015, title = {Strength of object representation: its key role in object-based attention for determining the competition result between Gestalt and top-down objects}, author = {Jingjing Zhao and Yonghui Wang and Donglai Liu and Liang Zhao and Peng Liu}, doi = {10.3758/s13414-015-0922-5}, year = {2015}, date = {2015-01-01}, journal = {Attention, Perception, and Psychophysics}, volume = {77}, number = {7}, pages = {2284--2292}, abstract = {It was found in previous studies that two types of objects (rectangles formed according to the Gestalt principle and Chinese words formed in a top-down fashion) can both induce an object-based effect. The aim of the present study was to investigate how the strength of an object representation affects the result of the competition between these two types of objects based on research carried out by Liu, Wang and Zhou [(2011) Acta Psychologica, 138(3), 397-404]. In Experiment 1, the rectangles were filled with two different colors to increase the strength of Gestalt object representation, and we found that the object effect changed significantly for the different stimulus types. Experiment 2 used Chinese words with various familiarities to manipulate the strength of the top-down object representation. As a result, the object-based effect induced by rectangles was observed only when the Chinese word familiarity was low. These results suggest that the strength of object representation determines the result of competition between different types of objects.}, keywords = {}, pubstate = {published}, tppubtype = {article} } It was found in previous studies that two types of objects (rectangles formed according to the Gestalt principle and Chinese words formed in a top-down fashion) can both induce an object-based effect. The aim of the present study was to investigate how the strength of an object representation affects the result of the competition between these two types of objects based on research carried out by Liu, Wang and Zhou [(2011) Acta Psychologica, 138(3), 397-404]. In Experiment 1, the rectangles were filled with two different colors to increase the strength of Gestalt object representation, and we found that the object effect changed significantly for the different stimulus types. Experiment 2 used Chinese words with various familiarities to manipulate the strength of the top-down object representation. As a result, the object-based effect induced by rectangles was observed only when the Chinese word familiarity was low. These results suggest that the strength of object representation determines the result of competition between different types of objects. |
Jing Zhao; Hang Yang; Xuchu Weng; Zhiguo Wang Emergent attentional bias toward visual word forms in the environment: Evidence from eye movements Journal Article Frontiers in Psychology, 9 , pp. 1–7, 2018. @article{Zhao2018, title = {Emergent attentional bias toward visual word forms in the environment: Evidence from eye movements}, author = {Jing Zhao and Hang Yang and Xuchu Weng and Zhiguo Wang}, doi = {10.3389/fpsyg.2018.01378}, year = {2018}, date = {2018-01-01}, journal = {Frontiers in Psychology}, volume = {9}, pages = {1--7}, abstract = {Young children are frequently exposed to environmental prints (e.g., billboards and product labels) that contain visual word forms on a daily basis. As the visual word forms in environmental prints are frequently used to convey information critical to an individual's survival and wellbeing (e.g., "STOP" in the stop sign), it is conceivable that an attentional bias toward words in the environment may emerge as the reading ability of young children develops. Empirical findings relevant to this issue, however, are inconclusive so far. The present study examines this issue in children in the early stages of formal reading training (grades 1, 3, and 5) with the eye-tracking technique. Children viewed images with word and non-word visual information (environmental prints) and images with the same words in standard typeface on a plain background (standard prints). For children in grade 1, the latency of their first fixations on words in environmental prints was longer than those in standard prints. This latency cost, however, was markedly reduced in grades 3 and 5, suggesting that in older children an attentional bias toward words has emerged to help filter out the non-word visual information in environmental prints. Importantly, this attentional bias was found to correlate moderately with word reading ability. These findings show that an attentional bias toward visual word forms emerges shortly after the start of formal schooling and it is closely linked to the development of reading skills.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Young children are frequently exposed to environmental prints (e.g., billboards and product labels) that contain visual word forms on a daily basis. As the visual word forms in environmental prints are frequently used to convey information critical to an individual's survival and wellbeing (e.g., "STOP" in the stop sign), it is conceivable that an attentional bias toward words in the environment may emerge as the reading ability of young children develops. Empirical findings relevant to this issue, however, are inconclusive so far. The present study examines this issue in children in the early stages of formal reading training (grades 1, 3, and 5) with the eye-tracking technique. Children viewed images with word and non-word visual information (environmental prints) and images with the same words in standard typeface on a plain background (standard prints). For children in grade 1, the latency of their first fixations on words in environmental prints was longer than those in standard prints. This latency cost, however, was markedly reduced in grades 3 and 5, suggesting that in older children an attentional bias toward words has emerged to help filter out the non-word visual information in environmental prints. Importantly, this attentional bias was found to correlate moderately with word reading ability. 
These findings show that an attentional bias toward visual word forms emerges shortly after the start of formal schooling and it is closely linked to the development of reading skills. |
Sijia Zhao; Gabriela Bury; Alice Milne; Maria Chait Pupillometry as an objective measure of sustained attention in young and older listeners Journal Article Trends in Hearing, 23 , 2019. @article{Zhao2019a, title = {Pupillometry as an objective measure of sustained attention in young and older listeners}, author = {Sijia Zhao and Gabriela Bury and Alice Milne and Maria Chait}, doi = {10.1101/579540}, year = {2019}, date = {2019-01-01}, journal = {Trends in Hearing}, volume = {23}, abstract = {The ability to sustain attention on a task-relevant sound-source whilst avoiding distraction from other concurrent sounds is fundamental to listening in crowded environments. To isolate this aspect of hearing we designed a paradigm that continuously measured behavioural and pupillometry responses during 25-second-long trials in young (18-35 yo) and older (63-79 yo) participants. The auditory stimuli consisted of a number (1, 2 or 3) of concurrent, spectrally distinct tone streams. On each trial, participants detected brief silent gaps in one of the streams whilst resisting distraction from the others. Behavioural performance demonstrated increasing difficulty with time-on-task and with number/proximity of distractor streams. In young listeners (N=20), pupillometry revealed that pupil diameter (on the group and individual level) was dynamically modulated by instantaneous task difficulty such that periods where behavioural performance revealed a strain on sustained attention were also accompanied by increased pupil diameter. Only trials on which participants performed successfully were included in the pupillometry analysis. Therefore, the observed effects reflect consequences of task demands as opposed to failure to attend. In line with existing reports, we observed global changes to pupil dynamics in the older group, including decreased pupil diameter, a limited dilation range, and reduced temporal variability. However, despite these changes, the older group showed similar effects of attentive tracking to those observed in the younger listeners. Overall, our results demonstrate that pupillometry can be a reliable and time-sensitive measure of the effort associated with attentive tracking over long durations in both young and (with some caveats) older listeners.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The ability to sustain attention on a task-relevant sound-source whilst avoiding distraction from other concurrent sounds is fundamental to listening in crowded environments. To isolate this aspect of hearing we designed a paradigm that continuously measured behavioural and pupillometry responses during 25-second-long trials in young (18-35 yo) and older (63-79 yo) participants. The auditory stimuli consisted of a number (1, 2 or 3) of concurrent, spectrally distinct tone streams. On each trial, participants detected brief silent gaps in one of the streams whilst resisting distraction from the others. Behavioural performance demonstrated increasing difficulty with time-on-task and with number/proximity of distractor streams. In young listeners (N=20), pupillometry revealed that pupil diameter (on the group and individual level) was dynamically modulated by instantaneous task difficulty such that periods where behavioural performance revealed a strain on sustained attention were also accompanied by increased pupil diameter. Only trials on which participants performed successfully were included in the pupillometry analysis. Therefore, the observed effects reflect consequences of task demands as opposed to failure to attend. In line with existing reports, we observed global changes to pupil dynamics in the older group, including decreased pupil diameter, a limited dilation range, and reduced temporal variability. However, despite these changes, the older group showed similar effects of attentive tracking to those observed in the younger listeners. Overall, our results demonstrate that pupillometry can be a reliable and time-sensitive measure of the effort associated with attentive tracking over long durations in both young and (with some caveats) older listeners. |
Sijia Zhao; Maria Chait; Frederic Dick; Peter Dayan; Shigeto Furukawa; Hsin-I Liao Pupil-linked phasic arousal evoked by violation but not emergence of regularity within rapid sound sequences Journal Article Nature Communications, 10 , pp. 4030, 2019. @article{Zhao2019b, title = {Pupil-linked phasic arousal evoked by violation but not emergence of regularity within rapid sound sequences}, author = {Sijia Zhao and Maria Chait and Frederic Dick and Peter Dayan and Shigeto Furukawa and Hsin-I Liao}, doi = {10.1038/s41467-019-12048-1}, year = {2019}, date = {2019-12-01}, journal = {Nature Communications}, volume = {10}, pages = {4030}, publisher = {Springer Science and Business Media LLC}, abstract = {The ability to track the statistics of our surroundings is a key computational challenge. A prominent theory proposes that the brain monitors for unexpected uncertainty: events which deviate substantially from model predictions, indicating model failure. Norepinephrine is thought to play a key role in this process by serving as an interrupt signal, initiating model-resetting. However, evidence to date is from paradigms where participants actively monitored stimulus statistics. To determine whether Norepinephrine routinely reports the statistical structure of our surroundings, even when not behaviourally relevant, we used rapid tone-pip sequences that contained salient pattern-changes associated with abrupt structural violations vs. emergence of regular structure. Phasic pupil dilations (PDR) were monitored to assess Norepinephrine. We reveal a remarkable specificity: When not behaviourally relevant, only abrupt structural violations evoke a PDR. The results demonstrate that Norepinephrine tracks unexpected uncertainty on rapid time scales relevant to sensory signals.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The ability to track the statistics of our surroundings is a key computational challenge. A prominent theory proposes that the brain monitors for unexpected uncertainty: events which deviate substantially from model predictions, indicating model failure. Norepinephrine is thought to play a key role in this process by serving as an interrupt signal, initiating model-resetting. However, evidence to date is from paradigms where participants actively monitored stimulus statistics. To determine whether Norepinephrine routinely reports the statistical structure of our surroundings, even when not behaviourally relevant, we used rapid tone-pip sequences that contained salient pattern-changes associated with abrupt structural violations vs. emergence of regular structure. Phasic pupil dilations (PDR) were monitored to assess Norepinephrine. We reveal a remarkable specificity: When not behaviourally relevant, only abrupt structural violations evoke a PDR. The results demonstrate that Norepinephrine tracks unexpected uncertainty on rapid time scales relevant to sensory signals. |
Sijia Zhao; Nga Wai Yum; Lucas Benjamin; Elia Benhamou; Makoto Yoneya; Shigeto Furukawa; Frederic Dick; Malcolm Slaney; Maria Chait Rapid ocular responses are modulated by bottom-up-driven auditory salience Journal Article Journal of Neuroscience, 39 (39), pp. 7703–7714, 2019. @article{Zhao2019c, title = {Rapid ocular responses are modulated by bottom-up-driven auditory salience}, author = {Sijia Zhao and Nga Wai Yum and Lucas Benjamin and Elia Benhamou and Makoto Yoneya and Shigeto Furukawa and Frederic Dick and Malcolm Slaney and Maria Chait}, doi = {10.1523/JNEUROSCI.0776-19.2019}, year = {2019}, date = {2019-01-01}, journal = {Journal of Neuroscience}, volume = {39}, number = {39}, pages = {7703--7714}, abstract = {Despite the prevalent use of alerting sounds in alarms and human–machine interface systems and the long-hypothesized role of the auditory system as the brain's “early warning system,” we have only a rudimentary understanding of what determines auditory salience — the automatic attraction of attention by sound — and which brain mechanisms underlie this process. A major roadblock has been the lack of a robust, objective means of quantifying sound-driven attentional capture. Here we demonstrate that: (1) a reliable salience scale can be obtained from crowd-sourcing (N = 911), (2) acoustic roughness appears to be a driving feature behind this scaling, consistent with previous reports implicating roughness in the perceptual distinctiveness of sounds, and (3) crowd-sourced auditory salience correlates with objective autonomic measures. Specifically, we show that a salience ranking obtained from online raters correlated robustly with the superior colliculus-mediated ocular freezing response, microsaccadic inhibition (MSI), measured in naive, passively listening human participants (of either sex). More salient sounds evoked earlier and larger MSI, consistent with a faster orienting response. These results are consistent with the hypothesis that MSI reflects a general reorienting response that is evoked by potentially behaviorally important events regardless of their modality.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Despite the prevalent use of alerting sounds in alarms and human–machine interface systems and the long-hypothesized role of the auditory system as the brain's “early warning system,” we have only a rudimentary understanding of what determines auditory salience — the automatic attraction of attention by sound — and which brain mechanisms underlie this process. A major roadblock has been the lack of a robust, objective means of quantifying sound-driven attentional capture. Here we demonstrate that: (1) a reliable salience scale can be obtained from crowd-sourcing (N = 911), (2) acoustic roughness appears to be a driving feature behind this scaling, consistent with previous reports implicating roughness in the perceptual distinctiveness of sounds, and (3) crowd-sourced auditory salience correlates with objective autonomic measures. Specifically, we show that a salience ranking obtained from online raters correlated robustly with the superior colliculus-mediated ocular freezing response, microsaccadic inhibition (MSI), measured in naive, passively listening human participants (of either sex). More salient sounds evoked earlier and larger MSI, consistent with a faster orienting response.
These results are consistent with the hypothesis that MSI reflects a general reorienting response that is evoked by potentially behaviorally important events regardless of their modality. |
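Microsaccadic inhibition of the kind measured here is commonly quantified as a transient dip in microsaccade rate shortly after sound onset. A small illustrative sketch, assuming microsaccade onset times have already been detected upstream (for example with a velocity-threshold algorithm); the window and bin parameters are placeholders rather than the paper's:

```python
import numpy as np

def peristimulus_rate(ms_times, onsets, window=(-0.5, 1.0), bin_s=0.05):
    """Microsaccade rate (per second) in peri-stimulus time bins, pooled over trials."""
    edges = np.arange(window[0], window[1] + bin_s, bin_s)
    counts = np.zeros(len(edges) - 1)
    for trial_ms, t0 in zip(ms_times, onsets):
        counts += np.histogram(np.asarray(trial_ms) - t0, bins=edges)[0]
    return edges[:-1] + bin_s / 2, counts / (len(onsets) * bin_s)

# Toy data: 100 trials, sound onset at t=0, suppression 0.1-0.3 s after onset
rng = np.random.default_rng(1)
trials = []
for _ in range(100):
    t = rng.uniform(-0.5, 1.0, rng.poisson(2))                 # background microsaccades
    keep = (t < 0.1) | (t > 0.3) | (rng.random(t.size) > 0.8)  # simulate inhibition
    trials.append(t[keep])
t, rate = peristimulus_rate(trials, np.zeros(100))
print(f"post/pre rate ratio: {rate[(t > 0.1) & (t < 0.3)].mean() / rate[t < 0].mean():.2f}")
```

A ratio below 1 indicates inhibition; in practice one would also extract its latency and magnitude per sound, which is what the salience ranking was correlated against.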
Chenzhu Zhao Near or far? The effect of latest booking time on hotel booking intention: Based on eye-tracking experiments Journal Article International Journal of Frontiers in Sociology, 2 (7), pp. 1–12, 2020. @article{Zhao2020a, title = {Near or far? The effect of latest booking time on hotel booking intention: Based on eye-tracking experiments}, author = {Chenzhu Zhao}, doi = {10.25236/IJFS.2020.020701}, year = {2020}, date = {2020-01-01}, journal = {International Journal of Frontiers in Sociology}, volume = {2}, number = {7}, pages = {1--12}, abstract = {Online travel agencies (OTAs) depend on marketing cues to reduce consumers' uncertainty perceptions of online travel-related products. The latest booking time (LBT) provided by the consumer has a significant impact on purchasing decisions. This study aims to explore the effect of LBT on consumer visual attention and booking intention, along with the moderating effect of online comment valence (OCV). Since eye movement is bound up with the transfer of visual attention, eye tracking is used to record the visual attention of consumers. Our research used a 3 (LBT: near vs. medium vs. far) × 3 (OCV: high vs. medium vs. low) design to conduct the experiments. The main findings were as follows: (1) LBT significantly increases visual attention to the whole advertisement and improves booking intention; (2) OCV moderates the effect of LBT on both visual attention to the whole advertisement and booking intention. Only when OCV is medium or high does LBT significantly improve attention to the whole advertisement and increase consumers' booking intention. The experimental results show that OTAs can improve advertising effectiveness by adding an LBT label, but LBT has no effect when OCV is low.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Online travel agencies (OTAs) depend on marketing cues to reduce consumers' uncertainty perceptions of online travel-related products. The latest booking time (LBT) provided by the consumer has a significant impact on purchasing decisions. This study aims to explore the effect of LBT on consumer visual attention and booking intention, along with the moderating effect of online comment valence (OCV). Since eye movement is bound up with the transfer of visual attention, eye tracking is used to record the visual attention of consumers. Our research used a 3 (LBT: near vs. medium vs. far) × 3 (OCV: high vs. medium vs. low) design to conduct the experiments. The main findings were as follows: (1) LBT significantly increases visual attention to the whole advertisement and improves booking intention; (2) OCV moderates the effect of LBT on both visual attention to the whole advertisement and booking intention. Only when OCV is medium or high does LBT significantly improve attention to the whole advertisement and increase consumers' booking intention. The experimental results show that OTAs can improve advertising effectiveness by adding an LBT label, but LBT has no effect when OCV is low. |
Yang Zhou; Yining Liu; Wangzikang Zhang; Mingsha Zhang Asymmetric influence of egocentric representation onto allocentric perception Journal Article Journal of Neuroscience, 32 (24), pp. 8354–8360, 2012. @article{Zhou2012c, title = {Asymmetric influence of egocentric representation onto allocentric perception}, author = {Yang Zhou and Yining Liu and Wangzikang Zhang and Mingsha Zhang}, doi = {10.1523/JNEUROSCI.0829-12.2012}, year = {2012}, date = {2012-01-01}, journal = {Journal of Neuroscience}, volume = {32}, number = {24}, pages = {8354--8360}, abstract = {Objects in the visual world can be represented in both egocentric and allocentric coordinates. Previous studies have found that allocentric representation can affect the accuracy of spatial judgment relative to an egocentric frame, but not vice versa. Here we asked whether egocentric representation influenced the processing speed of allocentric perception. We measured the manual reaction time of human subjects in a position discrimination task in which the behavioral response relied purely on the target's allocentric location, independent of its egocentric position. We used two conditions of stimulus location: the compatible condition (allocentric left and egocentric left, or allocentric right and egocentric right) and the incompatible condition (allocentric left and egocentric right, or allocentric right and egocentric left). We found that egocentric representation markedly influenced allocentric perception in three ways. First, in a given egocentric location, allocentric perception was significantly faster in the compatible condition than in the incompatible condition. Second, as the target became more eccentric in the visual field, the speed of allocentric perception gradually slowed down in the incompatible condition but remained unchanged in the compatible condition. Third, egocentric-allocentric incompatibility slowed allocentric perception more on the left egocentric side than on the right egocentric side. These results cannot be explained by interhemispheric visuomotor transformation or stimulus-response compatibility theory. Our findings indicate that each hemisphere preferentially processes and integrates the contralateral egocentric and allocentric spatial information, and that the right hemisphere receives more ipsilateral egocentric inputs than the left hemisphere does.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objects in the visual world can be represented in both egocentric and allocentric coordinates. Previous studies have found that allocentric representation can affect the accuracy of spatial judgment relative to an egocentric frame, but not vice versa. Here we asked whether egocentric representation influenced the processing speed of allocentric perception. We measured the manual reaction time of human subjects in a position discrimination task in which the behavioral response relied purely on the target's allocentric location, independent of its egocentric position. We used two conditions of stimulus location: the compatible condition (allocentric left and egocentric left, or allocentric right and egocentric right) and the incompatible condition (allocentric left and egocentric right, or allocentric right and egocentric left). We found that egocentric representation markedly influenced allocentric perception in three ways. First, in a given egocentric location, allocentric perception was significantly faster in the compatible condition than in the incompatible condition.
Second, as the target became more eccentric in the visual field, the speed of allocentric perception gradually slowed down in the incompatible condition but remained unchanged in the compatible condition. Third, egocentric-allocentric incompatibility slowed allocentric perception more on the left egocentric side than on the right egocentric side. These results cannot be explained by interhemispheric visuomotor transformation or stimulus-response compatibility theory. Our findings indicate that each hemisphere preferentially processes and integrates the contralateral egocentric and allocentric spatial information, and that the right hemisphere receives more ipsilateral egocentric inputs than the left hemisphere does. |
Huihui Zhou; Robert John Schafer; Robert Desimone Pulvinar-cortex interactions in vision and attention Journal Article Neuron, 89 (1), pp. 209–220, 2016. @article{Zhou2016c, title = {Pulvinar-cortex interactions in vision and attention}, author = {Huihui Zhou and Robert John Schafer and Robert Desimone}, doi = {10.1016/j.neuron.2015.11.034}, year = {2016}, date = {2016-01-01}, journal = {Neuron}, volume = {89}, number = {1}, pages = {209--220}, publisher = {Elsevier Inc.}, abstract = {The ventro-lateral pulvinar is reciprocally connected with the visual areas of the ventral stream that are important for object recognition. To understand the mechanisms of attentive stimulus processing in this pulvinar-cortex loop, we investigated the interactions between the pulvinar, area V4, and IT cortex in a spatial-attention task. Sensory processing and the influence of attention in the pulvinar appeared to reflect its cortical inputs. However, pulvinar deactivation led to a reduction of attentional effects on firing rates and gamma synchrony in V4, a reduction of sensory-evoked responses and overall gamma coherence within V4, and severe behavioral deficits in the affected portion of the visual field. Conversely, pulvinar deactivation caused an increase in low-frequency cortical oscillations, often associated with inattention or sleep. Thus, cortical interactions with the ventro-lateral pulvinar are necessary for normal attention and sensory processing and for maintaining the cortex in an active state. The pulvinar is often proposed to modulate cortical processing with attention. Zhou et al. find that beyond any role in attention, the pulvinar input to cortex seems necessary to maintain the cortex in an active state.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The ventro-lateral pulvinar is reciprocally connected with the visual areas of the ventral stream that are important for object recognition. To understand the mechanisms of attentive stimulus processing in this pulvinar-cortex loop, we investigated the interactions between the pulvinar, area V4, and IT cortex in a spatial-attention task. Sensory processing and the influence of attention in the pulvinar appeared to reflect its cortical inputs. However, pulvinar deactivation led to a reduction of attentional effects on firing rates and gamma synchrony in V4, a reduction of sensory-evoked responses and overall gamma coherence within V4, and severe behavioral deficits in the affected portion of the visual field. Conversely, pulvinar deactivation caused an increase in low-frequency cortical oscillations, often associated with inattention or sleep. Thus, cortical interactions with the ventro-lateral pulvinar are necessary for normal attention and sensory processing and for maintaining the cortex in an active state. The pulvinar is often proposed to modulate cortical processing with attention. Zhou et al. find that beyond any role in attention, the pulvinar input to cortex seems necessary to maintain the cortex in an active state. |
Lei Zhou; Yang Yang Zhang; Zuo Jun Wang; Li Lin Rao; Wei Wang; Shu Li; Xingshan Li; Zhu Yuan Liang A scanpath analysis of the risky decision-making process Journal Article Journal of Behavioral Decision Making, 29 (2-3), pp. 169–182, 2016. @article{Zhou2016c, title = {A scanpath analysis of the risky decision-making process}, author = {Lei Zhou and Yang Yang Zhang and Zuo Jun Wang and Li Lin Rao and Wei Wang and Shu Li and Xingshan Li and Zhu Yuan Liang}, doi = {10.1002/bdm.1943}, year = {2016}, date = {2016-01-01}, journal = {Journal of Behavioral Decision Making}, volume = {29}, number = {2-3}, pages = {169--182}, abstract = {In the field of eye tracking, scanpath analysis can reflect the sequential and temporal properties of the cognitive process. However, the advantages of scanpath analysis have not yet been utilized in the study of risky decision making. We explored the methodological applicability of scanpath analysis to test models of risky decision making by analyzing published data from the eye-tracking studies of Su et al. (2013); Wang and Li (2012), and Sun, Rao, Zhou, and Li (2014). These studies used a proportion task, an outcome-matched presentation condition, and a multiple-play condition as the baseline for comparison with information search and processing in the risky decision-making condition. We found that (i) the similarity scores of the intra-conditions were significantly higher than those of the inter-condition; (ii) the scanpaths of the two conditions were separable; and (iii) based on an inspection of typical trials, the patterns of the scanpaths differed between the two conditions. These findings suggest that scanpath analysis is reliable and valid for examining the process of risky decision making. In line with the findings of the three original studies, our results indicate that risky decision making is unlikely to be based on a weighting and summing process, as hypothesized by the family of expectation models. The findings highlight a new methodological direction for research on decision making.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In the field of eye tracking, scanpath analysis can reflect the sequential and temporal properties of the cognitive process. However, the advantages of scanpath analysis have not yet been utilized in the study of risky decision making. We explored the methodological applicability of scanpath analysis to test models of risky decision making by analyzing published data from the eye-tracking studies of Su et al. (2013); Wang and Li (2012), and Sun, Rao, Zhou, and Li (2014). These studies used a proportion task, an outcome-matched presentation condition, and a multiple-play condition as the baseline for comparison with information search and processing in the risky decision-making condition. We found that (i) the similarity scores of the intra-conditions were significantly higher than those of the inter-condition; (ii) the scanpaths of the two conditions were separable; and (iii) based on an inspection of typical trials, the patterns of the scanpaths differed between the two conditions. These findings suggest that scanpath analysis is reliable and valid for examining the process of risky decision making. In line with the findings of the three original studies, our results indicate that risky decision making is unlikely to be based on a weighting and summing process, as hypothesized by the family of expectation models. The findings highlight a new methodological direction for research on decision making. |
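The scanpath-similarity scores at the heart of this method are typically obtained by coding each fixation with an area-of-interest (AOI) label and comparing the resulting strings with an edit distance. The original studies used their own comparison procedure; the Levenshtein-based variant below is just one common way to get a normalized similarity score:

```python
def levenshtein(a: str, b: str) -> int:
    """Minimum number of insertions, deletions and substitutions turning a into b."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1,                 # deletion
                            curr[j - 1] + 1,             # insertion
                            prev[j - 1] + (ca != cb)))   # substitution
        prev = curr
    return prev[-1]

def scanpath_similarity(a: str, b: str) -> float:
    """Normalized similarity in [0, 1]; 1 means identical AOI sequences."""
    if not a and not b:
        return 1.0
    return 1 - levenshtein(a, b) / max(len(a), len(b))

# Fixation sequences coded by AOI letter (e.g., O = outcome, P = probability)
print(scanpath_similarity("OPOP", "OPOP"))  # 1.0: identical search patterns
print(scanpath_similarity("OPOP", "OOPP"))  # 0.5: different information search
```

Comparing within-condition similarity scores against between-condition scores, as the study does, then amounts to averaging this measure over the relevant trial pairs.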
Huixia Zhou; Sonja Rossi; Juan Li; Huanhuan Liu; Ran Chen; Baoguo Chen Effects of working memory capacity in processing wh-extractions: Eye-movement evidence from Chinese–English bilinguals Journal Article Journal of Research in Reading, 40 (4), pp. 420–438, 2017. @article{Zhou2017a, title = {Effects of working memory capacity in processing wh-extractions: Eye-movement evidence from Chinese–English bilinguals}, author = {Huixia Zhou and Sonja Rossi and Juan Li and Huanhuan Liu and Ran Chen and Baoguo Chen}, doi = {10.1111/1467-9817.12079}, year = {2017}, date = {2017-01-01}, journal = {Journal of Research in Reading}, volume = {40}, number = {4}, pages = {420--438}, abstract = {Using the eye-tracking method, the present study explores whether working memory capacity, assessed via the second language (L2) reading span (L2WMC) as well as the operational span task (OSPAN), affects the processing of subject-extractions and object-extractions in Chinese–English bilinguals. Results showed that L2WMC has no effect on grammatical judgement accuracy or on the first fixation duration, gaze duration, go-past times and total fixation duration of the critical regions in wh-extractions. In contrast, OSPAN influences the first fixation duration and go-past times of the critical regions in wh-extractions. Specifically, in region 1 (e.g., Who do you think loved the comedian [region 1] with [region 2] all his heart? [subject-extraction] versus Who do you think the comedian loved [region 1] with [region 2] all his heart? [object-extraction]), participants with high OSPAN were much slower than those with low OSPAN in their first fixation duration when reading subject-extractions, whereas there were no differences between participants with different OSPANs when reading object-extractions. In region 2, participants with high OSPAN were much faster than those with low OSPAN in their go-past times for object-extractions. These results indicate that individual differences in OSPAN, rather than in L2WMC, more strongly affect the processing of wh-extractions. Thus, OSPAN appears more suitable for exploring the influence of working memory on the processing of L2 sentences with complex syntax, at least for bilinguals of intermediate proficiency. The results of the study also provide further support for the Capacity Theory of Comprehension.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Using the eye-tracking method, the present study explores whether working memory capacity, assessed via the second language (L2) reading span (L2WMC) as well as the operational span task (OSPAN), affects the processing of subject-extractions and object-extractions in Chinese–English bilinguals. Results showed that L2WMC has no effect on grammatical judgement accuracy or on the first fixation duration, gaze duration, go-past times and total fixation duration of the critical regions in wh-extractions. In contrast, OSPAN influences the first fixation duration and go-past times of the critical regions in wh-extractions. Specifically, in region 1 (e.g., Who do you think loved the comedian [region 1] with [region 2] all his heart? [subject-extraction] versus Who do you think the comedian loved [region 1] with [region 2] all his heart? [object-extraction]), participants with high OSPAN were much slower than those with low OSPAN in their first fixation duration when reading subject-extractions, whereas there were no differences between participants with different OSPANs when reading object-extractions.
In region 2, participants with high OSPAN were much faster than those with low OSPAN in their go-past times for object-extractions. These results indicate that individual differences in OSPAN, rather than in L2WMC, more strongly affect the processing of wh-extractions. Thus, OSPAN appears more suitable for exploring the influence of working memory on the processing of L2 sentences with complex syntax, at least for bilinguals of intermediate proficiency. The results of the study also provide further support for the Capacity Theory of Comprehension. |
Yang Zhou; Lixin Liang; Yujun Pan; Ning Qian; Mingsha Zhang Sites of overt and covert attention define simultaneous spatial reference centers for visuomotor response Journal Article Scientific Reports, 7 , pp. 46556, 2017. @article{Zhou2017c, title = {Sites of overt and covert attention define simultaneous spatial reference centers for visuomotor response}, author = {Yang Zhou and Lixin Liang and Yujun Pan and Ning Qian and Mingsha Zhang}, doi = {10.1038/srep46556}, year = {2017}, date = {2017-01-01}, journal = {Scientific Reports}, volume = {7}, pages = {46556}, publisher = {Nature Publishing Group}, abstract = {The site of overt attention (fixation point) defines a spatial reference center that affects visuomotor response, as indicated by the stimulus-response-compatibility (SRC) effect: When subjects press, e.g., a left key to report stimuli, their reaction time is shorter when stimuli appear to the left than to the right of the fixation. Covert attention to a peripheral site appears to define a similar reference center, but previous studies did not control for confounding spatiotemporal factors or investigate the relationship between overt- and covert-attention-defined centers. Using an eye tracker to monitor fixation, we found an SRC effect relative to the site of covert attention induced by a flashed cue dot, and a concurrent reduction, but not elimination, of the overt-attention SRC effect. The two SRC effects jointly determined the overall motor reaction time. Since trials with different cue locations were randomly interleaved, the integration of the two reference centers must be updated online. When the cue was invalid and diminished covert attention, the covert-attention SRC effect disappeared and the overt-attention SRC effect retained full strength, excluding non-attention-based interpretations. We conclude that both covert- and overt-attention sites define visual reference centers that simultaneously contribute to motor response.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The site of overt attention (fixation point) defines a spatial reference center that affects visuomotor response, as indicated by the stimulus-response-compatibility (SRC) effect: When subjects press, e.g., a left key to report stimuli, their reaction time is shorter when stimuli appear to the left than to the right of the fixation. Covert attention to a peripheral site appears to define a similar reference center, but previous studies did not control for confounding spatiotemporal factors or investigate the relationship between overt- and covert-attention-defined centers. Using an eye tracker to monitor fixation, we found an SRC effect relative to the site of covert attention induced by a flashed cue dot, and a concurrent reduction, but not elimination, of the overt-attention SRC effect. The two SRC effects jointly determined the overall motor reaction time. Since trials with different cue locations were randomly interleaved, the integration of the two reference centers must be updated online.
When the cue was invalid and diminished covert attention, the covert-attention SRC effect disappeared and the overt-attention SRC effect retained full strength, excluding non-attention-based interpretations. We conclude that both covert- and overt-attention sites define visual reference centers that simultaneously contribute to motor response. |
Wenxi Zhou; Haoyu Chen; Jiongjiong Yang Discriminative learning of similar objects enhances memory for the objects and contexts Journal Article Learning & Memory, 25 (12), pp. 601–610, 2018. @article{Zhou2018d, title = {Discriminative learning of similar objects enhances memory for the objects and contexts}, author = {Wenxi Zhou and Haoyu Chen and Jiongjiong Yang}, doi = {10.1101/lm.047514.118}, year = {2018}, date = {2018-01-01}, journal = {Learning & Memory}, volume = {25}, number = {12}, pages = {601--610}, abstract = {How to improve our episodic memory is an important issue in the field of memory. In the present study, we used a discriminative learning paradigm that was similar to a paradigm used in animal studies. In Experiment 1, a picture (e.g., a dog) was either paired with an identical picture, with a similar picture of the same concept (e.g., another dog), or with a picture of a different concept (e.g., a cat). Then, after intervals of 10 min, 1 d, and 1 wk, participants were asked to perform a 2-alternative forced-choice (2AFC) task to discriminate between a repeated and a similar picture, followed by the contextual judgment. In Experiment 2, eye movements were measured when participants encoded the pairs of pictures. The results showed that by discriminative learning, there was better memory performance in the 2AFC task for the "same" and "similar" conditions than for the "different" condition. In addition, there was better contextual memory performance for the "similar" condition than for the other two conditions. With regard to the eye movements, the participants were more likely to fixate on the lure objects and made more saccades between the target and lure objects in the "similar" (versus "different") condition. The number of saccades predicted how well the targets were remembered in both the 2AFC and contextual memory tasks. These results suggested that with discriminative learning of similar objects, detailed information could be better encoded by distinguishing the object from similar interferences, making the details and the contexts better remembered and retained over time.}, keywords = {}, pubstate = {published}, tppubtype = {article} } How to improve our episodic memory is an important issue in the field of memory. In the present study, we used a discriminative learning paradigm that was similar to a paradigm used in animal studies. In Experiment 1, a picture (e.g., a dog) was either paired with an identical picture, with a similar picture of the same concept (e.g., another dog), or with a picture of a different concept (e.g., a cat). Then, after intervals of 10 min, 1 d, and 1 wk, participants were asked to perform a 2-alternative forced-choice (2AFC) task to discriminate between a repeated and a similar picture, followed by the contextual judgment. In Experiment 2, eye movements were measured when participants encoded the pairs of pictures. The results showed that by discriminative learning, there was better memory performance in the 2AFC task for the "same" and "similar" conditions than for the "different" condition. In addition, there was better contextual memory performance for the "similar" condition than for the other two conditions. With regard to the eye movements, the participants were more likely to fixate on the lure objects and made more saccades between the target and lure objects in the "similar" (versus "different") condition. The number of saccades predicted how well the targets were remembered in both the 2AFC and contextual memory tasks. 
These results suggested that with discriminative learning of similar objects, detailed information could be better encoded by distinguishing the object from similar interferences, making the details and the contexts better remembered and retained over time. |
Ying Joey Zhou; Alexis Pérez-Bellido; Saskia Haegens; Floris P de Lange Perceptual expectations modulate low-frequency activity: A statistical learning magnetoencephalography study Journal Article Journal of Cognitive Neuroscience, pp. 1–12, 2019. @article{Zhou2019c, title = {Perceptual expectations modulate low-frequency activity: A statistical learning magnetoencephalography study}, author = {Ying Joey Zhou and Alexis Pérez-Bellido and Saskia Haegens and Floris P de Lange}, doi = {10.1162/jocn_a_01511}, year = {2019}, date = {2019-12-01}, journal = {Journal of Cognitive Neuroscience}, pages = {1--12}, publisher = {MIT Press - Journals}, abstract = {Perceptual expectations can change how a visual stimulus is perceived. Recent studies have shown mixed results in terms of whether expectations modulate sensory representations. Here, we used a statistical learning paradigm to study the temporal characteristics of perceptual expectations. We presented participants with pairs of object images organized in a predictive manner and then recorded their brain activity with magnetoencephalography while they viewed expected and unexpected image pairs on the subsequent day. We observed stronger alpha-band (7–14 Hz) activity in response to unexpected compared with expected object images. Specifically, the alpha-band modulation occurred as early as the onset of the stimuli and was most pronounced in left occipito-temporal cortex. Given that the differential response to expected versus unexpected stimuli occurred in sensory regions early in time, our results suggest that expectations modulate perceptual decision-making by changing the sensory response elicited by the stimuli.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Perceptual expectations can change how a visual stimulus is perceived. Recent studies have shown mixed results in terms of whether expectations modulate sensory representations. Here, we used a statistical learning paradigm to study the temporal characteristics of perceptual expectations. We presented participants with pairs of object images organized in a predictive manner and then recorded their brain activity with magnetoencephalography while they viewed expected and unexpected image pairs on the subsequent day. We observed stronger alpha-band (7–14 Hz) activity in response to unexpected compared with expected object images. Specifically, the alpha-band modulation occurred as early as the onset of the stimuli and was most pronounced in left occipito-temporal cortex. Given that the differential response to expected versus unexpected stimuli occurred in sensory regions early in time, our results suggest that expectations modulate perceptual decision-making by changing the sensory response elicited by the stimuli. |
Yan Zhou Psychological analysis of online teaching in colleges based on eye-tracking technology Journal Article Revista Argentina de Clinica Psicologica, 29 (2), pp. 523–529, 2020. @article{Zhou2020a, title = {Psychological analysis of online teaching in colleges based on eye-tracking technology}, author = {Yan Zhou}, doi = {10.24205/03276716.2020.272}, year = {2020}, date = {2020-01-01}, journal = {Revista Argentina de Clinica Psicologica}, volume = {29}, number = {2}, pages = {523--529}, abstract = {Eye-tracking technology has been widely adopted to capture the psychological changes of college students in the learning process. With the aid of eye-tracking technology, this paper establishes a psychological analysis model for students in online teaching. Four eye movement parameters were selected for the model: pupil diameter, fixation time, re-reading time and retrospective time. A total of 100 college students were selected for an eye movement test in an online teaching environment. The test data were analyzed in SPSS. The results show that the eye movement parameters are greatly affected by the key points in teaching and by the contents that interest the students; these two influencing factors can arouse and attract the students' attention during the teaching process. The research results provide an important reference for the psychological study of online teaching in colleges.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Eye-tracking technology has been widely adopted to capture the psychological changes of college students in the learning process. With the aid of eye-tracking technology, this paper establishes a psychological analysis model for students in online teaching. Four eye movement parameters were selected for the model: pupil diameter, fixation time, re-reading time and retrospective time. A total of 100 college students were selected for an eye movement test in an online teaching environment. The test data were analyzed in SPSS. The results show that the eye movement parameters are greatly affected by the key points in teaching and by the contents that interest the students; these two influencing factors can arouse and attract the students' attention during the teaching process. The research results provide an important reference for the psychological study of online teaching in colleges. |
Weina Zhu; Jan Drewes; Karl R Gegenfurtner Animal detection in natural images: effects of color and image database Journal Article PLoS ONE, 8 (10), pp. e75816, 2013. @article{Zhu2013, title = {Animal detection in natural images: effects of color and image database}, author = {Weina Zhu and Jan Drewes and Karl R Gegenfurtner}, doi = {10.1371/journal.pone.0075816}, year = {2013}, date = {2013-01-01}, journal = {PLoS ONE}, volume = {8}, number = {10}, pages = {e75816}, abstract = {The visual system has a remarkable ability to extract categorical information from complex natural scenes. In order to elucidate the role of low-level image features for the recognition of objects in natural scenes, we recorded saccadic eye movements and event-related potentials (ERPs) in two experiments, in which human subjects had to detect animals in previously unseen natural images. We used a new natural image database (ANID) that is free of some of the potential artifacts that have plagued the widely used COREL images. Color and grayscale images picked from the ANID and COREL databases were used. In all experiments, color images induced a greater N1 EEG component at earlier time points than grayscale images. We suggest that this influence of color in animal detection may be masked by later processes when measuring reaction times. The ERP results of go/nogo and forced choice tasks were similar to those reported earlier. The non-animal stimuli induced a bigger N1 than animal stimuli in both the COREL and ANID databases. This result indicates that ultra-fast processing of animal images is possible irrespective of the particular database. With the ANID images, the difference between color and grayscale images is more pronounced than with the COREL images. The earlier use of the COREL images might have led to an underestimation of the contribution of color. Therefore, we conclude that the ANID image database is better suited for the investigation of the processing of natural scenes than other databases commonly used.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The visual system has a remarkable ability to extract categorical information from complex natural scenes. In order to elucidate the role of low-level image features for the recognition of objects in natural scenes, we recorded saccadic eye movements and event-related potentials (ERPs) in two experiments, in which human subjects had to detect animals in previously unseen natural images. We used a new natural image database (ANID) that is free of some of the potential artifacts that have plagued the widely used COREL images. Color and grayscale images picked from the ANID and COREL databases were used. In all experiments, color images induced a greater N1 EEG component at earlier time points than grayscale images. We suggest that this influence of color in animal detection may be masked by later processes when measuring reaction times. The ERP results of go/nogo and forced choice tasks were similar to those reported earlier. The non-animal stimuli induced a bigger N1 than animal stimuli in both the COREL and ANID databases. This result indicates that ultra-fast processing of animal images is possible irrespective of the particular database. With the ANID images, the difference between color and grayscale images is more pronounced than with the COREL images. The earlier use of the COREL images might have led to an underestimation of the contribution of color.
Therefore, we conclude that the ANID image database is better suited for the investigation of the processing of natural scenes than other databases commonly used. |
Xiao Lin Zhu; Shu Ping Tan; Fu De Yang; Wei Sun; Chong Sheng Song; Jie Feng Cui; Yan Li Zhao; Feng Mei Fan; Ya Jun Li; Yun Long Tan; Yi Zhuang Zou Visual scanning of emotional faces in schizophrenia Journal Article Neuroscience Letters, 552 , pp. 46–51, 2013. @article{Zhu2013a, title = {Visual scanning of emotional faces in schizophrenia}, author = {Xiao Lin Zhu and Shu Ping Tan and Fu {De Yang} and Wei Sun and Chong Sheng Song and Jie Feng Cui and Yan Li Zhao and Feng Mei Fan and Ya Jun Li and Yun Long Tan and Yi Zhuang Zou}, doi = {10.1016/j.neulet.2013.07.046}, year = {2013}, date = {2013-01-01}, journal = {Neuroscience Letters}, volume = {552}, pages = {46--51}, publisher = {Elsevier Ireland Ltd}, abstract = {This study investigated eye movement differences during facial emotion recognition between 101 patients with chronic schizophrenia and 101 controls. Independent of facial emotion, patients with schizophrenia processed facial information inefficiently: they directed significantly more fixations, of longer duration, to interest areas (IAs) such as the eyes, nose, mouth, and nasion. The total fixation number, mean fixation duration, and total fixation duration were significantly increased in schizophrenia. Additionally, the number of fixations per second to IAs (IA fixation number/s) was significantly lower in schizophrenia. However, no differences were found between the two groups in the proportion of fixations directed to IAs out of the total fixation number (IA fixation number %). Interestingly, the negative symptoms of patients with schizophrenia negatively correlated with IA fixation number %. Both groups showed significantly greater attention to positive faces. Compared to controls, patients with schizophrenia exhibited significantly more fixations directed to IAs, a higher total fixation number, and a lower IA fixation number/s for negative faces. These results indicate that facial processing efficiency is significantly decreased in schizophrenia, but no difference was observed in processing strategy. Patients with schizophrenia may have special deficits in processing negative faces, and negative symptoms may affect visual scanning parameters.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study investigated eye movement differences during facial emotion recognition between 101 patients with chronic schizophrenia and 101 controls. Independent of facial emotion, patients with schizophrenia processed facial information inefficiently: they directed significantly more fixations, of longer duration, to interest areas (IAs) such as the eyes, nose, mouth, and nasion. The total fixation number, mean fixation duration, and total fixation duration were significantly increased in schizophrenia. Additionally, the number of fixations per second to IAs (IA fixation number/s) was significantly lower in schizophrenia. However, no differences were found between the two groups in the proportion of fixations directed to IAs out of the total fixation number (IA fixation number %). Interestingly, the negative symptoms of patients with schizophrenia negatively correlated with IA fixation number %. Both groups showed significantly greater attention to positive faces. Compared to controls, patients with schizophrenia exhibited significantly more fixations directed to IAs, a higher total fixation number, and a lower IA fixation number/s for negative faces.
These results indicate that facial processing efficiency is significantly decreased in schizophrenia, but no difference was observed in processing strategy. Patients with schizophrenia may have special deficits in processing negative faces, and negative symptoms may affect visual scanning parameters. |
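The scanning measures reported in this entry (total fixation number, mean and total fixation duration, IA fixation number/s, IA fixation number %) are all simple aggregates over a per-trial fixation list. A sketch of how such measures might be computed, with invented field names rather than the authors' actual data format:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class Fixation:
    duration: float    # seconds
    ia: Optional[str]  # interest-area label ('eyes', 'nose', ...) or None if outside

def scanning_measures(fixations, trial_duration):
    """Aggregate visual-scanning measures for one trial (durations in seconds)."""
    n_total = len(fixations)
    n_ia = sum(f.ia is not None for f in fixations)
    total_dur = sum(f.duration for f in fixations)
    return {
        "total_fixation_number": n_total,
        "mean_fixation_duration": total_dur / n_total,
        "total_fixation_duration": total_dur,
        "ia_fixation_number_per_s": n_ia / trial_duration,
        "ia_fixation_number_pct": 100 * n_ia / n_total,
    }

fixs = [Fixation(0.25, "eyes"), Fixation(0.30, "nose"), Fixation(0.20, None)]
print(scanning_measures(fixs, trial_duration=2.0))
```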
Rongjuan Zhu; Yangmei Luo; Xuqun You; Ziyu Wang Spatial bias induced by simple addition and subtraction: From eye movement evidence Journal Article Perception, 47 (2), pp. 143–157, 2018. @article{Zhu2018, title = {Spatial bias induced by simple addition and subtraction: From eye movement evidence}, author = {Rongjuan Zhu and Yangmei Luo and Xuqun You and Ziyu Wang}, doi = {10.1177/0301006617738718}, year = {2018}, date = {2018-01-01}, journal = {Perception}, volume = {47}, number = {2}, pages = {143--157}, abstract = {The associations between number and space have been intensively investigated. Recent studies indicated that this association could extend to more complex tasks, such as mental arithmetic. However, the mechanism of arithmetic-space associations in mental arithmetic was still a topic of debate. Thus, in the current study, we adopted eye-tracking technology to investigate whether the spatial bias induced by mental arithmetic was related to spatial attention shifts on the mental number line or to a semantic link between the operator and space. In Experiment 1, participants moved their eyes to the corresponding response area according to the cues after solving addition and subtraction problems. The results showed that the participants moved their eyes faster to the leftward space after solving subtraction problems and faster to the right after solving addition problems. However, there was no spatial bias observed when the second operand was zero in the same time window, which indicated that the emergence of spatial bias may be associated with spatial attention shifts on the mental number line. In Experiment 2, participants responded to the operator (operation plus and operation minus) with their eyes. The results showed that mere presentation of the operator did not cause spatial bias. Therefore, the arithmetic–space associations might be related to movement along the mental number line.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The associations between number and space have been intensively investigated. Recent studies indicated that this association could extend to more complex tasks, such as mental arithmetic. However, the mechanism of arithmetic-space associations in mental arithmetic was still a topic of debate. Thus, in the current study, we adopted eye-tracking technology to investigate whether the spatial bias induced by mental arithmetic was related to spatial attention shifts on the mental number line or to a semantic link between the operator and space. In Experiment 1, participants moved their eyes to the corresponding response area according to the cues after solving addition and subtraction problems. The results showed that the participants moved their eyes faster to the leftward space after solving subtraction problems and faster to the right after solving addition problems. However, there was no spatial bias observed when the second operand was zero in the same time window, which indicated that the emergence of spatial bias may be associated with spatial attention shifts on the mental number line. In Experiment 2, participants responded to the operator (operation plus and operation minus) with their eyes. The results showed that mere presentation of the operator did not cause spatial bias. Therefore, the arithmetic–space associations might be related to movement along the mental number line. |
Rongjuan Zhu; Xuqun You; Shuoqiu Gan; Jinwei Wang Spatial attention shifts in addition and subtraction arithmetic: Evidence of eye movement Journal Article Perception, 48 (9), pp. 835–849, 2019. @article{Zhu2019a, title = {Spatial attention shifts in addition and subtraction arithmetic: Evidence of eye movement}, author = {Rongjuan Zhu and Xuqun You and Shuoqiu Gan and Jinwei Wang}, doi = {10.1177/0301006619865156}, year = {2019}, date = {2019-01-01}, journal = {Perception}, volume = {48}, number = {9}, pages = {835--849}, abstract = {Recently, it has been proposed that solving addition and subtraction problems can evoke horizontal shifts of spatial attention. However, prior to this study, it remained unclear whether orienting shifts of spatial attention relied on actual arithmetic processes (i.e., the activated magnitude) or the semantic spatial association of the operator. In this study, spatial–arithmetic associations were explored through three experiments using an eye tracker, which attempted to investigate the mechanism of those associations. Experiment 1 replicated spatial–arithmetic associations in addition and subtraction problems. Experiments 2 and 3 selected zero as the operand to investigate whether these arithmetic problems could induce shifts of spatial attention. Experiment 2 indicated that addition and subtraction problems (zero as the second operand, i.e., 2 + 0) do not induce shifts of spatial attention. Experiment 3 showed that addition and subtraction arithmetic (zero as the first operand, i.e., 0 + 2) do facilitate rightward and leftward eye movement, respectively. This indicates that the operator alone does not induce horizontal eye movement. However, our findings support the idea that solving addition and subtraction problems is associated with horizontal shifts of spatial attention.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recently, it has been proposed that solving addition and subtraction problems can evoke horizontal shifts of spatial attention. However, prior to this study, it remained unclear whether orienting shifts of spatial attention relied on actual arithmetic processes (i.e., the activated magnitude) or the semantic spatial association of the operator. In this study, spatial–arithmetic associations were explored through three experiments using an eye tracker, which attempted to investigate the mechanism of those associations. Experiment 1 replicated spatial–arithmetic associations in addition and subtraction problems. Experiments 2 and 3 selected zero as the operand to investigate whether these arithmetic problems could induce shifts of spatial attention. Experiment 2 indicated that addition and subtraction problems (zero as the second operand, i.e., 2 + 0) do not induce shifts of spatial attention. Experiment 3 showed that addition and subtraction arithmetic (zero as the first operand, i.e., 0 + 2) do facilitate rightward and leftward eye movement, respectively. This indicates that the operator alone does not induce horizontal eye movement. However, our findings support the idea that solving addition and subtraction problems is associated with horizontal shifts of spatial attention. |
Zhuoting Zhu; Yin Hu; Chimei Liao; Stuart Keel; Ren Huang; Yanping Liu; Mingguang He Visual span and cognitive factors affect Chinese reading speed Journal Article Journal of Vision, 19 (14), pp. 1–11, 2019. @article{Zhu2019d, title = {Visual span and cognitive factors affect Chinese reading speed}, author = {Zhuoting Zhu and Yin Hu and Chimei Liao and Stuart Keel and Ren Huang and Yanping Liu and Mingguang He}, doi = {10.1167/19.14.17}, year = {2019}, date = {2019-01-01}, journal = {Journal of Vision}, volume = {19}, number = {14}, pages = {1--11}, abstract = {Visual span, which is the number of recognizable letters seen without moving the eyes, has been proven to impose a sensory limitation for alphabetic reading speed (Chung, 2011; Chung, Legge, & Cheung, 2004; Lee, Kwon, Legge, & Gefroh, 2010; Legge, Ahn, Klitz, & Luebker, 1997; Legge, Hooven, Klitz, Stephen Mansfield, & Tjan, 2002; D. Yu, Cheung, Legge, & Chung, 2010). However, little is known about the effects of visual span on Chinese reading performance. Of note, Chinese text differs greatly from that of the alphabetic writing system. There are no spaces between words, and readers are forced to utilize their lexical knowledge to segment Chinese characters into meaningful words, thus increasing the relative importance of cognitive/linguistic factors in reading performance. Therefore, the aim of the present study is to explore whether visual span and cognitive/linguistic factors have independent effects on Chinese reading speed. Visual span profiles, cognitive/linguistic factors indicated by word frequency, and Chinese sentence-reading performance were collected from 28 native Chinese-speaking subjects. We found that the visual-span size and cognitive/linguistic factors independently contributed to Chinese sentence-reading speed (all ps < 0.05). We concluded that both the visual-span size and cognitive/linguistic factors represented bottlenecks for Chinese sentence-reading speed.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual span, which is the number of recognizable letters seen without moving the eyes, has been proven to impose a sensory limitation for alphabetic reading speed (Chung, 2011; Chung, Legge, & Cheung, 2004; Lee, Kwon, Legge, & Gefroh, 2010; Legge, Ahn, Klitz, & Luebker, 1997; Legge, Hooven, Klitz, Stephen Mansfield, & Tjan, 2002; D. Yu, Cheung, Legge, & Chung, 2010). However, little is known about the effects of visual span on Chinese reading performance. Of note, Chinese text differs greatly from that of the alphabetic writing system. There are no spaces between words, and readers are forced to utilize their lexical knowledge to segment Chinese characters into meaningful words, thus increasing the relative importance of cognitive/linguistic factors in reading performance. Therefore, the aim of the present study is to explore whether visual span and cognitive/linguistic factors have independent effects on Chinese reading speed. Visual span profiles, cognitive/linguistic factors indicated by word frequency, and Chinese sentence-reading performance were collected from 28 native Chinese-speaking subjects. We found that the visual-span size and cognitive/linguistic factors independently contributed to Chinese sentence-reading speed (all ps < 0.05). We concluded that both the visual-span size and cognitive/linguistic factors represented bottlenecks for Chinese sentence-reading speed. |
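Testing whether visual-span size and a lexical factor contribute independently to reading speed is, at bottom, a multiple-regression question. A hypothetical sketch with statsmodels on simulated data (variable names and effect sizes are invented, not taken from the paper):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(42)
n = 28                                    # subjects, matching the study's sample size
span = rng.normal(10, 1.5, n)             # simulated visual-span size
freq = rng.normal(0, 1, n)                # simulated word-frequency (lexical) factor
speed = 50 + 8 * span + 12 * freq + rng.normal(0, 10, n)  # simulated reading speed

X = sm.add_constant(np.column_stack([span, freq]))
fit = sm.OLS(speed, X).fit()
print(fit.summary())  # each coefficient's p-value tests an independent contribution
```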
Jiawen Zhu; Kara Dawson; Albert D Ritzhaupt; Pavlo Pasha Antonenko Investigating how multimedia and modality design principles influence student learning performance, satisfaction, mental effort, and visual attention Journal Article Journal of Educational Multimedia and Hypermedia, 29 (3), pp. 265–284, 2020. @article{Zhu2020, title = {Investigating how multimedia and modality design principles influence student learning performance, satisfaction, mental effort, and visual attention}, author = {Jiawen Zhu and Kara Dawson and Albert D Ritzhaupt and Pavlo Pasha Antonenko}, year = {2020}, date = {2020-01-01}, journal = {Journal of Educational Multimedia and Hypermedia}, volume = {29}, number = {3}, pages = {265--284}, abstract = {This study investigated the effects of multimedia and modality design principles using a learning intervention about Australia with a sample of college students and employing measures of learning outcomes, visual attention, satisfaction, and mental effort. Seventy-five college students were systematically assigned to one of four conditions: a) text with pictures, b) text without pictures, c) narration with pictures, or d) narration without pictures. No significant differences were found among the four groups in learning performance, satisfaction, or self-reported mental effort, and participants rarely focused their visual attention on the representational pictures provided in the intervention. Neither the multimedia nor the modality principle held true in this study. However, participants in narration environments focused significantly more visual attention on the “Next” button, a navigational aid included on all slides. This study contributes to the research on visual attention and navigational aids in multimedia learning, and it suggests such features may cause distractions, particularly when spoken text is provided without on-screen text. The paper also offers implications for the design of multimedia learning.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This study investigated the effects of multimedia and modality design principles using a learning intervention about Australia with a sample of college students and employing measures of learning outcomes, visual attention, satisfaction, and mental effort. Seventy-five college students were systematically assigned to one of four conditions: a) text with pictures, b) text without pictures, c) narration with pictures, or d) narration without pictures. No significant differences were found among the four groups in learning performance, satisfaction, or self-reported mental effort, and participants rarely focused their visual attention on the representational pictures provided in the intervention. Neither the multimedia nor the modality principle held true in this study. However, participants in narration environments focused significantly more visual attention on the “Next” button, a navigational aid included on all slides. This study contributes to the research on visual attention and navigational aids in multimedia learning, and it suggests such features may cause distractions, particularly when spoken text is provided without on-screen text. The paper also offers implications for the design of multimedia learning. |
Jing Zhu; Zihan Wang; Tao Gong; Shuai Zeng; Xiaowei Li; Bin Hu; Jianxiu Li; Shuting Sun; Lan Zhang An improved classification model for depression detection using EEG and eye tracking data Journal Article IEEE Transactions on Nanobioscience, 19 (3), pp. 527–537, 2020. @article{Zhu2020a, title = {An improved classification model for depression detection using EEG and eye tracking data}, author = {Jing Zhu and Zihan Wang and Tao Gong and Shuai Zeng and Xiaowei Li and Bin Hu and Jianxiu Li and Shuting Sun and Lan Zhang}, doi = {10.1109/TNB.2020.2990690}, year = {2020}, date = {2020-01-01}, journal = {IEEE Transactions on Nanobioscience}, volume = {19}, number = {3}, pages = {527--537}, abstract = {At present, depression has become a major health burden in the world. However, there are many problems with the diagnosis of depression, such as low patient cooperation, subjective bias and low accuracy. Therefore, a reliable and objective evaluation method is needed to achieve effective depression detection. Electroencephalogram (EEG) and eye movement (EM) data have been widely used for depression detection due to their advantages of easy recording and non-invasiveness. This research proposes a content-based ensemble method (CBEM) to improve depression detection accuracy; both a static and a dynamic CBEM are discussed. In the proposed model, the EEG or EM dataset is divided into subsets by the context of the experiments, and then a majority-vote strategy is used to determine each subject's label. The method is validated on two datasets, one of free-viewing eye tracking and one of resting-state EEG, comprising 36 and 34 subjects, respectively. On these two datasets, CBEM achieves accuracies of 82.5% and 92.65%, respectively. The results show that CBEM outperforms traditional classification methods. Our findings provide an effective method for improving the accuracy of depression identification, which in the future could be used for the auxiliary diagnosis of depression.}, keywords = {}, pubstate = {published}, tppubtype = {article} } At present, depression has become a major health burden in the world. However, there are many problems with the diagnosis of depression, such as low patient cooperation, subjective bias and low accuracy. Therefore, a reliable and objective evaluation method is needed to achieve effective depression detection. Electroencephalogram (EEG) and eye movement (EM) data have been widely used for depression detection due to their advantages of easy recording and non-invasiveness. This research proposes a content-based ensemble method (CBEM) to improve depression detection accuracy; both a static and a dynamic CBEM are discussed. In the proposed model, the EEG or EM dataset is divided into subsets by the context of the experiments, and then a majority-vote strategy is used to determine each subject's label. The method is validated on two datasets, one of free-viewing eye tracking and one of resting-state EEG, comprising 36 and 34 subjects, respectively. On these two datasets, CBEM achieves accuracies of 82.5% and 92.65%, respectively. The results show that CBEM outperforms traditional classification methods. Our findings provide an effective method for improving the accuracy of depression identification, which in the future could be used for the auxiliary diagnosis of depression. |
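The ensemble logic sketched in this abstract (partition the recordings by experimental context, classify each subset separately, then assign each subject the majority label across subsets) maps naturally onto a few lines of scikit-learn. This is a schematic reading of CBEM on simulated features, not the authors' implementation:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(7)
n_subjects, n_contexts, n_feat = 36, 5, 10
y = rng.integers(0, 2, n_subjects)        # 0 = control, 1 = depressed (simulated)
# One feature vector per (subject, context), with a weak class signal mixed in
X = rng.normal(0, 1, (n_subjects, n_contexts, n_feat)) + y[:, None, None] * 0.8

train = np.arange(0, n_subjects, 2)       # illustrative even/odd subject split
test = np.arange(1, n_subjects, 2)
clfs = [LogisticRegression().fit(X[train, c], y[train]) for c in range(n_contexts)]

# Majority vote across the per-context classifiers decides each subject's label
votes = np.stack([clfs[c].predict(X[test, c]) for c in range(n_contexts)])
pred = (votes.mean(axis=0) > 0.5).astype(int)
print("held-out accuracy:", (pred == y[test]).mean())
```

An odd number of contexts avoids tied votes; with an even number, one would need a tie-breaking rule such as the mean predicted probability.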
Maryam Ziaei; William von Hippel; Julie D Henry; Stefanie I Becker Are age effects in positivity influenced by the valence of distractors? Journal Article PLoS ONE, 10 (9), pp. e0137604, 2015. @article{Ziaei2015, title = {Are age effects in positivity influenced by the valence of distractors?}, author = {Maryam Ziaei and William von Hippel and Julie D Henry and Stefanie I Becker}, doi = {10.1371/journal.pone.0137604}, year = {2015}, date = {2015-01-01}, journal = {PLoS ONE}, volume = {10}, number = {9}, pages = {e0137604}, abstract = {An age-related ‘positivity' effect has been identified, in which older adults show an information- processing bias towards positive emotional items in attention and memory. In the present study, we examined this positivity bias by using a novel paradigm in which emotional and neutral distractors were presented along with emotionally valenced targets. Thirty-five older and 37 younger adults were asked during encoding to attend to emotional targets paired with distractors that were either neutral or opposite in valence to the target. Pupillary responses were recorded during initial encoding as well as a later incidental recognition task. Memory and pupillary responses for negative items were not affected by the valence of distractors, suggesting that positive distractors did not automatically attract older adults' attention while they were encoding negative targets. Additionally, the pupil dilation to negative items mediated the relation between age and positivity in memory. Overall, memory and pupillary responses provide converging support for a cognitive control account of positivity effects in late adulthood and suggest a link between attentional processes and the memory positivity effect.}, keywords = {}, pubstate = {published}, tppubtype = {article} } An age-related ‘positivity' effect has been identified, in which older adults show an information- processing bias towards positive emotional items in attention and memory. In the present study, we examined this positivity bias by using a novel paradigm in which emotional and neutral distractors were presented along with emotionally valenced targets. Thirty-five older and 37 younger adults were asked during encoding to attend to emotional targets paired with distractors that were either neutral or opposite in valence to the target. Pupillary responses were recorded during initial encoding as well as a later incidental recognition task. Memory and pupillary responses for negative items were not affected by the valence of distractors, suggesting that positive distractors did not automatically attract older adults' attention while they were encoding negative targets. Additionally, the pupil dilation to negative items mediated the relation between age and positivity in memory. Overall, memory and pupillary responses provide converging support for a cognitive control account of positivity effects in late adulthood and suggest a link between attentional processes and the memory positivity effect. |
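The mediation result in this entry, with pupil dilation to negative items carrying part of the age effect on memory positivity, corresponds to a standard indirect-effect (a*b) decomposition. A toy product-of-coefficients sketch on simulated data; a real analysis would bootstrap a confidence interval around the indirect effect:

```python
import numpy as np

rng = np.random.default_rng(3)
n = 72                                             # roughly the combined sample size
age = rng.integers(0, 2, n).astype(float)          # 0 = younger, 1 = older (simulated)
pupil = 0.5 * age + rng.normal(0, 1, n)            # mediator: dilation to negative items
positivity = 0.6 * pupil + 0.2 * age + rng.normal(0, 1, n)  # memory positivity score

def ols(y, *preds):
    """OLS coefficients (intercept first) of y regressed on the given predictors."""
    X = np.column_stack([np.ones(len(y)), *preds])
    return np.linalg.lstsq(X, y, rcond=None)[0]

a = ols(pupil, age)[1]                             # path a: age -> mediator
b, c_prime = ols(positivity, pupil, age)[1:]       # path b and direct effect c'
c = ols(positivity, age)[1]                        # total effect c
print(f"indirect a*b = {a*b:.3f}, total c = {c:.3f}, direct c' = {c_prime:.3f}")
```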
Imme C Zillekens; Marie Luise Brandi; Juha M Lahnakoski; Atesh Koul; Valeria Manera; Cristina Becchio; Leonhard Schilbach Increased functional coupling of the left amygdala and medial prefrontal cortex during the perception of communicative point-light stimuli Journal Article Social Cognitive and Affective Neuroscience, 14 (1), pp. 97–107, 2019. @article{Zillekens2019, title = {Increased functional coupling of the left amygdala and medial prefrontal cortex during the perception of communicative point-light stimuli}, author = {Imme C Zillekens and Marie Luise Brandi and Juha M Lahnakoski and Atesh Koul and Valeria Manera and Cristina Becchio and Leonhard Schilbach}, doi = {10.1093/scan/nsy105}, year = {2019}, date = {2019-01-01}, journal = {Social Cognitive and Affective Neuroscience}, volume = {14}, number = {1}, pages = {97--107}, abstract = {Interpersonal predictive coding (IPPC) describes the behavioral phenomenon whereby seeing a communicative rather than an individual action helps to discern a masked second agent. As little is yet known about the neural correlates of IPPC, we conducted a functional magnetic resonance imaging study in a group of 27 healthy participants using point-light displays of moving agents embedded in distractors. We discovered that seeing communicative compared to individual actions was associated with higher activation of right superior frontal gyrus, whereas the reversed contrast elicited increased neural activation in an action observation network that was activated during all trials. Our findings, therefore, potentially indicate the formation of action predictions and a reduced demand for executive control in response to communicative actions. Further, in a regression analysis, we revealed that increased perceptual sensitivity was associated with a deactivation of the left amygdala during the perceptual task. A consecutive psychophysiological interaction analysis showed increased connectivity of the amygdala with medial prefrontal cortex in the context of communicative compared to individual actions. Thus, whereas increased amygdala signaling might interfere with task-relevant processes, increased co-activation of the amygdala and the medial prefrontal cortex in a communicative context might represent the integration of mentalizing computations.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Interpersonal predictive coding (IPPC) describes the behavioral phenomenon whereby seeing a communicative rather than an individual action helps to discern a masked second agent. As little is yet known about the neural correlates of IPPC, we conducted a functional magnetic resonance imaging study in a group of 27 healthy participants using point-light displays of moving agents embedded in distractors. We discovered that seeing communicative compared to individual actions was associated with higher activation of right superior frontal gyrus, whereas the reversed contrast elicited increased neural activation in an action observation network that was activated during all trials. Our findings, therefore, potentially indicate the formation of action predictions and a reduced demand for executive control in response to communicative actions. Further, in a regression analysis, we revealed that increased perceptual sensitivity was associated with a deactivation of the left amygdala during the perceptual task. 
A consecutive psychophysiological interaction analysis showed increased connectivity of the amygdala with medial prefrontal cortex in the context of communicative compared to individual actions. Thus, whereas increased amygdala signaling might interfere with task-relevant processes, increased co-activation of the amygdala and the medial prefrontal cortex in a communicative context might represent the integration of mentalizing computations. |
Ulrike Zimmer; Margit Höfler; Karl Koschutnig; Anja Ischebeck Neuronal interactions in areas of spatial attention reflect avoidance of disgust, but orienting to danger Journal Article NeuroImage, 134 , pp. 94–104, 2016. @article{Zimmer2016, title = {Neuronal interactions in areas of spatial attention reflect avoidance of disgust, but orienting to danger}, author = {Ulrike Zimmer and Margit Höfler and Karl Koschutnig and Anja Ischebeck}, doi = {10.1016/j.neuroimage.2016.03.050}, year = {2016}, date = {2016-01-01}, journal = {NeuroImage}, volume = {134}, pages = {94--104}, abstract = {For survival, it is necessary to attend quickly towards dangerous objects, but to turn away from something that is disgusting. We tested whether fear and disgust sounds direct spatial attention differently. Using fMRI, a sound cue (disgust, fear or neutral) was presented to the left or right ear. The cue was followed by a visual target (a small arrow) which was located on the same (valid) or opposite (invalid) side as the cue. Participants were required to decide whether the arrow pointed up- or downwards while ignoring the sound cue. Behaviorally, responses were faster for invalid compared to valid targets when cued by disgust, whereas the opposite pattern was observed for targets after fearful and neutral sound cues. During target presentation, activity in the visual cortex and IPL increased for targets invalidly cued with disgust, but for targets validly cued with fear, which indicated a general modulation of activation due to attention. For the TPJ, an interaction in the opposite direction was observed, consistent with its role in detecting targets at unattended positions and in relocating attention. As a whole our results indicate that a disgusting sound directs spatial attention away from its location, in contrast to fearful and neutral sounds.}, keywords = {}, pubstate = {published}, tppubtype = {article} } For survival, it is necessary to attend quickly towards dangerous objects, but to turn away from something that is disgusting. We tested whether fear and disgust sounds direct spatial attention differently. Using fMRI, a sound cue (disgust, fear or neutral) was presented to the left or right ear. The cue was followed by a visual target (a small arrow) which was located on the same (valid) or opposite (invalid) side as the cue. Participants were required to decide whether the arrow pointed up- or downwards while ignoring the sound cue. Behaviorally, responses were faster for invalid compared to valid targets when cued by disgust, whereas the opposite pattern was observed for targets after fearful and neutral sound cues. During target presentation, activity in the visual cortex and IPL increased for targets invalidly cued with disgust, but for targets validly cued with fear, which indicated a general modulation of activation due to attention. For the TPJ, an interaction in the opposite direction was observed, consistent with its role in detecting targets at unattended positions and in relocating attention. As a whole our results indicate that a disgusting sound directs spatial attention away from its location, in contrast to fearful and neutral sounds. |
Eckart Zimmermann; Concetta M Morrone; David C Burr Visual motion distorts visual and motor space Journal Article Journal of Vision, 12 (2), pp. 10–10, 2012. @article{Zimmermann2012, title = {Visual motion distorts visual and motor space}, author = {Eckart Zimmermann and Concetta M Morrone and David C Burr}, doi = {10.1167/12.2.10}, year = {2012}, date = {2012-01-01}, journal = {Journal of Vision}, volume = {12}, number = {2}, pages = {10--10}, abstract = {Much evidence suggests that visual motion can cause severe distortions in the perception of spatial position. In this study, we show that visual motion also distorts saccadic eye movements. Landing positions of saccades performed to objects presented in the vicinity of visual motion were biased in the direction of motion. The targeting errors for both saccades and perceptual reports were maximum during motion onset and were of very similar magnitude under the two conditions. These results suggest that visual motion affects a representation of spatial position, or spatial map, in a similar fashion for visuomotor action as for perception.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Much evidence suggests that visual motion can cause severe distortions in the perception of spatial position. In this study, we show that visual motion also distorts saccadic eye movements. Landing positions of saccades performed to objects presented in the vicinity of visual motion were biased in the direction of motion. The targeting errors for both saccades and perceptual reports were maximum during motion onset and were of very similar magnitude under the two conditions. These results suggest that visual motion affects a representation of spatial position, or spatial map, in a similar fashion for visuomotor action as for perception. |
Eckart Zimmermann; Concetta M Morrone; David C Burr Spatial position information accumulates steadily over time Journal Article Journal of Neuroscience, 33 (47), pp. 18396–18401, 2013. @article{Zimmermann2013b, title = {Spatial position information accumulates steadily over time}, author = {Eckart Zimmermann and Concetta M Morrone and David C Burr}, doi = {10.1523/JNEUROSCI.1864-13.2013}, year = {2013}, date = {2013-01-01}, journal = {Journal of Neuroscience}, volume = {33}, number = {47}, pages = {18396--18401}, abstract = {One of the more enduring mysteries of neuroscience is how the visual system constructs robust maps of the world that remain stable in the face of frequent eye movements. Here we show that encoding the position of objects in external space is a relatively slow process, building up over hundreds of milliseconds. We display targets to which human subjects saccade after a variable preview duration. As they saccade, the target is displaced leftwards or rightwards, and subjects report the displacement direction. When subjects saccade to targets without delay, sensitivity is poor; but if the target is viewed for 300-500 ms before saccading, sensitivity is similar to that during fixation with a strong visual mask to dampen transients. These results suggest that the poor displacement thresholds usually observed in the "saccadic suppression of displacement" paradigm are a result of the fact that the target has had insufficient time to be encoded in memory, and not a result of the action of special mechanisms conferring saccadic stability. Under more natural conditions, trans-saccadic displacement detection is as good as in fixation, when the displacement transients are masked.}, keywords = {}, pubstate = {published}, tppubtype = {article} } One of the more enduring mysteries of neuroscience is how the visual system constructs robust maps of the world that remain stable in the face of frequent eye movements. Here we show that encoding the position of objects in external space is a relatively slow process, building up over hundreds of milliseconds. We display targets to which human subjects saccade after a variable preview duration. As they saccade, the target is displaced leftwards or rightwards, and subjects report the displacement direction. When subjects saccade to targets without delay, sensitivity is poor; but if the target is viewed for 300-500 ms before saccading, sensitivity is similar to that during fixation with a strong visual mask to dampen transients. These results suggest that the poor displacement thresholds usually observed in the "saccadic suppression of displacement" paradigm are a result of the fact that the target has had insufficient time to be encoded in memory, and not a result of the action of special mechanisms conferring saccadic stability. Under more natural conditions, trans-saccadic displacement detection is as good as in fixation, when the displacement transients are masked. |
Eckart Zimmermann; Concetta M Morrone; David C Burr The visual component to saccadic compression Journal Article Journal of Vision, 14 (12), pp. 13–13, 2014. @article{Zimmermann2014b, title = {The visual component to saccadic compression}, author = {Eckart Zimmermann and Concetta M Morrone and David C Burr}, doi = {10.1167/14.12.13}, year = {2014}, date = {2014-01-01}, journal = {Journal of Vision}, volume = {14}, number = {12}, pages = {13--13}, abstract = {Visual objects presented around the time of saccadic eye movements are strongly mislocalized towards the saccadic target, a phenomenon known as "saccadic compression." Here we show that perisaccadic compression is modulated by the presence of a visual saccadic target. When subjects saccaded to the center of the screen with no visible target, perisaccadic localization was more veridical than when tested with a target. Presenting a saccadic target sometime before saccade initiation was sufficient to induce mislocalization. When we systematically varied the onset of the saccade target, we found that it had to be presented around 100 ms before saccade execution to cause strong mislocalization: saccadic targets presented after this time caused progressively less mislocalization. When subjects made a saccade to screen center with a reference object placed at various positions, mislocalization was focused towards the position of the reference object. The results suggest that saccadic compression is a signature of a mechanism attempting to match objects seen before the saccade with those seen after.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual objects presented around the time of saccadic eye movements are strongly mislocalized towards the saccadic target, a phenomenon known as "saccadic compression." Here we show that perisaccadic compression is modulated by the presence of a visual saccadic target. When subjects saccaded to the center of the screen with no visible target, perisaccadic localization was more veridical than when tested with a target. Presenting a saccadic target sometime before saccade initiation was sufficient to induce mislocalization. When we systematically varied the onset of the saccade target, we found that it had to be presented around 100 ms before saccade execution to cause strong mislocalization: saccadic targets presented after this time caused progressively less mislocalization. When subjects made a saccade to screen center with a reference object placed at various positions, mislocalization was focused towards the position of the reference object. The results suggest that saccadic compression is a signature of a mechanism attempting to match objects seen before the saccade with those seen after. |
Eckart Zimmermann; Concetta M Morrone; David C Burr Visual mislocalization during saccade sequences Journal Article Experimental Brain Research, 233 (2), pp. 577–585, 2015. @article{Zimmermann2015a, title = {Visual mislocalization during saccade sequences}, author = {Eckart Zimmermann and Concetta M Morrone and David C Burr}, doi = {10.1007/s00221-014-4138-z}, year = {2015}, date = {2015-01-01}, journal = {Experimental Brain Research}, volume = {233}, number = {2}, pages = {577--585}, abstract = {Visual objects briefly presented around the time of saccadic eye movements are perceived compressed towards the saccade target. Here, we investigated perisaccadic mislocalization with a double-step saccade paradigm, measuring localization of small probe dots briefly flashed at various times around the sequence of the two saccades. At onset of the first saccade, probe dots were mislocalized towards the first and, to a lesser extent, also towards the second saccade target. However, there was very little mislocalization at the onset of the second saccade. When we increased the presentation duration of the saccade targets prior to onset of the saccade sequence, perisaccadic mislocalization did occur at the onset of the second saccade.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual objects briefly presented around the time of saccadic eye movements are perceived compressed towards the saccade target. Here, we investigated perisaccadic mislocalization with a double-step saccade paradigm, measuring localization of small probe dots briefly flashed at various times around the sequence of the two saccades. At onset of the first saccade, probe dots were mislocalized towards the first and, to a lesser extent, also towards the second saccade target. However, there was very little mislocalization at the onset of the second saccade. When we increased the presentation duration of the saccade targets prior to onset of the saccade sequence, perisaccadic mislocalization did occur at the onset of the second saccade. |
Eckart Zimmermann; Ralph Weidner; Gereon R Fink Spatiotopic updating of visual feature information Journal Article Journal of Vision, 17 (12), pp. 6, 2017. @article{Zimmermann2017, title = {Spatiotopic updating of visual feature information}, author = {Eckart Zimmermann and Ralph Weidner and Gereon R Fink}, doi = {10.1167/17.12.6}, year = {2017}, date = {2017-01-01}, journal = {Journal of Vision}, volume = {17}, number = {12}, pages = {6}, abstract = {Saccades shift the retina with high-speed motion. In order to compensate for the sudden displacement, the visuomotor system needs to combine saccade-related information and visual metrics. Many neurons in oculomotor but also in visual areas shift their receptive field shortly before the execution of a saccade (Duhamel, Colby, & Goldberg, 1992; Nakamura & Colby, 2002). These shifts supposedly enable the binding of information from before and after the saccade. It is a matter of current debate whether these shifts are merely location based (i.e., involve remapping of abstract spatial coordinates) or also comprise information about visual features. We have recently presented fMRI evidence for a feature-based remapping mechanism in visual areas V3, V4, and VO (Zimmermann, Weidner, Abdollahi, & Fink, 2016). In particular, we found fMRI adaptation in cortical regions representing a stimulus' retinotopic as well as its spatiotopic position. Here, we asked whether spatiotopic adaptation exists independently from retinotopic adaptation and which type of information is behaviorally more relevant after saccade execution. We first adapted at the saccade target location only and found a spatiotopic tilt aftereffect. Then, we simultaneously adapted both the fixation and the saccade target location but with opposite tilt orientations. As a result, adaptation from the fixation location was carried retinotopically to the saccade target position. The opposite tilt orientation at the retinotopic location altered the effects induced by spatiotopic adaptation. More precisely, it cancelled out spatiotopic adaptation at the saccade target location. We conclude that retinotopic and spatiotopic visual adaptation are independent effects.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Saccades shift the retina with high-speed motion. In order to compensate for the sudden displacement, the visuomotor system needs to combine saccade-related information and visual metrics. Many neurons in oculomotor but also in visual areas shift their receptive field shortly before the execution of a saccade (Duhamel, Colby, & Goldberg, 1992; Nakamura & Colby, 2002). These shifts supposedly enable the binding of information from before and after the saccade. It is a matter of current debate whether these shifts are merely location based (i.e., involve remapping of abstract spatial coordinates) or also comprise information about visual features. We have recently presented fMRI evidence for a feature-based remapping mechanism in visual areas V3, V4, and VO (Zimmermann, Weidner, Abdollahi, & Fink, 2016). In particular, we found fMRI adaptation in cortical regions representing a stimulus' retinotopic as well as its spatiotopic position. Here, we asked whether spatiotopic adaptation exists independently from retinotopic adaptation and which type of information is behaviorally more relevant after saccade execution. We first adapted at the saccade target location only and found a spatiotopic tilt aftereffect. 
Then, we simultaneously adapted both the fixation and the saccade target location but with opposite tilt orientations. As a result, adaptation from the fixation location was carried retinotopically to the saccade target position. The opposite tilt orientation at the retinotopic location altered the effects induced by spatiotopic adaptation. More precisely, it cancelled out spatiotopic adaptation at the saccade target location. We conclude that retinotopic and spatiotopic visual adaptation are independent effects. |
Josua Zimmermann; Dominik R Bach Impact of a reminder/extinction procedure on threat-conditioned pupil size and skin conductance responses Journal Article Learning & Memory, 27 (4), pp. 164–172, 2020. @article{Zimmermann2020b, title = {Impact of a reminder/extinction procedure on threat-conditioned pupil size and skin conductance responses}, author = {Josua Zimmermann and Dominik R Bach}, doi = {10.1101/lm.050211.119}, year = {2020}, date = {2020-01-01}, journal = {Learning & Memory}, volume = {27}, number = {4}, pages = {164--172}, abstract = {A reminder can render consolidated memory labile and susceptible to amnesic agents during a reconsolidation window. For the case of threat memory (also termed fear memory), it has been suggested that extinction training during this reconsolidation window has the same disruptive impact. This procedure could provide a powerful therapeutic principle for treatment of unwanted aversive memories. However, human research yielded contradictory results. Notably, all published positive replications quantified threat memory by conditioned skin conductance responses (SCR). Yet, other studies measuring SCR and/or fear-potentiated startle failed to observe an effect of a reminder/extinction procedure on the return of fear. Here we sought to shed light on this discrepancy by using a different autonomic response, namely, conditioned pupil dilation, in addition to SCR, in a replication of the original human study. N = 71 humans underwent a 3-day threat conditioning, reminder/extinction, and reinstatement procedure with 2 CS+, of which one was reminded. Participants successfully learned the threat association on day 1, extinguished conditioned responding on day 2, and showed reinstatement on day 3. However, there was no difference in conditioned responding between the reminded and the nonreminded CS, neither in pupil size nor SCR. Thus, we found no evidence that a reminder trial before extinction prevents the return of threat-conditioned responding.}, keywords = {}, pubstate = {published}, tppubtype = {article} } A reminder can render consolidated memory labile and susceptible to amnesic agents during a reconsolidation window. For the case of threat memory (also termed fear memory), it has been suggested that extinction training during this reconsolidation window has the same disruptive impact. This procedure could provide a powerful therapeutic principle for treatment of unwanted aversive memories. However, human research yielded contradictory results. Notably, all published positive replications quantified threat memory by conditioned skin conductance responses (SCR). Yet, other studies measuring SCR and/or fear-potentiated startle failed to observe an effect of a reminder/extinction procedure on the return of fear. Here we sought to shed light on this discrepancy by using a different autonomic response, namely, conditioned pupil dilation, in addition to SCR, in a replication of the original human study. N = 71 humans underwent a 3-day threat conditioning, reminder/extinction, and reinstatement procedure with 2 CS+, of which one was reminded. Participants successfully learned the threat association on day 1, extinguished conditioned responding on day 2, and showed reinstatement on day 3. However, there was no difference in conditioned responding between the reminded and the nonreminded CS, neither in pupil size nor SCR. Thus, we found no evidence that a reminder trial before extinction prevents the return of threat-conditioned responding. |
Artyom Zinchenko; Markus Conci; Johannes Hauser; Hermann J Müller; Thomas Geyer Distributed attention beats the down-side of statistical context learning in visual search Journal Article Journal of Vision, 20 (7), pp. 1–14, 2020. @article{Zinchenko2020, title = {Distributed attention beats the down-side of statistical context learning in visual search}, author = {Artyom Zinchenko and Markus Conci and Johannes Hauser and Hermann J Müller and Thomas Geyer}, doi = {10.1167/JOV.20.7.4}, year = {2020}, date = {2020-01-01}, journal = {Journal of Vision}, volume = {20}, number = {7}, pages = {1--14}, abstract = {Learnt target-distractor contexts guide visual search. However, updating a previously acquired target-distractor memory subsequent to a change of the target location has been found to be rather inefficient and slow. These results show that the imperviousness of contextual memory to incorporating relocated targets is particularly pronounced when observers adopt a narrow focus of attention to perform a rather difficult form-conjunction search task. By contrast, when they adopt a broad attentional distribution, context-based memories can be updated more readily because this mode promotes the acquisition of more global contextual representations that continue to provide effective cues even after target relocation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Learnt target-distractor contexts guide visual search. However, updating a previously acquired target-distractor memory subsequent to a change of the target location has been found to be rather inefficient and slow. These results show that the imperviousness of contextual memory to incorporating relocated targets is particularly pronounced when observers adopt a narrow focus of attention to perform a rather difficult form-conjunction search task. By contrast, when they adopt a broad attentional distribution, context-based memories can be updated more readily because this mode promotes the acquisition of more global contextual representations that continue to provide effective cues even after target relocation. |
Artyom Zinchenko; Markus Conci; Thomas Töllner; Hermann J Müller; Thomas Geyer Automatic guidance (and misguidance) of visuospatial attention by acquired scene memory: Evidence from an N1pc polarity reversal Journal Article Psychological Science, 31 (12), pp. 1–13, 2020. @article{Zinchenko2020a, title = {Automatic guidance (and misguidance) of visuospatial attention by acquired scene memory: Evidence from an N1pc polarity reversal}, author = {Artyom Zinchenko and Markus Conci and Thomas Töllner and Hermann J Müller and Thomas Geyer}, doi = {10.1177/0956797620954815}, year = {2020}, date = {2020-01-01}, journal = {Psychological Science}, volume = {31}, number = {12}, pages = {1--13}, abstract = {Visual search is facilitated when the target is repeatedly encountered at a fixed position within an invariant (vs. randomly variable) distractor layout—that is, when the layout is learned and guides attention to the target, a phenomenon known as contextual cuing. Subsequently changing the target location within a learned layout abolishes contextual cuing, which is difficult to relearn. Here, we used lateralized event-related electroencephalogram (EEG) potentials to explore memory-based attentional guidance (N = 16). The results revealed reliable contextual cuing during initial learning and an associated EEG-amplitude increase for repeated layouts in attention-related components, starting with an early posterior negativity (N1pc, 80–180 ms). When the target was relocated to the opposite hemifield following learning, contextual cuing was effectively abolished, and the N1pc was reversed in polarity (indicative of persistent misguidance of attention to the original target location). Thus, once learned, repeated layouts trigger attentional-priority signals from memory that proactively interfere with contextual relearning after target relocation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Visual search is facilitated when the target is repeatedly encountered at a fixed position within an invariant (vs. randomly variable) distractor layout—that is, when the layout is learned and guides attention to the target, a phenomenon known as contextual cuing. Subsequently changing the target location within a learned layout abolishes contextual cuing, which is difficult to relearn. Here, we used lateralized event-related electroencephalogram (EEG) potentials to explore memory-based attentional guidance (N = 16). The results revealed reliable contextual cuing during initial learning and an associated EEG-amplitude increase for repeated layouts in attention-related components, starting with an early posterior negativity (N1pc, 80–180 ms). When the target was relocated to the opposite hemifield following learning, contextual cuing was effectively abolished, and the N1pc was reversed in polarity (indicative of persistent misguidance of attention to the original target location). Thus, once learned, repeated layouts trigger attentional-priority signals from memory that proactively interfere with contextual relearning after target relocation. |
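Editor's note: for readers unfamiliar with lateralized ERP components, the N1pc reported in the entry above is conventionally obtained as a contralateral-minus-ipsilateral difference wave over posterior electrodes, averaged within the component's time window (80–180 ms here). The sketch below illustrates that generic computation only; the epoch layout, sampling rate, and variable names are assumptions, not the study's pipeline.

import numpy as np

# Sketch of a lateralized difference wave such as the N1pc. Assumes epochs start
# at -0.2 s, sampled at 500 Hz; contra/ipsi are (n_trials, n_samples) arrays from
# posterior electrodes, sorted so "contra" lies opposite the target hemifield.
def lateralized_amplitude(contra, ipsi, sfreq=500.0, t0=-0.2, window=(0.08, 0.18)):
    diff = contra.mean(axis=0) - ipsi.mean(axis=0)  # contra-minus-ipsi wave
    start = int(round((window[0] - t0) * sfreq))    # window onset in samples
    stop = int(round((window[1] - t0) * sfreq))     # window offset in samples
    return diff[start:stop].mean()                  # mean amplitude in the window

# The polarity reversal reported above corresponds to a sign flip of this value
# after the target is relocated within a learned layout.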
Wieske van Zoest; Mieke Donk; Jan Theeuwes The role of stimulus-driven and goal-driven control in saccadic visual selection Journal Article Journal of Experimental Psychology: Human Perception and Performance, 30 (4), pp. 746–759, 2004. @article{Zoest2004, title = {The role of stimulus-driven and goal-driven control in saccadic visual selection}, author = {Wieske van Zoest and Mieke Donk and Jan Theeuwes}, doi = {10.1037/0096-1523.30.4.746}, year = {2004}, date = {2004-01-01}, journal = {Journal of Experimental Psychology: Human Perception and Performance}, volume = {30}, number = {4}, pages = {746--759}, abstract = {Four experiments were conducted to investigate the role of stimulus-driven and goal-driven control in saccadic eye movements. Participants were required to make a speeded saccade toward a predefined target presented concurrently with multiple nontargets and possibly 1 distractor. Target and distractor were either equally salient (Experiments 1 and 2) or not (Experiments 3 and 4). The results uniformly demonstrated that fast eye movements were completely stimulus driven, whereas slower eye movements were goal driven. These results are in line with neither a bottom-up account nor a top-down notion of visual selection. Instead, they indicate that visual selection is the outcome of 2 independent processes, one stimulus driven and the other goal driven, operating in different time windows.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Four experiments were conducted to investigate the role of stimulus-driven and goal-driven control in saccadic eye movements. Participants were required to make a speeded saccade toward a predefined target presented concurrently with multiple nontargets and possibly 1 distractor. Target and distractor were either equally salient (Experiments 1 and 2) or not (Experiments 3 and 4). The results uniformly demonstrated that fast eye movements were completely stimulus driven, whereas slower eye movements were goal driven. These results are in line with neither a bottom-up account nor a top-down notion of visual selection. Instead, they indicate that visual selection is the outcome of 2 independent processes, one stimulus driven and the other goal driven, operating in different time windows. |
Wieske van Zoest; Mieke Donk Saccadic target selection as a function of time Journal Article Spatial Vision, 19 (1), pp. 61–76, 2006. @article{Zoest2006, title = {Saccadic target selection as a function of time}, author = {Wieske van Zoest and Mieke Donk}, doi = {10.1007/s10530-005-5106-0}, year = {2006}, date = {2006-01-01}, journal = {Spatial Vision}, volume = {19}, number = {1}, pages = {61--76}, abstract = {Recent evidence indicates that stimulus-driven and goal-directed control of visual selection operate independently and in different time windows (van Zoest et al., 2004). The present study further investigates how eye movements are affected by stimulus-driven and goal-directed control. Observers were presented with search displays consisting of one target, multiple non-targets and one distractor element. The task of observers was to make a fast eye movement to a target immediately following the offset of a central fixation point, an event that either co-occurred with or soon followed the presentation of the search display. Distractor saliency and target-distractor similarity were independently manipulated. The results demonstrated that the effect of distractor saliency was transient and only present for the fastest eye movements, whereas the effect of target-distractor similarity was sustained and present in all but the fastest eye movements. The results support an independent timing account of visual selection.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recent evidence indicates that stimulus-driven and goal-directed control of visual selection operate independently and in different time windows (van Zoest et al., 2004). The present study further investigates how eye movements are affected by stimulus-driven and goal-directed control. Observers were presented with search displays consisting of one target, multiple non-targets and one distractor element. The task of observers was to make a fast eye movement to a target immediately following the offset of a central fixation point, an event that either co-occurred with or soon followed the presentation of the search display. Distractor saliency and target-distractor similarity were independently manipulated. The results demonstrated that the effect of distractor saliency was transient and only present for the fastest eye movements, whereas the effect of target-distractor similarity was sustained and present in all but the fastest eye movements. The results support an independent timing account of visual selection. |
Wieske van Zoest; Amelia R Hunt Saccadic eye movements and perceptual judgments reveal a shared visual representation that is increasingly accurate over time Journal Article Vision Research, 51 (1), pp. 111–119, 2011. @article{Zoest2011, title = {Saccadic eye movements and perceptual judgments reveal a shared visual representation that is increasingly accurate over time}, author = {Wieske van Zoest and Amelia R Hunt}, doi = {10.1016/j.visres.2010.10.013}, year = {2011}, date = {2011-01-01}, journal = {Vision Research}, volume = {51}, number = {1}, pages = {111--119}, publisher = {Elsevier Ltd}, abstract = {Although there is evidence to suggest visual illusions affect perceptual judgments more than actions, many studies have failed to detect task-dependent dissociations. In two experiments we attempt to resolve the contradiction by exploring the time-course of visual illusion effects on both saccadic eye movements and perceptual judgments, using the Judd illusion. The results showed that, regardless of whether a saccadic response or a perceptual judgement was made, the illusory bias was larger when responses were based on less information, that is, when saccadic latencies were short, or display duration was brief. The time-course of the effect was similar for both the saccadic responses and perceptual judgements, suggesting that both modes may be driven by a shared visual representation. Changes in the strength of the illusion over time also highlight the importance of controlling for the latency of different response systems when evaluating possible dissociations between them.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Although there is evidence to suggest visual illusions affect perceptual judgments more than actions, many studies have failed to detect task-dependent dissociations. In two experiments we attempt to resolve the contradiction by exploring the time-course of visual illusion effects on both saccadic eye movements and perceptual judgments, using the Judd illusion. The results showed that, regardless of whether a saccadic response or a perceptual judgement was made, the illusory bias was larger when responses were based on less information, that is, when saccadic latencies were short, or display duration was brief. The time-course of the effect was similar for both the saccadic responses and perceptual judgements, suggesting that both modes may be driven by a shared visual representation. Changes in the strength of the illusion over time also highlight the importance of controlling for the latency of different response systems when evaluating possible dissociations between them. |
Wieske van Zoest; Mieke Donk; Stefan van der Stigchel Stimulus-salience and the time-course of saccade trajectory deviations Journal Article Journal of Vision, 12 (8), pp. 1–16, 2012. @article{Zoest2012, title = {Stimulus-salience and the time-course of saccade trajectory deviations}, author = {Wieske van Zoest and Mieke Donk and Stefan van der Stigchel}, doi = {10.1167/12.8.16}, year = {2012}, date = {2012-01-01}, journal = {Journal of Vision}, volume = {12}, number = {8}, pages = {1--16}, abstract = {The deviation of a saccade trajectory is a measure of the oculomotor competition evoked by a distractor. The aim of the present study was to investigate the impact of stimulus-salience on the time-course of saccade trajectory deviations to get a better insight into how stimulus-salience influences oculomotor competition over time. Two experiments were performed in which participants were required to make a vertical saccade to a target presented in an array of nontarget line elements and one additional distractor. The distractor varied in salience, where salience was defined by an orientation contrast relative to the surrounding nontargets. In Experiment 2, target-distractor similarity was additionally manipulated. In both Experiments 1 and 2, the results revealed that the eyes deviated towards the irrelevant distractor and did so more when the distractor was salient compared to when it was not salient. Critically, salience influenced performance only when people were fast to elicit an eye movement and had no effect when saccade latencies were long. Target-distractor similarity did not influence this pattern. These results show that the impact of salience in the visual system is transient.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The deviation of a saccade trajectory is a measure of the oculomotor competition evoked by a distractor. The aim of the present study was to investigate the impact of stimulus-salience on the time-course of saccade trajectory deviations to get a better insight into how stimulus-salience influences oculomotor competition over time. Two experiments were performed in which participants were required to make a vertical saccade to a target presented in an array of nontarget line elements and one additional distractor. The distractor varied in salience, where salience was defined by an orientation contrast relative to the surrounding nontargets. In Experiment 2, target-distractor similarity was additionally manipulated. In both Experiments 1 and 2, the results revealed that the eyes deviated towards the irrelevant distractor and did so more when the distractor was salient compared to when it was not salient. Critically, salience influenced performance only when people were fast to elicit an eye movement and had no effect when saccade latencies were long. Target-distractor similarity did not influence this pattern. These results show that the impact of salience in the visual system is transient. |
Wieske van Zoest; Dirk Kerzel The effects of saliency on manual reach trajectories and reach target selection Journal Article Vision Research, 113 , pp. 179–187, 2015. @article{Zoest2015, title = {The effects of saliency on manual reach trajectories and reach target selection}, author = {Wieske van Zoest and Dirk Kerzel}, doi = {10.1016/j.visres.2014.11.015}, year = {2015}, date = {2015-01-01}, journal = {Vision Research}, volume = {113}, pages = {179--187}, publisher = {Elsevier Ltd}, abstract = {Reaching trajectories curve toward salient distractors, reflecting the competing activation of reach plans toward target and distractor stimuli. We investigated whether the relative saliency of target and distractor influenced the curvature of the movement and the selection of the final endpoint of the reach. Participants were asked to reach a bar tilted to the right in a context of gray vertical bars. A bar tilted to the left served as distractor. Relative stimulus saliency was varied via color: either the distractor was red and the target was gray, or vice versa. Throughout, we observed that reach trajectories deviated toward the distractor. Surprisingly, relative saliency had no effect on the curvature of reach trajectories. Moreover, when we increased time pressure in separate experiments and analyzed the curvature as a function of reaction time, no influence of relative stimulus saliency was found, not even for the fastest reaction times. If anything, curvature decreased with strong time pressure. In contrast, reach target selection under strong time pressure was influenced by relative saliency: reaches with short reaction times were likely to go to the red distractor. The time course of reach target selection was comparable to saccadic target selection. Implications for the neural basis of trajectory deviations and target selection in manual and eye movements are discussed.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Reaching trajectories curve toward salient distractors, reflecting the competing activation of reach plans toward target and distractor stimuli. We investigated whether the relative saliency of target and distractor influenced the curvature of the movement and the selection of the final endpoint of the reach. Participants were asked to reach a bar tilted to the right in a context of gray vertical bars. A bar tilted to the left served as distractor. Relative stimulus saliency was varied via color: either the distractor was red and the target was gray, or vice versa. Throughout, we observed that reach trajectories deviated toward the distractor. Surprisingly, relative saliency had no effect on the curvature of reach trajectories. Moreover, when we increased time pressure in separate experiments and analyzed the curvature as a function of reaction time, no influence of relative stimulus saliency was found, not even for the fastest reaction times. If anything, curvature decreased with strong time pressure. In contrast, reach target selection under strong time pressure was influenced by relative saliency: reaches with short reaction times were likely to go to the red distractor. The time course of reach target selection was comparable to saccadic target selection. Implications for the neural basis of trajectory deviations and target selection in manual and eye movements are discussed. |
Wieske van Zoest; Benedetta Heimler; Francesco Pavani The oculomotor salience of flicker, apparent motion and continuous motion in saccade trajectories Journal Article Experimental Brain Research, 235 , pp. 181–191, 2017. @article{Zoest2017, title = {The oculomotor salience of flicker, apparent motion and continuous motion in saccade trajectories}, author = {Wieske van Zoest and Benedetta Heimler and Francesco Pavani}, doi = {10.1007/s00221-016-4779-1}, year = {2017}, date = {2017-01-01}, journal = {Experimental Brain Research}, volume = {235}, pages = {181--191}, publisher = {Springer Berlin Heidelberg}, abstract = {The aim of the present study was to investigate the impact of dynamic distractors on the time-course of oculomotor selection using saccade trajectory deviations. Participants were instructed to make a speeded eye movement (pro-saccade) to a target presented above or below the fixation point while an irrelevant distractor was presented. Four types of distractors were varied within participants: (1) static, (2) flicker, (3) rotating apparent motion and (4) continuous motion. The eccentricity of the distractor was varied between participants. The results showed that saccadic trajectories curved towards distractors presented near the vertical midline; no reliable deviation was found for distractors presented further away from the vertical midline. Differences between the flickering and rotating distractor were found when distractor eccentricity was small and these specific effects developed over time such that there was a clear differentiation between saccadic deviation based on apparent motion for long-latency saccades, but not short-latency saccades. The present results suggest that the influence on performance of apparent motion stimuli is relatively delayed and acts in a more sustained manner compared to the influence of salient static, flickering and continuous moving stimuli.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The aim of the present study was to investigate the impact of dynamic distractors on the time-course of oculomotor selection using saccade trajectory deviations. Participants were instructed to make a speeded eye movement (pro-saccade) to a target presented above or below the fixation point while an irrelevant distractor was presented. Four types of distractors were varied within participants: (1) static, (2) flicker, (3) rotating apparent motion and (4) continuous motion. The eccentricity of the distractor was varied between participants. The results showed that saccadic trajectories curved towards distractors presented near the vertical midline; no reliable deviation was found for distractors presented further away from the vertical midline. Differences between the flickering and rotating distractor were found when distractor eccentricity was small and these specific effects developed over time such that there was a clear differentiation between saccadic deviation based on apparent motion for long-latency saccades, but not short-latency saccades. The present results suggest that the influence on performance of apparent motion stimuli is relatively delayed and acts in a more sustained manner compared to the influence of salient static, flickering and continuous moving stimuli. |
Nahid Zokaei; Alexander G Board; Sanjay G Manohar; Anna C Nobre Modulation of the pupillary response by the content of visual working memory Journal Article Proceedings of the National Academy of Sciences, 116 (45), pp. 22802–22810, 2019. @article{Zokaei2019, title = {Modulation of the pupillary response by the content of visual working memory}, author = {Nahid Zokaei and Alexander G Board and Sanjay G Manohar and Anna C Nobre}, doi = {10.1073/pnas.1909959116}, year = {2019}, date = {2019-10-01}, journal = {Proceedings of the National Academy of Sciences}, volume = {116}, number = {45}, pages = {22802--22810}, abstract = {Studies of selective attention during perception have revealed modulation of the pupillary response according to the brightness of task-relevant (attended) vs. -irrelevant (unattended) stimuli within a visual display. As a strong test of top-down modulation of the pupil response by selective attention, we asked whether changes in pupil diameter follow internal shifts of attention to memoranda of visual stimuli of different brightness maintained in working memory, in the absence of any visual stimulation. Across 3 studies, we reveal dilation of the pupil when participants orient attention to the memorandum of a dark grating relative to that of a bright grating. The effect occurs even when the attention-orienting cue is independent of stimulus brightness, and even when stimulus brightness is merely incidental and not required for the working-memory task of judging stimulus orientation. Furthermore, relative dilation and constriction of the pupil occurred dynamically and followed the changing temporal expectation that 1 or the other stimulus would be probed across the retention delay. The results provide surprising and consistent evidence that pupil responses are under top-down control by cognitive factors, even when there is no direct adaptive gain for such modulation, since no visual stimuli were presented or anticipated. The results also strengthen the view of sensory recruitment during working memory, suggesting even activation of sensory receptors. The thought-provoking corollary to our findings is that the pupils provide a reliable measure of what is in the focus of mind, thus giving a different meaning to old proverbs about the eyes being a window to the mind.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Studies of selective attention during perception have revealed modulation of the pupillary response according to the brightness of task-relevant (attended) vs. -irrelevant (unattended) stimuli within a visual display. As a strong test of top-down modulation of the pupil response by selective attention, we asked whether changes in pupil diameter follow internal shifts of attention to memoranda of visual stimuli of different brightness maintained in working memory, in the absence of any visual stimulation. Across 3 studies, we reveal dilation of the pupil when participants orient attention to the memorandum of a dark grating relative to that of a bright grating. The effect occurs even when the attention-orienting cue is independent of stimulus brightness, and even when stimulus brightness is merely incidental and not required for the working-memory task of judging stimulus orientation. Furthermore, relative dilation and constriction of the pupil occurred dynamically and followed the changing temporal expectation that 1 or the other stimulus would be probed across the retention delay. 
The results provide surprising and consistent evidence that pupil responses are under top-down control by cognitive factors, even when there is no direct adaptive gain for such modulation, since no visual stimuli were presented or anticipated. The results also strengthen the view of sensory recruitment during working memory, suggesting even activation of sensory receptors. The thought-provoking corollary to our findings is that the pupils provide a reliable measure of what is in the focus of mind, thus giving a different meaning to old proverbs about the eyes being a window to the mind. |
Joshua Zonca; Giorgio Coricelli; Luca Polonio Gaze data reveal individual differences in relational representation processes Journal Article Journal of Experimental Psychology: Learning, Memory, and Cognition, 46 (2), pp. 257–279, 2020. @article{Zonca2020, title = {Gaze data reveal individual differences in relational representation processes}, author = {Joshua Zonca and Giorgio Coricelli and Luca Polonio}, doi = {10.1037/xlm0000723}, year = {2020}, date = {2020-01-01}, journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, volume = {46}, number = {2}, pages = {257--279}, publisher = {American Psychological Association Inc.}, abstract = {In our everyday life, we often need to anticipate the potential occurrence of events and their consequences. In this context, the way we represent contingencies can determine our ability to adapt to the environment. However, it is not clear how agents encode and organize available knowledge about the future to react to possible states of the world. In the present study, we investigated the process of contingency representation with three eye-tracking experiments. In Experiment 1, we introduced a novel relational-inference task in which participants had to learn and represent conditional rules regulating the occurrence of interdependent future events. A cluster analysis on early gaze data revealed the existence of 2 distinct types of encoders. A group of (sophisticated) participants built exhaustive contingency models that explicitly linked states with each of their potential consequences. Another group of (unsophisticated) participants simply learned binary conditional rules without exploring the underlying relational complexity. Analyses of individual cognitive measures revealed that cognitive reflection is associated with the emergence of either sophisticated or unsophisticated representation behavior. In Experiment 2, we observed that unsophisticated participants switched toward the sophisticated strategy after having received information about its existence, suggesting that representation behavior was modulated by strategy generation mechanisms. In Experiment 3, we showed that the heterogeneity in representation strategy emerges also in conditional reasoning with verbal sequences, indicating the existence of a general disposition in building either sophisticated or unsophisticated models of contingencies.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In our everyday life, we often need to anticipate the potential occurrence of events and their consequences. In this context, the way we represent contingencies can determine our ability to adapt to the environment. However, it is not clear how agents encode and organize available knowledge about the future to react to possible states of the world. In the present study, we investigated the process of contingency representation with three eye-tracking experiments. In Experiment 1, we introduced a novel relational-inference task in which participants had to learn and represent conditional rules regulating the occurrence of interdependent future events. A cluster analysis on early gaze data revealed the existence of 2 distinct types of encoders. A group of (sophisticated) participants built exhaustive contingency models that explicitly linked states with each of their potential consequences. Another group of (unsophisticated) participants simply learned binary conditional rules without exploring the underlying relational complexity. 
Analyses of individual cognitive measures revealed that cognitive reflection is associated with the emergence of either sophisticated or unsophisticated representation behavior. In Experiment 2, we observed that unsophisticated participants switched toward the sophisticated strategy after having received information about its existence, suggesting that representation behavior was modulated by strategy generation mechanisms. In Experiment 3, we showed that the heterogeneity in representation strategy emerges also in conditional reasoning with verbal sequences, indicating the existence of a general disposition in building either sophisticated or unsophisticated models of contingencies. |
Regine Zopf; Marina Butko; Alexandra Woolgar; Mark A Williams; Anina N Rich Representing the location of manipulable objects in shape-selective occipitotemporal cortex: Beyond retinotopic reference frames? Journal Article Cortex, 106 , pp. 132–150, 2018. @article{Zopf2018, title = {Representing the location of manipulable objects in shape-selective occipitotemporal cortex: Beyond retinotopic reference frames?}, author = {Regine Zopf and Marina Butko and Alexandra Woolgar and Mark A Williams and Anina N Rich}, doi = {10.1016/j.cortex.2018.05.009}, year = {2018}, date = {2018-01-01}, journal = {Cortex}, volume = {106}, pages = {132--150}, abstract = {When interacting with objects, we have to represent their location relative to our bodies. To facilitate bodily reactions, location may be encoded in the brain not just with respect to the retina (retinotopic reference frame), but also in relation to the head, trunk or arm (collectively spatiotopic reference frames). While spatiotopic reference frames for location encoding can be found in brain areas for action planning, such as parietal areas, there is debate about the existence of spatiotopic reference frames in higher-level occipitotemporal visual areas. In an extensive multi-voxel pattern analysis (MVPA) fMRI study using faces, headless bodies and scenes stimuli, Golomb and Kanwisher (2012) did not find evidence for spatiotopic reference frames in shape-selective occipitotemporal cortex. This finding is important for theories of how stimulus location is encoded in the brain. It is possible, however, that their failure to find spatiotopic reference frames is related to their stimuli: we typically do not manipulate faces, headless bodies or scenes. It is plausible that we only represent body-centred location when viewing objects that are typically manipulated. Here, we tested for object location encoding in shape-selective occipitotemporal cortex using manipulable object stimuli (balls and cups) in a MVPA fMRI study. We employed Bayesian analyses to determine sample size and evaluate the sensitivity of our data to test the hypothesis that location can be encoded in a spatiotopic reference frame in shape-selective occipitotemporal cortex over the null hypothesis of no spatiotopic location encoding. We found strong evidence for retinotopic location encoding consistent with previous findings that retinotopic reference frames are common neural representations of object location. In contrast, when testing for spatiotopic encoding, we found evidence that object location information for small manipulable objects is not decodable in relation to the body in shape-selective occipitotemporal cortex. Post-hoc exploratory analyses suggested that spatiotopic aspects might modulate retinotopic location encoding.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When interacting with objects, we have to represent their location relative to our bodies. To facilitate bodily reactions, location may be encoded in the brain not just with respect to the retina (retinotopic reference frame), but also in relation to the head, trunk or arm (collectively spatiotopic reference frames). While spatiotopic reference frames for location encoding can be found in brain areas for action planning, such as parietal areas, there is debate about the existence of spatiotopic reference frames in higher-level occipitotemporal visual areas. 
In an extensive multi-voxel pattern analysis (MVPA) fMRI study using faces, headless bodies, and scenes as stimuli, Golomb and Kanwisher (2012) did not find evidence for spatiotopic reference frames in shape-selective occipitotemporal cortex. This finding is important for theories of how stimulus location is encoded in the brain. It is possible, however, that their failure to find spatiotopic reference frames is related to their stimuli: we typically do not manipulate faces, headless bodies or scenes. It is plausible that we only represent body-centred location when viewing objects that are typically manipulated. Here, we tested for object location encoding in shape-selective occipitotemporal cortex using manipulable object stimuli (balls and cups) in an MVPA fMRI study. We employed Bayesian analyses to determine sample size and evaluate the sensitivity of our data to test the hypothesis that location can be encoded in a spatiotopic reference frame in shape-selective occipitotemporal cortex over the null hypothesis of no spatiotopic location encoding. We found strong evidence for retinotopic location encoding, consistent with previous findings that retinotopic reference frames are common neural representations of object location. In contrast, when testing for spatiotopic encoding, we found evidence that object location information for small manipulable objects is not decodable in relation to the body in shape-selective occipitotemporal cortex. Post-hoc exploratory analyses suggested that spatiotopic aspects might modulate retinotopic location encoding. |
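The reference-frame logic of such MVPA studies reduces to a generalization test: train a decoder on object-location labels coded in one reference frame, then test it on trials acquired at a different fixation position. Below is a minimal sketch of that idea in Python, assuming scikit-learn and purely illustrative variable names; it is not the authors' analysis code (they used Bayesian analyses, which this frequentist sketch does not reproduce).

```python
from sklearn.svm import LinearSVC

def cross_fixation_decoding(patterns_fix1, labels_fix1,
                            patterns_fix2, labels_fix2):
    """Train on trials from one fixation position, test on the other.

    `patterns_*` are (n_trials, n_voxels) response arrays from a
    shape-selective ROI; `labels_*` code object location in the
    reference frame under test (retina-centred or body-centred).
    Above-chance generalization across fixation positions is evidence
    for location coding in that reference frame.
    """
    clf = LinearSVC().fit(patterns_fix1, labels_fix1)
    return clf.score(patterns_fix2, labels_fix2)  # cross-position accuracy
```

Because the study crossed fixation position with screen position, retinotopic and spatiotopic labels dissociate, so the same data can be scored against either labeling.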
Eirini Zormpa; Antje S Meyer; Laurel E Brehm Slow naming of pictures facilitates memory for their names Journal Article Psychonomic Bulletin & Review, 26 , pp. 1675–1682, 2019. @article{Zormpa2019, title = {Slow naming of pictures facilitates memory for their names}, author = {Eirini Zormpa and Antje S Meyer and Laurel E Brehm}, doi = {10.3758/s13423-019-01620-x}, year = {2019}, date = {2019-01-01}, journal = {Psychonomic Bulletin & Review}, volume = {26}, pages = {1675--1682}, publisher = {Psychonomic Bulletin & Review}, abstract = {Speakers remember their own utterances better than those of their interlocutors, suggesting that language production is beneficial to memory. This may be partly explained by a generation effect: The act of generating a word is known to lead to a memory advantage (Slamecka & Graf, 1978). In earlier work, we showed a generation effect for recognition of images (Zormpa, Brehm, Hoedemaker, & Meyer, 2019). Here, we tested whether the recognition of their names would also benefit from name generation. Testing whether picture naming improves memory for words was our primary aim, as it serves to clarify whether the representations affected by generation are visual or conceptual/lexical. A secondary aim was to assess the influence of processing time on memory. Fifty-one participants named pictures in three conditions: after hearing the picture name (identity condition), backward speech, or an unrelated word. A day later, recognition memory was tested in a yes/no task. Memory in the backward speech and unrelated conditions, which required generation, was superior to memory in the identity condition, which did not require generation. The time taken by participants for naming was a good predictor of memory, such that words that took longer to be retrieved were remembered better. Importantly, that was the case only when generation was required: In the no-generation (identity) condition, processing time was not related to recognition memory performance. This work has shown that generation affects conceptual/lexical representations, making an important contribution to the understanding of the relationship between memory and language.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Speakers remember their own utterances better than those of their interlocutors, suggesting that language production is beneficial to memory. This may be partly explained by a generation effect: The act of generating a word is known to lead to a memory advantage (Slamecka & Graf, 1978). In earlier work, we showed a generation effect for recognition of images (Zormpa, Brehm, Hoedemaker, & Meyer, 2019). Here, we tested whether the recognition of their names would also benefit from name generation. Testing whether picture naming improves memory for words was our primary aim, as it serves to clarify whether the representations affected by generation are visual or conceptual/lexical. A secondary aim was to assess the influence of processing time on memory. Fifty-one participants named pictures in three conditions: after hearing the picture name (identity condition), backward speech, or an unrelated word. A day later, recognition memory was tested in a yes/no task. Memory in the backward speech and unrelated conditions, which required generation, was superior to memory in the identity condition, which did not require generation. The time taken by participants for naming was a good predictor of memory, such that words that took longer to be retrieved were remembered better. 
Importantly, that was the case only when generation was required: In the no-generation (identity) condition, processing time was not related to recognition memory performance. This work has shown that generation affects conceptual/lexical representations, making an important contribution to the understanding of the relationship between memory and language. |
Heng Zou; Hermann J Müller; Zhuanghua Shi Non-spatial sounds regulate eye movements and enhance visual search Journal Article Journal of Vision, 12 (5), pp. 2–2, 2012. @article{Zou2012, title = {Non-spatial sounds regulate eye movements and enhance visual search}, author = {Heng Zou and Hermann J Müller and Zhuanghua Shi}, doi = {10.1167/12.5.2}, year = {2012}, date = {2012-01-01}, journal = {Journal of Vision}, volume = {12}, number = {5}, pages = {2--2}, abstract = {Spatially uninformative sounds can enhance visual search when the sounds are synchronized with color changes of the visual target, a phenomenon referred to as the "pip-and-pop" effect (van der Burg, Olivers, Bronkhorst, & Theeuwes, 2008). The present study investigated the relationship of this effect to changes in oculomotor scanning behavior induced by the sounds. The results revealed that sound events increased fixation durations upon their occurrence and decreased the mean number of saccades. More specifically, spatially uninformative sounds facilitated the orientation of ocular scanning away from already scanned display regions not containing a target (Experiment 1) and enhanced search performance even on target-absent trials (Experiment 2). Facilitation was also observed when the sounds were presented 100 ms prior to the target or at random (Experiment 3). These findings suggest that non-spatial sounds cause a general freezing effect on oculomotor scanning behavior, an effect which in turn benefits visual search performance by temporally and spatially extended information sampling.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Spatially uninformative sounds can enhance visual search when the sounds are synchronized with color changes of the visual target, a phenomenon referred to as the "pip-and-pop" effect (van der Burg, Olivers, Bronkhorst, & Theeuwes, 2008). The present study investigated the relationship of this effect to changes in oculomotor scanning behavior induced by the sounds. The results revealed that sound events increased fixation durations upon their occurrence and decreased the mean number of saccades. More specifically, spatially uninformative sounds facilitated the orientation of ocular scanning away from already scanned display regions not containing a target (Experiment 1) and enhanced search performance even on target-absent trials (Experiment 2). Facilitation was also observed when the sounds were presented 100 ms prior to the target or at random (Experiment 3). These findings suggest that non-spatial sounds cause a general freezing effect on oculomotor scanning behavior, an effect which in turn benefits visual search performance by temporally and spatially extended information sampling. |
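The event-locked oculomotor measure described in this abstract (sounds lengthening the fixations they fall into) is straightforward to compute from a fixation report. A minimal sketch follows, with hypothetical argument names rather than the authors' pipeline:

```python
import numpy as np

def fixation_durations_by_sound(fix_on, fix_off, sound_times):
    """Split fixation durations by whether a sound occurred during them.

    fix_on, fix_off : arrays of fixation onset/offset times (s)
    sound_times     : array of sound onset times (s)

    Returns (durations_with_sound, durations_without_sound), the two
    groups whose comparison illustrates the reported lengthening of
    fixations that contain a sound event.
    """
    fix_on = np.asarray(fix_on, dtype=float)
    fix_off = np.asarray(fix_off, dtype=float)
    sound_times = np.asarray(sound_times, dtype=float)
    has_sound = np.array([np.any((sound_times >= on) & (sound_times < off))
                          for on, off in zip(fix_on, fix_off)])
    durations = fix_off - fix_on
    return durations[has_sound], durations[~has_sound]
```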
Tianlong Zu; John Hutson; Lester C Loschky; Sanjay N Rebello Using eye movements to measure intrinsic, extraneous, and germane load in a multimedia learning environment Journal Article Journal of Educational Psychology, 112 (7), pp. 1338–1352, 2020. @article{Zu2020, title = {Using eye movements to measure intrinsic, extraneous, and germane load in a multimedia learning environment}, author = {Tianlong Zu and John Hutson and Lester C Loschky and Sanjay N Rebello}, doi = {10.1037/edu0000441}, year = {2020}, date = {2020-01-01}, journal = {Journal of Educational Psychology}, volume = {112}, number = {7}, pages = {1338--1352}, abstract = {In a previous study, DeLeeuw and Mayer (2008) found support for the triarchic model of cognitive load (Sweller, Van Merrienboer, & Paas, 1998, 2019) by showing that three different metrics could be used to independently measure 3 hypothesized types of cognitive load: intrinsic, extraneous, and germane. However, 2 of the 3 metrics that the authors used were intrusive in nature because learning had to be stopped momentarily to complete the measures. The current study extends the design of DeLeeuw and Mayer (2008) by investigating whether learners' eye movement behavior can be used to measure the three proposed types of cognitive load without interrupting learning. During a 1-hr experiment, we presented a multimedia lesson explaining the mechanism of electric motors to participants who had low prior knowledge of this topic. First, we replicated the main results of DeLeeuw and Mayer (2008), providing further support for the triarchic structure of cognitive load. Second, we identified eye movement measures that differentiated the three types of cognitive load. These findings were independent of participants' working memory capacity. Together, these results provide further evidence for the triarchic nature of cognitive load (Sweller et al., 1998, 2019), and are a first step toward online measures of cognitive load that could potentially be implemented into computer assisted learning technologies.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In a previous study, DeLeeuw and Mayer (2008) found support for the triarchic model of cognitive load (Sweller, Van Merrienboer, & Paas, 1998, 2019) by showing that three different metrics could be used to independently measure 3 hypothesized types of cognitive load: intrinsic, extraneous, and germane. However, 2 of the 3 metrics that the authors used were intrusive in nature because learning had to be stopped momentarily to complete the measures. The current study extends the design of DeLeeuw and Mayer (2008) by investigating whether learners' eye movement behavior can be used to measure the three proposed types of cognitive load without interrupting learning. During a 1-hr experiment, we presented a multimedia lesson explaining the mechanism of electric motors to participants who had low prior knowledge of this topic. First, we replicated the main results of DeLeeuw and Mayer (2008), providing further support for the triarchic structure of cognitive load. Second, we identified eye movement measures that differentiated the three types of cognitive load. These findings were independent of participants' working memory capacity. Together, these results provide further evidence for the triarchic nature of cognitive load (Sweller et al., 1998, 2019), and are a first step toward online measures of cognitive load that could potentially be implemented into computer assisted learning technologies. |
Wietske Zuiderbaan; Ben M Harvey; Serge O Dumoulin Modeling center–surround configurations in population receptive fields using fMRI Journal Article Journal of Vision, 12 (3), pp. 1–15, 2012. @article{Zuiderbaan2012, title = {Modeling center–surround configurations in population receptive fields using fMRI}, author = {Wietske Zuiderbaan and Ben M Harvey and Serge O Dumoulin}, doi = {10.1167/12.3.10}, year = {2012}, date = {2012-01-01}, journal = {Journal of Vision}, volume = {12}, number = {3}, pages = {1--15}, abstract = {Antagonistic center–surround configurations are a central organizational principle of our visual system. In visual cortex, stimulation outside the classical receptive field can decrease neural activity and also decrease functional Magnetic Resonance Imaging (fMRI) signal amplitudes. Decreased fMRI amplitudes below baseline—0% contrast—are often referred to as “negative” responses. Using neural model-based fMRI data analyses, we can estimate the region of visual space to which each cortical location responds, i.e., the population receptive field (pRF). Current models of the pRF do not account for a center–surround organization or negative fMRI responses. Here, we extend the pRF model by adding surround suppression. Where the conventional model uses a circular symmetric Gaussian function to describe the pRF, the new model uses a circular symmetric difference-of-Gaussians (DoG) function. The DoG model allows the pRF analysis to capture fMRI signals below baseline and surround suppression. Comparing the fits of the models, an increased variance explained is found for the DoG model. This improvement was predominantly present in V1/2/3 and decreased in later visual areas. The improvement of the fits was particularly striking in the parts of the fMRI signal below baseline. Estimates for the surround size of the pRF show an increase with eccentricity and over visual areas V1/2/3. For the suppression index, which is based on the ratio between the volumes of both Gaussians, we show a decrease over visual areas V1 and V2. Using non-invasive fMRI techniques, this method makes it possible to examine assumptions about center–surround receptive fields in human subjects.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Antagonistic center–surround configurations are a central organizational principle of our visual system. In visual cortex, stimulation outside the classical receptive field can decrease neural activity and also decrease functional Magnetic Resonance Imaging (fMRI) signal amplitudes. Decreased fMRI amplitudes below baseline—0% contrast—are often referred to as “negative” responses. Using neural model-based fMRI data analyses, we can estimate the region of visual space to which each cortical location responds, i.e., the population receptive field (pRF). Current models of the pRF do not account for a center–surround organization or negative fMRI responses. Here, we extend the pRF model by adding surround suppression. Where the conventional model uses a circular symmetric Gaussian function to describe the pRF, the new model uses a circular symmetric difference-of-Gaussians (DoG) function. The DoG model allows the pRF analysis to capture fMRI signals below baseline and surround suppression. Comparing the fits of the models, an increased variance explained is found for the DoG model. This improvement was predominantly present in V1/2/3 and decreased in later visual areas. 
The improvement of the fits was particularly striking in the parts of the fMRI signal below baseline. Estimates for the surround size of the pRF show an increase with eccentricity and over visual areas V1/2/3. For the suppression index, which is based on the ratio between the volumes of both Gaussians, we show a decrease over visual areas V1 and V2. Using non-invasive fMRI techniques, this method makes it possible to examine assumptions about center–surround receptive fields in human subjects. |
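The DoG extension replaces the single-Gaussian pRF with an excitatory center Gaussian minus a scaled, broader suppressive surround. Here is a minimal sketch of the profile, plus one plausible reading of the volume-based suppression index; parameter names are illustrative, not the paper's notation:

```python
import numpy as np

def dog_prf(x, y, x0, y0, sigma_c, sigma_s, beta):
    """Difference-of-Gaussians pRF profile at visual-field points (x, y).

    A circularly symmetric excitatory centre (std sigma_c) minus a
    scaled (beta), broader suppressive surround (sigma_s > sigma_c),
    for a pRF centred at (x0, y0).
    """
    r2 = (x - x0) ** 2 + (y - y0) ** 2
    centre = np.exp(-r2 / (2 * sigma_c ** 2))
    surround = beta * np.exp(-r2 / (2 * sigma_s ** 2))
    return centre - surround

def suppression_index(sigma_c, sigma_s, beta):
    """Ratio of surround to centre Gaussian volumes.

    The volume of a 2D Gaussian is amplitude * 2 * pi * sigma**2, so the
    2*pi factors cancel in the ratio. One plausible reading of the
    paper's volume-based index, not necessarily its exact definition.
    """
    return beta * (sigma_s / sigma_c) ** 2
```

In a full pRF fit, this profile would be multiplied with the stimulus aperture over time and convolved with a hemodynamic response function to predict the voxel time series.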
Jan Zwickel; Hermann J Müller Eye movements as a means to evaluate and improve robots Journal Article International Journal of Social Robotics, 1 (4), pp. 357–366, 2009. @article{Zwickel2009, title = {Eye movements as a means to evaluate and improve robots}, author = {Jan Zwickel and Hermann J Müller}, doi = {10.1007/s12369-009-0033-3}, year = {2009}, date = {2009-01-01}, journal = {International Journal of Social Robotics}, volume = {1}, number = {4}, pages = {357--366}, abstract = {With an increase in their capabilities, robots start to play a role in everyday settings. This necessitates a step from a robot-centered (i.e., teaching humans to adapt to robots) to a more human-centered approach (where robots integrate naturally into human activities). Achieving this will increase the effectiveness of robot usage (e.g., shortening the time required for learning), reduce errors, and increase user acceptance. Robotic camera control will play an important role for a more natural and easier-to-interpret behavior, owing to the central importance of gaze in human communication. This study is intended to provide a first step towards improving camera control by a better understanding of human gaze behavior in social situations. To this end, we registered the eye movements of humans watching different types of movies. In all movies, the same two triangles moved around in a self-propelled fashion. However, crucially, some of the movies elicited the attribution of mental states to the triangles, while others did not. This permitted us to directly distinguish eye movement patterns relating to the attribution of mental states in (perceived) social situations, from the patterns in non-social situations. We argue that a better understanding of what characterizes human gaze patterns in social situations will help shape robotic behavior, make it more natural for humans to communicate with robots, and establish joint attention (to certain objects) between humans and robots. In addition, a better understanding of human gaze in social situations will provide a measure for evaluating whether robots are perceived as social agents rather than non-intentional machines. This could help decide which behaviors a robot should display in order to be perceived as a social interaction partner.}, keywords = {}, pubstate = {published}, tppubtype = {article} } With an increase in their capabilities, robots start to play a role in everyday settings. This necessitates a step from a robot-centered (i.e., teaching humans to adapt to robots) to a more human-centered approach (where robots integrate naturally into human activities). Achieving this will increase the effectiveness of robot usage (e.g., shortening the time required for learning), reduce errors, and increase user acceptance. Robotic camera control will play an important role for a more natural and easier-to-interpret behavior, owing to the central importance of gaze in human communication. This study is intended to provide a first step towards improving camera control by a better understanding of human gaze behavior in social situations. To this end, we registered the eye movements of humans watching different types of movies. In all movies, the same two triangles moved around in a self-propelled fashion. However, crucially, some of the movies elicited the attribution of mental states to the triangles, while others did not. 
This permitted us to directly distinguish eye movement patterns relating to the attribution of mental states in (perceived) social situations, from the patterns in non-social situations. We argue that a better understanding of what characterizes human gaze patterns in social situations will help shape robotic behavior, make it more natural for humans to communicate with robots, and establish joint attention (to certain objects) between humans and robots. In addition, a better understanding of human gaze in social situations will provide a measure for evaluating whether robots are perceived as social agents rather than non-intentional machines. This could help decide which behaviors a robot should display in order to be perceived as a social interaction partner. |
Jan Zwickel; Melissa L-H Võ How the presence of persons biases eye movements Journal Article Psychonomic Bulletin & Review, 17 (2), pp. 257–262, 2010. @article{Zwickel2010, title = {How the presence of persons biases eye movements}, author = {Jan Zwickel and Melissa L-H V{õ}}, doi = {10.3758/PBR.17.2.257}, year = {2010}, date = {2010-01-01}, journal = {Psychonomic Bulletin & Review}, volume = {17}, number = {2}, pages = {257--262}, abstract = {We investigated modulation of gaze behavior of observers viewing complex scenes that included a person. To assess spontaneous orientation-following, and in contrast to earlier studies, we did not make the person salient via instruction or low-level saliency. Still, objects that were referred to by the orientation of the person were visited earlier, more often, and longer than when they were not referred to. Analysis of fixation sequences showed that the number of saccades to the cued and uncued objects differed only for saccades that started from the head region, but not for saccades starting from a control object or from a body region. We therefore argue that viewing a person leads to an increase in spontaneous following of the person's viewing direction even when the person plays no role in scene understanding and is not made prominent.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We investigated modulation of gaze behavior of observers viewing complex scenes that included a person. To assess spontaneous orientation-following, and in contrast to earlier studies, we did not make the person salient via instruction or low-level saliency. Still, objects that were referred to by the orientation of the person were visited earlier, more often, and longer than when they were not referred to. Analysis of fixation sequences showed that the number of saccades to the cued and uncued objects differed only for saccades that started from the head region, but not for saccades starting from a control object or from a body region. We therefore argue that viewing a person leads to an increase in spontaneous following of the person's viewing direction even when the person plays no role in scene understanding and is not made prominent. |
Jan Zwickel; Mathias Hegele; Marc Grosjean Ocular tracking of biological and nonbiological motion: The effect of instructed agency Journal Article Psychonomic Bulletin & Review, 19 (1), pp. 52–57, 2012. @article{Zwickel2012, title = {Ocular tracking of biological and nonbiological motion: The effect of instructed agency}, author = {Jan Zwickel and Mathias Hegele and Marc Grosjean}, doi = {10.3758/s13423-011-0193-7}, year = {2012}, date = {2012-01-01}, journal = {Psychonomic Bulletin & Review}, volume = {19}, number = {1}, pages = {52--57}, abstract = {Recent findings suggest that visuomotor performance is modulated by people's beliefs about the agency (e.g., animate vs. inanimate) behind the events they perceive. This study investigated the effect of instructed agency on ocular tracking of point-light motions with biological and nonbiological velocity profiles. The motions followed either a relatively simple (ellipse) or a more complex (scribble) trajectory, and agency was manipulated by informing the participants that the motions they saw were either human or computer generated. In line with previous findings, tracking performance was better for biological than for nonbiological motions, and this effect was particularly pronounced for the simpler (elliptical) motions. The biological advantage was also larger for the human than for the computer instruction condition, but only for a measure that captured the predictive component of smooth pursuit. These results suggest that ocular tracking is influenced by the internal forward model people choose to adopt.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Recent findings suggest that visuomotor performance is modulated by people's beliefs about the agency (e.g., animate vs. inanimate) behind the events they perceive. This study investigated the effect of instructed agency on ocular tracking of point-light motions with biological and nonbiological velocity profiles. The motions followed either a relatively simple (ellipse) or a more complex (scribble) trajectory, and agency was manipulated by informing the participants that the motions they saw were either human or computer generated. In line with previous findings, tracking performance was better for biological than for nonbiological motions, and this effect was particularly pronounced for the simpler (elliptical) motions. The biological advantage was also larger for the human than for the computer instruction condition, but only for a measure that captured the predictive component of smooth pursuit. These results suggest that ocular tracking is influenced by the internal forward model people choose to adopt. |
Ariel Zylberberg; Pablo Barttfeld; Mariano Sigman The construction of confidence in a perceptual decision Journal Article Frontiers in Integrative Neuroscience, 6 (September), pp. 1–10, 2012. @article{Zylberberg2012, title = {The construction of confidence in a perceptual decision}, author = {Ariel Zylberberg and Pablo Barttfeld and Mariano Sigman}, doi = {10.3389/fnint.2012.00079}, year = {2012}, date = {2012-01-01}, journal = {Frontiers in Integrative Neuroscience}, volume = {6}, number = {September}, pages = {1--10}, abstract = {Decision-making involves the selection of one out of many possible courses of action. A decision may bear on other decisions, as when humans seek a second medical opinion before undergoing a risky surgical intervention. These "meta-decisions" are mediated by confidence judgments: the degree to which decision-makers consider that a choice is likely to be correct. We studied how subjective confidence is constructed from noisy sensory evidence. The psychophysical kernels used to convert sensory information into choice and confidence decisions were precisely reconstructed by measuring the impact of small fluctuations in sensory input. This is shown in two independent experiments in which human participants made a decision about the direction of motion of a set of randomly moving dots, or compared the brightness of a group of fluctuating bars, followed by a confidence report. The results of both experiments converged to show that: (1) confidence was influenced by evidence during a short window of time at the initial moments of the decision, and (2) confidence was influenced by evidence for the selected choice but was virtually blind to evidence for the non-selected choice. Our findings challenge classical models of subjective confidence, which posit that the difference of evidence in favor of each choice is the seed of the confidence signal.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Decision-making involves the selection of one out of many possible courses of action. A decision may bear on other decisions, as when humans seek a second medical opinion before undergoing a risky surgical intervention. These "meta-decisions" are mediated by confidence judgments: the degree to which decision-makers consider that a choice is likely to be correct. We studied how subjective confidence is constructed from noisy sensory evidence. The psychophysical kernels used to convert sensory information into choice and confidence decisions were precisely reconstructed by measuring the impact of small fluctuations in sensory input. This is shown in two independent experiments in which human participants made a decision about the direction of motion of a set of randomly moving dots, or compared the brightness of a group of fluctuating bars, followed by a confidence report. The results of both experiments converged to show that: (1) confidence was influenced by evidence during a short window of time at the initial moments of the decision, and (2) confidence was influenced by evidence for the selected choice but was virtually blind to evidence for the non-selected choice. Our findings challenge classical models of subjective confidence, which posit that the difference of evidence in favor of each choice is the seed of the confidence signal. |
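The psychophysical-kernel reconstruction mentioned here is, at its core, reverse correlation: average the frame-by-frame evidence fluctuations conditioned on the subsequent report. A minimal sketch under that standard reading, with illustrative names rather than the authors' code:

```python
import numpy as np

def psychophysical_kernel(noise, choices):
    """Choice-triggered average of stimulus fluctuations.

    noise   : (n_trials, n_frames) array of the small random
              fluctuations in the evidence shown on each frame
              (e.g., motion energy or bar luminance around its
              nominal value).
    choices : (n_trials,) array of +1 / -1 decisions (or, analogously,
              high / low confidence reports).

    Returns the kernel over frames; an early peak would indicate a
    short initial window in which evidence shaped the report.
    """
    noise = np.asarray(noise, dtype=float)
    choices = np.asarray(choices)
    return noise[choices == 1].mean(axis=0) - noise[choices == -1].mean(axis=0)
```

Conditioning the same average on confidence instead of choice is what lets the paper compare when evidence drives each judgment.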
Ariel Zylberberg; Manuel Oliva; Mariano Sigman Pupil dilation: A fingerprint of temporal selection during the "Attentional Blink" Journal Article Frontiers in Psychology, 3 (AUG), pp. 1–6, 2012. @article{Zylberberg2012a, title = {Pupil dilation: A fingerprint of temporal selection during the "Attentional Blink"}, author = {Ariel Zylberberg and Manuel Oliva and Mariano Sigman}, doi = {10.3389/fpsyg.2012.00316}, year = {2012}, date = {2012-01-01}, journal = {Frontiers in Psychology}, volume = {3}, number = {AUG}, pages = {1--6}, abstract = {Pupil dilation indexes cognitive events of behavioral relevance, like the storage of information to memory and the deployment of attention. Yet, given the slow temporal response of the pupil dilation, it is not known from previous studies whether the pupil can index cognitive events in the short time scale of ∼100 ms. Here we measured the size of the pupil in the Attentional Blink (AB) experiment, a classic demonstration of attentional limitations in processing rapidly presented stimuli. In the AB, two targets embedded in a sequence have to be reported and the second stimulus is often missed if presented between 200 and 500 ms after the first. We show that pupil dilation can be used as a marker of cognitive processing in AB, revealing both the timing and amount of cognitive processing. Specifically, we found that in the time range where the AB is known to occur: (i) the pupil dilation was delayed, mimicking the pattern of response times in the Psychological Refractory Period (PRP) paradigm, (ii) the amplitude of the pupil was reduced relative to that of larger lags, even for correctly identified targets, and (iii) the amplitude of the pupil was smaller for missed than for correctly reported targets. These results support two-stage theories of the Attentional Blink where a second processing stage is delayed inside the interference regime, and indicate that the pupil dilation can be used as a marker of cognitive processing in the time scale of ∼100 ms. Furthermore, given the known relation between the pupil dilation and the activity of the locus coeruleus, our results also support theories that link the serial stage to the action of a specific neuromodulator, norepinephrine.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Pupil dilation indexes cognitive events of behavioral relevance, like the storage of information to memory and the deployment of attention. Yet, given the slow temporal response of the pupil dilation, it is not known from previous studies whether the pupil can index cognitive events in the short time scale of ∼100 ms. Here we measured the size of the pupil in the Attentional Blink (AB) experiment, a classic demonstration of attentional limitations in processing rapidly presented stimuli. In the AB, two targets embedded in a sequence have to be reported and the second stimulus is often missed if presented between 200 and 500 ms after the first. We show that pupil dilation can be used as a marker of cognitive processing in AB, revealing both the timing and amount of cognitive processing. Specifically, we found that in the time range where the AB is known to occur: (i) the pupil dilation was delayed, mimicking the pattern of response times in the Psychological Refractory Period (PRP) paradigm, (ii) the amplitude of the pupil was reduced relative to that of larger lags, even for correctly identified targets, and (iii) the amplitude of the pupil was smaller for missed than for correctly reported targets. 
These results support two-stage theories of the Attentional Blink where a second processing stage is delayed inside the interference regime, and indicate that the pupil dilation can be used as a marker of cognitive processing in the time scale of ∼100 ms. Furthermore, given the known relation between the pupil dilation and the activity of the locus coeruleus, our results also support theories that link the serial stage to the action of a specific neuromodulator, norepinephrine. |
Ariel Zylberberg; Daniel M Wolpert; Michael N Shadlen Counterfactual reasoning underlies the learning of priors in decision making Journal Article Neuron, 99 (5), pp. 1083–1097, 2018. @article{Zylberberg2018, title = {Counterfactual reasoning underlies the learning of priors in decision making}, author = {Ariel Zylberberg and Daniel M Wolpert and Michael N Shadlen}, doi = {10.1016/j.neuron.2018.07.035}, year = {2018}, date = {2018-01-01}, journal = {Neuron}, volume = {99}, number = {5}, pages = {1083--1097}, publisher = {The Authors}, abstract = {Accurate decisions require knowledge of prior probabilities (e.g., prevalence or base rate), but it is unclear how prior probabilities are learned in the absence of a teacher. We hypothesized that humans could learn base rates from experience making decisions, even without feedback. Participants made difficult decisions about the direction of dynamic random dot motion. Across blocks of 15–42 trials, the base rate favoring left or right varied. Participants were not informed of the base rate or choice accuracy, yet they gradually biased their choices and thereby increased accuracy and confidence in their decisions. They achieved this by updating knowledge of base rate after each decision, using a counterfactual representation of confidence that simulates a neutral prior. The strategy is consistent with Bayesian updating of belief and suggests that humans represent both true confidence, which incorporates the evolving belief of the prior, and counterfactual confidence, which discounts the prior. Zylberberg et al. show that human decision makers can learn environmental biases from sequences of difficult decisions, without feedback about accuracy, by calculating the belief that the decisions would have been correct in an unbiased environment—a form of counterfactual confidence.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Accurate decisions require knowledge of prior probabilities (e.g., prevalence or base rate), but it is unclear how prior probabilities are learned in the absence of a teacher. We hypothesized that humans could learn base rates from experience making decisions, even without feedback. Participants made difficult decisions about the direction of dynamic random dot motion. Across blocks of 15–42 trials, the base rate favoring left or right varied. Participants were not informed of the base rate or choice accuracy, yet they gradually biased their choices and thereby increased accuracy and confidence in their decisions. They achieved this by updating knowledge of base rate after each decision, using a counterfactual representation of confidence that simulates a neutral prior. The strategy is consistent with Bayesian updating of belief and suggests that humans represent both true confidence, which incorporates the evolving belief of the prior, and counterfactual confidence, which discounts the prior. Zylberberg et al. show that human decision makers can learn environmental biases from sequences of difficult decisions, without feedback about accuracy, by calculating the belief that the decisions would have been correct in an unbiased environment—a form of counterfactual confidence. |
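The updating scheme described in this abstract can be illustrated with a deliberately simplified two-hypothesis model: the block favors rightward or leftward motion with some fixed rate, and the belief about the block is updated after each trial from the counterfactual (neutral-prior) confidence. Everything below, including the bias-strength parameter, is an assumption for illustration, not the paper's fitted model:

```python
def update_block_belief(p_block_right, conf_neutral_right, q=0.7):
    """One Bayesian update of the belief that the block is right-biased.

    p_block_right      : prior probability that the block favors 'right'
    conf_neutral_right : counterfactual confidence that THIS trial's
                         direction was rightward, computed as if the
                         prior were neutral (50/50), per the paper's key
                         idea of discounting the learned prior.
    q                  : assumed rate at which a biased block shows its
                         favored direction (hypothetical value; the
                         study varied base rates across blocks of
                         15-42 trials).
    """
    # Marginal likelihood of the trial's evidence under each hypothesis:
    # under a neutral prior, conf_neutral_right is proportional to the
    # likelihood that the true direction was rightward.
    lik_right_block = q * conf_neutral_right + (1 - q) * (1 - conf_neutral_right)
    lik_left_block = (1 - q) * conf_neutral_right + q * (1 - conf_neutral_right)
    post = p_block_right * lik_right_block
    return post / (post + (1 - p_block_right) * lik_left_block)
```

Iterating this update across trials gradually biases choices toward the block's favored direction without any feedback, which is the qualitative behavior the paper reports.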