Automatically generated by Mendeley Desktop 1.18
Any changes to this file will be lost if it is regenerated by Mendeley.
BibTeX export options can be customized via Options -> BibTeX in Mendeley Desktop
@article{Church2014,
author = {Church, R. Breckinridge and Kelly, Spencer and Holcombe, David},
doi = {10.1080/01690965.2013.857783},
file = {:home/josiah/Documents/Mendeley Desktop/Church, Kelly, Holcombe - 2014 - Temporal synchrony between speech, action and gesture during language production.pdf:pdf},
issn = {2327-3798},
journal = {Language, Cognition and Neuroscience},
keywords = {Action,Gesture,Language,Speech,Synchrony},
month = {mar},
number = {3},
pages = {345--354},
publisher = {Taylor {\&} Francis},
title = {{Temporal synchrony between speech, action and gesture during language production}},
url = {http://www.tandfonline.com/doi/abs/10.1080/01690965.2013.857783},
volume = {29},
year = {2014}
}
@article{DePaulo1992,
author = {DePaulo, Bella M.},
doi = {10.1037/0033-2909.111.2.203},
file = {:home/josiah/Documents/Mendeley Desktop/DePaulo - 1992 - Nonverbal behavior and self-presentation.pdf:pdf},
issn = {1939-1455},
journal = {Psychological Bulletin},
keywords = {deception},
mendeley-tags = {deception},
number = {2},
pages = {203--243},
pmid = {1557474},
title = {{Nonverbal behavior and self-presentation.}},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/0033-2909.111.2.203},
volume = {111},
year = {1992}
}
@article{Vrij2004,
author = {Vrij, Aldert and Akehurst, Lucy and Soukara, Stavroula and Bull, Ray},
doi = {10.1111/j.1468-2958.2004.tb00723.x},
file = {:home/josiah/Documents/Mendeley Desktop/Vrij et al. - 2004 - Detecting Deceit Via Analyses of Verbal and Nonverbal Behavior in Children and Adults.pdf:pdf},
issn = {0360-3989},
journal = {Human Communication Research},
keywords = {deception,nonverbal behavior,criteria-based content analysis,reality monitoring},
month = {jan},
number = {1},
pages = {8--41},
title = {{Detecting Deceit Via Analyses of Verbal and Nonverbal Behavior in Children and Adults}},
url = {https://academic.oup.com/hcr/article/30/1/8-41/4331442},
volume = {30},
year = {2004}
}
@article{Habets2013,
author = {Habets, Boukje and Kita, Sotaro and Shao, Zeshu and {\"{O}}zyurek, Asli and Hagoort, Peter},
doi = {10.1162/jocn.2010.21462},
file = {:home/josiah/Documents/Mendeley Desktop/Habets et al. - 2011 - The Role of Synchrony and Ambiguity in Speech–Gesture Integration during Comprehension.pdf:pdf},
issn = {0898-929X},
journal = {Journal of Cognitive Neuroscience},
month = {aug},
number = {8},
pages = {1845--1854},
title = {{The Role of Synchrony and Ambiguity in Speech–Gesture Integration during Comprehension}},
url = {http://www.mitpressjournals.org/doi/10.1162/jocn.2010.21462},
volume = {23},
year = {2011}
}
@article{Vrij1995,
abstract = {Nonverbal indicators of deception in a simulated police interview were examined, particularly the relationship between arm and hand movements and deception, an area in which conflicting results have previously been found. Therefore, in this study, I adopted a more detailed scoring system of arm and hand movements and introduced a baseline interview. Results showed a decrease in subtle hand and finger movements during deception, and thus strongly supported the assumption that subjects try to control their behavior during deception but that this attempted control is only partly successful.},
author = {Vrij, Aldert},
doi = {10.1080/00223980.1995.9914944},
file = {:home/josiah/Documents/Mendeley Desktop/Vrij - 1995 - Behavioral Correlates of Deception in a Simulated Police Interview.pdf:pdf},
issn = {0022-3980},
journal = {The Journal of Psychology},
keywords = {deception},
mendeley-tags = {deception},
month = {jan},
number = {1},
pages = {15--28},
title = {{Behavioral Correlates of Deception in a Simulated Police Interview}},
url = {http://www.tandfonline.com/doi/abs/10.1080/00223980.1995.9914944},
volume = {129},
year = {1995}
}
@article{Goldin-Meadow2001,
abstract = {Why is it that people cannot keep their hands still when they talk? One reason may be that gesturing actually lightens cognitive load while a person is thinking of what to say. We asked adults and children to remember a list of letters or words while explaining how they solved a math problem. Both groups remembered significantly more items when they gestured during their math explanations than when they did not gesture. Gesturing appeared to save the speakers' cognitive resources on the explanation task, permitting the speakers to allocate more resources to the memory task. It is widely accepted that gesturing reflects a speaker's cognitive state, but our observations suggest that, by reducing cognitive load, gesturing may also play a role in shaping that state.},
author = {Goldin-Meadow, Susan and Nusbaum, Howard and Kelly, Spencer D. and Wagner, Susan},
doi = {10.1111/1467-9280.00395},
file = {:home/josiah/Documents/Mendeley Desktop/Goldin-Meadow et al. - 2001 - Explaining math gesturing lightens the load.pdf:pdf},
issn = {0956-7976},
journal = {Psychological Science},
number = {6},
pages = {516--522},
pmid = {11760141},
title = {{Explaining math: gesturing lightens the load.}},
volume = {12},
year = {2001}
}
@inproceedings{Arciuli2009,
author = {Arciuli, J. and Villar, G. and Mallard, D.},
booktitle = {Proceedings of the 31st Annual Conference of the Cognitive Science Society},
file = {:home/josiah/Documents/Mendeley Desktop/Arciuli, Villar, Mallard - 2009 - Lies , lies and more lies.pdf:pdf},
keywords = {deception,lies,linguistic cues to deception},
mendeley-tags = {deception},
pages = {2329--2334},
title = {{Lies, lies and more lies}},
year = {2009}
}
@article{Vrij1996a,
abstract = {Beliefs about behavioral clues to deception were investigated in 212 people, consisting of prisoners, police detectives, patrol police officers, prison guards, customs officers, and college students. Previous studies, mainly conducted with college students as subjects, showed that people have some incorrect beliefs about behavioral clues to deception. It was hypothesized that prisoners would have the best notion about clues of deception, due to the fact that they receive the most adequate feedback about successful deception strategies. The results supported this hypothesis.},
author = {Vrij, Aldert and Semin, G{\"{u}}n R.},
doi = {10.1007/BF02248715},
file = {:home/josiah/Documents/Mendeley Desktop/Vrij, Semin - 1996 - Lie experts' beliefs about nonverbal indicators of deception.pdf:pdf},
issn = {0191-5886},
journal = {Journal of Nonverbal Behavior},
month = {mar},
number = {1},
pages = {65--80},
title = {{Lie experts' beliefs about nonverbal indicators of deception}},
url = {http://link.springer.com/10.1007/BF02248715},
volume = {20},
year = {1996}
}
@article{Bates2015,
archivePrefix = {arXiv},
arxivId = {1406.5823},
author = {Bates, Douglas and M{\"{a}}chler, Martin and Bolker, Ben and Walker, Steve},
doi = {10.18637/jss.v067.i01},
eprint = {1406.5823},
file = {:home/josiah/Documents/Mendeley Desktop/Bates et al. - 2015 - Fitting Linear Mixed-Effects Models Using lme4.pdf:pdf},
issn = {1548-7660},
journal = {Journal of Statistical Software},
keywords = {lme4,mixed,statistics methodology},
mendeley-tags = {lme4,mixed,statistics methodology},
month = {oct},
number = {1},
pages = {1--48},
title = {{Fitting Linear Mixed-Effects Models Using lme4}},
url = {http://www.jstatsoft.org/v67/i01/},
volume = {67},
year = {2015}
}
@article{Caso2006,
abstract = {The present experiment examined the relationship between different types of discourse linked hand movements and deception. Hand gestures were experimentally studied during truth telling and deception, and in situations with either weak or strong suspicion. Participants (128 Italian psychology students) were interviewed twice about the possession of an object. In one interview they were asked to lie and in the other asked to tell the truth (veracity factor). In both conditions, suspicion was raised after the interview: Participants were accused of lying by the interviewer and asked to repeat their account a second time (suspicion factor). Results indicate that lying was associated with a decrease in deictic gestures, and an increase in metaphoric gestures (main effect of veracity). Also a decrease in self-adaptor gestures was found. Strong suspicion was associated with an increase in metaphoric, rhythmic, and deictic gestures and a decrease in self-adaptor, emblematic, and cohesive gestures (main effect of suspicion). No interaction effect was found.},
author = {Caso, Letizia and Maricchiolo, Fridanna and Bonaiuto, Marino and Vrij, Aldert and Mann, Samantha},
doi = {10.1007/s10919-005-0001-z},
file = {:home/josiah/Documents/Mendeley Desktop/Caso et al. - 2006 - THE IMPACT OF DECEPTION AND SUSPICION ON DIFFERENT HAND MOVEMENTS.pdf:pdf},
issn = {0191-5886},
journal = {Journal of Nonverbal Behavior},
keywords = {Deception,Hand gestures,Suspicion},
month = {mar},
number = {1},
pages = {1--19},
title = {{The Impact of Deception and Suspicion on Different Hand Movements}},
url = {http://link.springer.com/10.1007/s10919-005-0001-z},
volume = {30},
year = {2006}
}
@inproceedings{Benus2006,
abstract = {We use a corpus of spontaneous interview speech to investigate the relationship between the distributional and prosodic characteristics of silent and filled pauses and the intent of an interviewee to deceive an interviewer. Our data suggest that the use of pauses correlates more with truthful than with deceptive speech, and that prosodic features extracted from filled pauses themselves as well as features describing contextual prosodic information in the vicinity of filled pauses may facilitate the detection of deceit in speech.},
author = {Benus, Stefan and Enos, Frank and Hirschberg, Julia and Shriberg, Elizabeth},
booktitle = {Speech Prosody},
file = {:home/josiah/Documents/Mendeley Desktop/Benus et al. - 2006 - Pauses in Deceptive Speech.pdf:pdf},
keywords = {deception},
mendeley-tags = {deception},
month = {may},
title = {{Pauses in Deceptive Speech}},
year = {2006}
}
@book{Baayen2008,
abstract = {Statistical analysis is a useful skill for linguists and psycholinguists, allowing them to understand the quantitative structure of their data. This textbook provides a straightforward introduction to the statistical analysis of language. Designed for linguists with a non-mathematical background, it clearly introduces the basic principles and methods of statistical analysis, using 'R', the leading computational statistics programme. The reader is guided step-by-step through a range of real data sets, allowing them to analyse acoustic data, construct grammatical trees for a variety of languages, quantify register variation in corpus linguistics, and measure experimental data using state-of-the-art models. The visualization of data plays a key role, both in the initial stages of data exploration and later on when the reader is encouraged to criticize various models. Containing over 40 exercises with model answers, this book will be welcomed by all linguists wishing to learn more about working with and presenting quantitative data.},
address = {Cambridge},
author = {Baayen, R.H.},
file = {:home/josiah/Documents/Mendeley Desktop/Baayen - 2008 - Analyzing Linguistic Data A Practical Introduction to Statistics Using R.pdf:pdf},
isbn = {9780521882590},
keywords = {R,linguistics,psycholinguistics,statistics,statistics methodology},
mendeley-tags = {r,statistics methodology},
publisher = {Cambridge University Press},
title = {{Analyzing Linguistic Data: A Practical Introduction to Statistics Using R}},
year = {2008}
}
@article{Kelly2010,
abstract = {Previous research has demonstrated a link between language and action in the brain. The present study investigates the strength of this neural relationship by focusing on a potential interface between the two systems: cospeech iconic gesture. Participants performed a Stroop-like task in which they watched videos of a man and a woman speaking and gesturing about common actions. The videos differed as to whether the gender of the speaker and gesturer was the same or different and whether the content of the speech and gesture was congruent or incongruent. The task was to identify whether a man or a woman produced the spoken portion of the videos while accuracy rates, RTs, and ERPs were recorded to the words. Although not relevant to the task, participants paid attention to the semantic relationship between the speech and the gesture, producing a larger N400 to words accompanied by incongruent versus congruent gestures. In addition, RTs were slower to incongruent versus congruent gesture-speech stimuli, but this effect was greater when the gender of the gesturer and speaker was the same versus different. These results suggest that the integration of gesture and speech during language comprehension is automatic but also under some degree of neurocognitive control.},
author = {Kelly, Spencer D. and Creigh, Peter and Bartolotti, James},
doi = {10.1162/jocn.2009.21254},
file = {:home/josiah/Documents/Mendeley Desktop/Kelly, Creigh, Bartolotti - 2010 - Integrating Speech and Iconic Gestures in a Stroop-like Task Evidence for Automatic Processing.pdf:pdf},
issn = {0898-929X},
journal = {Journal of Cognitive Neuroscience},
month = {apr},
number = {4},
pages = {683--694},
pmid = {19413483},
title = {{Integrating Speech and Iconic Gestures in a Stroop-like Task: Evidence for Automatic Processing}},
url = {http://www.mitpressjournals.org/doi/10.1162/jocn.2009.21254},
volume = {22},
year = {2010}
}
@article{King2018,
abstract = {Where the veracity of a statement is in question, listeners tend to interpret disfluency as signaling dishonesty. Previous research in deception suggests that this results from a speaker model, linking lying to cognitive effort and effort to disfluency. However, the disfluency–lying bias occurs very quickly: Might listeners instead simply heuristically associate disfluency with lying? To investigate this, we look at whether listeners' disfluency–lying biases are sensitive to context. Participants listened to a potentially dishonest speaker describe treasure as being behind a named object while viewing scenes comprising the referent (the named object) and a distractor. Their task was to click on the treasure's suspected true location. In line with previous work, participants clicked on the distractor more following disfluent descriptions, and this effect corresponded to an early fixation bias, demonstrating the online nature of the pragmatic judgment. The present study, however, also manipulated the presence of an alternative, local cause of speaker disfluency: the speaker momentarily distracted by a car horn. When disfluency could be attributed to speaker distraction, participants initially fixated more on the referent, only later fixating on and selecting the distractor. These findings support the speaker modeling view, showing that listeners can take momentary contextual causes of disfluency into account.},
author = {King, Josiah P.J. and Loy, Jia E. and Corley, Martin},
doi = {10.1080/0163853X.2017.1330041},
file = {:home/josiah/Documents/Mendeley Desktop/King, Loy, Corley - 2018 - Contextual Effects on Online Pragmatic Inferences of Deception.pdf:pdf},
issn = {15326950},
journal = {Discourse Processes},
number = {2},
pages = {123--135},
publisher = {Routledge},
title = {{Contextual Effects on Online Pragmatic Inferences of Deception}},
url = {https://doi.org/10.1080/0163853X.2017.1330041},
volume = {55},
year = {2018}
}
@article{Snodgrass1980,
abstract = {In this article we present a standardized set of 260 pictures for use in experiments investigating differences and similarities in the processing of pictures and words. The pictures are black-and-white line drawings executed according to a set of rules that provide consistency of pictorial representation. The pictures have been standardized on four variables of central relevance to memory and cognitive processing: name agreement, image agreement, familiarity, and visual complexity. The intercorrelations among the four measures were low, suggesting that they are indices of different attributes of the pictures. The concepts were selected to provide exemplars from several widely studied semantic categories. Sources of naming variance, and mean familiarity and complexity of the exemplars, differed significantly across the set of categories investigated. The potential significance of each of the normative variables to a number of semantic and episodic memory tasks is discussed.},
author = {Snodgrass, Joan G. and Vanderwart, Mary},
doi = {10.1037/0278-7393.6.2.174},
file = {:home/josiah/Documents/Mendeley Desktop/Snodgrass, Vanderwart - 1980 - A standardized set of 260 pictures Norms for name agreement, image agreement, familiarity, and visual com.pdf:pdf},
issn = {0096-1515},
journal = {Journal of Experimental Psychology: Human Learning and Memory},
keywords = {standardized set of pictures for use in experiment},
number = {2},
pages = {174--215},
pmid = {7373248},
title = {{A standardized set of 260 pictures: Norms for name agreement, image agreement, familiarity, and visual complexity.}},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/0278-7393.6.2.174},
volume = {6},
year = {1980}
}
@article{Silverman2010,
abstract = {This study examined iconic gesture comprehension in autism, with the goal of assessing whether cross-modal processing difficulties impede speech-and-gesture integration. Participants were 19 adolescents with high functioning autism (HFA) and 20 typical controls matched on age, gender, verbal IQ, and socio-economic status (SES). Gesture comprehension was assessed via quantitative analyses of visual fixations during a video-based task, using the visual world paradigm. Participants' eye movements were recorded while they watched videos of a person describing one of four shapes shown on a computer screen, using speech-and-gesture or speech-only descriptions. Participants clicked on the shape that the speaker described. Since gesture naturally precedes speech, earlier visual fixations to the target shape during speech-and-gesture compared to speech-only trials, would suggest immediate integration of auditory and visual information. Analyses of eye movements supported this pattern in control participants but not in individuals with autism: iconic gestures facilitated comprehension in typical individuals, while it hindered comprehension in those with autism. Cross-modal processing difficulties in autism were not accounted for by impaired unimodal speech or gesture processing. The results have important implications for the treatment of children and adults with this disorder. {\textcopyright} 2010 Elsevier B.V. All rights reserved.},
author = {Silverman, Laura B. and Bennetto, Loisa and Campana, Ellen and Tanenhaus, Michael K.},
doi = {10.1016/j.cognition.2010.01.002},
file = {:home/josiah/Documents/Mendeley Desktop/Silverman et al. - 2010 - Speech-and-gesture integration in high functioning autism.pdf:pdf},
issn = {00100277},
journal = {Cognition},
keywords = {Autism,Cross-modal integration,Eye movements,Gesture,Semantic processing},
month = {jun},
number = {3},
pages = {380--393},
pmid = {20356575},
publisher = {Elsevier B.V.},
title = {{Speech-and-gesture integration in high functioning autism}},
url = {http://dx.doi.org/10.1016/j.cognition.2010.01.002},
volume = {115},
year = {2010}
}
@manual{Rbase2017,
address = {Vienna, Austria},
author = {{R Core Team}},
organization = {R Foundation for Statistical Computing},
title = {{R: A Language and Environment for Statistical Computing}},
url = {https://www.r-project.org/},
year = {2017}
}
@article{Gregersen2005,
abstract = {This observation study examined the nonverbal behavior of anxious and nonanxious foreign language learners during a videotaped oral foreign language exam. Focusing primarily on the kinesic signals found in facial expressions, gazing behavior, body movement and gesture, and posture, it was discovered that anxious learners manifested limited facial activity including brow behavior and smiling; maintained less eye contact with the teacher; were more rigid and closed with their posture; and, although they self-touched and manipulated objects more than the nonanxious, they used fewer illustrative and regulatory gestures. Although significant differences were found, limitations for using nonverbal observation for detecting anxiety are discussed.},
author = {Gregersen, Tammy S.},
doi = {10.1111/j.1944-9720.2005.tb02225.x},
file = {:home/josiah/Documents/Mendeley Desktop/Gregersen - 2005 - Nonverbal cues Clues to the detection of foreign language anxiety.pdf:pdf},
issn = {0015718X},
journal = {Foreign Language Annals},
keywords = {Body language,Facial expression,Foreign language anxiety,Gesture,Nonverbal behavior},
number = {3},
pages = {388--400},
title = {{Nonverbal cues: Clues to the detection of foreign language anxiety}},
volume = {38},
year = {2005}
}
@article{DePaulo1982,
abstract = {Examined (a) specific verbal and paralinguistic cues that might reveal when deception is occurring or that might be used by perceivers in their attempts to detect deception and (b) the correspondence between actual and perceived cues to deception. 40 undergraduates served as liars and perceivers. As senders, Ss described either honestly or dishonestly other people they disliked; they also described the person they liked, pretending to dislike him or her. The verbal and nonverbal cues measured were nonfluencies, "um's" and "er's," rate, number of sentences, rate change, undifferentiating, simple differentiating, differentiating, dispositional, cognitive complexity, egocentric, mutual, other-oriented, excess other, positive, negative, neutral, excess positive, extremes, and "but's" and "yet's." The degree to which the cues actually were associated with deception corresponded significantly to the degree to which perceivers used those cues as signs of deceit. When Ss lied about people they really disliked, their descriptions were less positive and more neutral than when they honestly described people they really disliked. When feigning disliking, Ss uttered more nonfluencies than when expressing honest disliking. Descriptions that were spoken slowly and contained many "um's" and "er's" were judged by perceivers as deceptive. Expressions of liking that contained many "other" references, few self-references, and many nonspecific descriptors were also perceived to be deceptive. (27 ref) (PsycINFO Database Record (c) 2007 APA, all rights reserved)},
author = {DePaulo, Bella M. and Rosenthal, Robert and Rosenkrantz, Judith and Green, Carolyn Rieder},
doi = {10.1207/s15324834basp0304_6},
file = {:home/josiah/Documents/Mendeley Desktop/DePaulo et al. - 1982 - Actual and Perceived Cues to Deception A Closer Look at Speech.pdf:pdf},
issn = {0197-3533},
journal = {Basic and Applied Social Psychology},
keywords = {deception},
mendeley-tags = {deception},
month = {dec},
number = {4},
pages = {291--312},
title = {{Actual and Perceived Cues to Deception: A Closer Look at Speech}},
url = {http://www.tandfonline.com/doi/abs/10.1207/s15324834basp0304{\_}6},
volume = {3},
year = {1982}
}
@article{Barr2008,
abstract = {A new framework is offered that uses multilevel logistic regression (MLR) to analyze data from 'visual world' eyetracking experiments used in psycholinguistic research. The MLR framework overcomes some of the problems with conventional analyses, making it possible to incorporate time as a continuous variable and gaze location as a categorical dependent variable. The multilevel approach minimizes the need for data aggregation and thus provides a more statistically powerful approach. With MLR, the researcher builds a mathematical model of the overall response curve that separates the response into different temporal components. The researcher can test hypotheses by examining the impact of independent variables and their interactions on these components. A worked example using MLR is provided. {\textcopyright} 2007 Elsevier Inc. All rights reserved.},
author = {Barr, Dale J.},
doi = {10.1016/j.jml.2007.09.002},
file = {:home/josiah/Documents/Mendeley Desktop/Barr - 2008 - Analyzing ‘visual world' eyetracking data using multilevel logistic regression.pdf:pdf},
issn = {0749596X},
journal = {Journal of Memory and Language},
keywords = {Eyetracking,Multilevel modeling,Statistics,eyetracking,statistics methodology},
mendeley-tags = {eyetracking,statistics methodology},
month = {nov},
number = {4},
pages = {457--474},
title = {{Analyzing ‘visual world’ eyetracking data using multilevel logistic regression}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0749596X07001015},
volume = {59},
year = {2008}
}
@incollection{Zuckerman1981,
author = {Zuckerman, Miron and DePaulo, Bella M. and Rosenthal, Robert},
booktitle = {Advances in Experimental Social Psychology},
doi = {10.1016/S0065-2601(08)60369-X},
pages = {1--59},
publisher = {Academic Press},
title = {{Verbal and Nonverbal Communication of Deception}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S006526010860369X},
volume = {14},
year = {1981}
}
@article{DePaulo2003,
author = {DePaulo, Bella M. and Lindsay, James J. and Malone, Brian E. and Muhlenbruck, Laura and Charlton, Kelly and Cooper, Harris},
doi = {10.1037//0033-2909.129.1.74},
file = {:home/josiah/Documents/Mendeley Desktop/DePaulo et al. - 2003 - Cues to deception.pdf:pdf},
isbn = {0033-2909},
issn = {0033-2909},
journal = {Psychological Bulletin},
keywords = {deception},
mendeley-tags = {deception},
number = {1},
pages = {74--112},
pmid = {12555795},
title = {{Cues to deception.}},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037//0033-2909.129.1.74},
volume = {129},
year = {2003}
}
@article{Vrij1996,
abstract = {Previous research suggests that liars are not aware that they tend to decrease their movements during deception. Moreover, it is unclear how liars will behave if someone informs them about their behavioral rigidity during deception, and to what extent several processes (tension, attempted behavioral control, and cognitive effort) are associated with deception. In the present experiment, subjects were interviewed twice. During one interview, they told the truth, and during the other interview, they lied. In the information-present condition, before both interviews, subjects were told that deception is usually associated with a decrease in movements. In the information-absent condition, no information was given. The results revealed that whereas subjects believed that they increased their movements during deception, a decrease in movements, in fact, occurred. Provision of information about deceptive behavior had no effect. The results also showed that a decrease in movements was associated with attempted control and cognitive load processes, and occurred independently from the tension experienced by deceivers.},
author = {Vrij, Aldert and Semin, Gun R. and Bull, Ray},
doi = {10.1111/j.1468-2958.1996.tb00378.x},
file = {:home/josiah/Documents/Mendeley Desktop/Vrij, Semin, Bull - 1996 - Insight Into Behavior Displayed During Deception.pdf:pdf},
issn = {0360-3989},
journal = {Human Communication Research},
keywords = {deception},
mendeley-tags = {deception},
month = {jun},
number = {4},
pages = {544--562},
title = {{Insight Into Behavior Displayed During Deception}},
url = {https://academic.oup.com/hcr/article/22/4/544-562/4564904},
volume = {22},
year = {1996}
}
@article{Kelly2010a,
author = {Kelly, Spencer D. and {\"{O}}zy{\"{u}}rek, Aslı and Maris, Eric},
doi = {10.1177/0956797609357327},
file = {:home/josiah/Documents/Mendeley Desktop/Kelly, {\"{O}}zy{\"{u}}rek, Maris - 2010 - Two Sides of the Same Coin.pdf:pdf},
issn = {0956-7976},
journal = {Psychological Science},
keywords = {Action,Communication,Comprehension,Iconic gestures,Integrated-systems hypothesis,Language,Multimodal,Mutual,Obligatory,Semantic processing,Speech},
month = {feb},
number = {2},
pages = {260--267},
pmid = {20424055},
title = {{Two Sides of the Same Coin: Speech and Gesture Mutually Interact to Enhance Comprehension}},
url = {http://journals.sagepub.com/doi/10.1177/0956797609357327},
volume = {21},
year = {2010}
}
@article{Vrij2000,
abstract = {As soon as liars realize that evaluators use CBCA to assess the credibility of their statements, it is possible that liars will gain knowledge of CBCA and try to 'improve' their statements in order to make an honest impression on CBCA-judges. The present experiment investigated to what extent liars are capable of doing this. In all, 45 participants were randomly allocated to one of the following three conditions: a truth telling condition in which participants were asked to recall a videotaped event which they had just seen; an uninformed deception condition in which participants who had only been given guidelines about the content of the videotaped event were asked to recall the event as though they had seen the videotape; and an informed deception condition in which participants received information about CBCA before they were asked to pretend that they had seen the videotape. CBCA-raters scored the accounts and a comparison was made between the total CBCA-scores of the three conditions. The study also examined the extent to which CBCA-assessments could correctly classify truthful and deceptive accounts, first by means of a discriminant analysis (with the total CBCA-score as dependent variable) and secondly by asking a British CBCA-expert to judge the veracity of the statements. The results indicated that liars are capable of influencing CBCA-assessments. First, the CBCA-scores of liars who were informed about CBCA were similar to the CBCA-scores of truth tellers and significantly higher than the CBCA-scores of liars who were not informed about CBCA. Secondly, the objective status of the participant (truth teller vs. informed liar) could not be successfully predicted in a discriminant analysis on the basis of total CBCA-scores. Thirdly, statements of the majority of informed liars were assessed as truthful by a British CBCA-expert},
author = {Vrij, Aldert and Kneller, Wendy and Mann, Samantha},
doi = {10.1348/135532500167976},
file = {:home/josiah/Documents/Mendeley Desktop/Vrij, Kneller, Mann - 2000 - The effect of informing liars about Criteria-Based Content Analysis on their ability to deceive CBCA-raters.pdf:pdf},
issn = {13553259},
journal = {Legal and Criminological Psychology},
month = {feb},
number = {1},
pages = {57--70},
title = {{The effect of informing liars about Criteria-Based Content Analysis on their ability to deceive CBCA-raters}},
url = {http://doi.wiley.com/10.1348/135532500167976},
volume = {5},
year = {2000}
}
@article{Vrij2001,
author = {Vrij, Aldert and Edward, Katherine and Bull, Ray},
doi = {10.1348/000712601162248},
file = {:home/josiah/Documents/Mendeley Desktop/Vrij, Edward, Bull - 2001 - People's insight into their own behaviour and speech content while lying.pdf:pdf},
issn = {00071269},
journal = {British Journal of Psychology},
keywords = {deception},
mendeley-tags = {deception},
month = {may},
number = {2},
pages = {373--389},
title = {{People's insight into their own behaviour and speech content while lying}},
url = {http://doi.wiley.com/10.1348/000712601162248},
volume = {92},
year = {2001}
}
@article{Mathot2012,
author = {Math{\^{o}}t, Sebastiaan and Schreij, Daniel and Theeuwes, Jan},
doi = {10.3758/s13428-011-0168-7},
issn = {1554-3528},
journal = {Behavior Research Methods},
month = {jun},
number = {2},
pages = {314--324},
title = {{OpenSesame: An open-source, graphical experiment builder for the social sciences}},
url = {http://www.springerlink.com/index/10.3758/s13428-011-0168-7},
volume = {44},
year = {2012}
}
@article{Zuckerman1981a,
abstract = {People's beliefs about the association of 19 visual and auditory cues with deception were assessed in one of two questionnaires: Subjects were asked to indicate the association of each cue with deception in their own behavior (self-perception condition) or in other people's behavior (other-perception condition). The 19 behaviors listed in the questionnaires had been previously examined in research on actual behaviors associated with deception; ten of these behaviors had also been examined in research on cues associated with judgment of deception. Stronger associations between the various cues and deception were obtained in the other-perception than in the self-perception condition, indicating that people believe they control their own deceptive behavior better than other people control theirs. Beliefs about the association of each behavior with deception (averaged across the two conditions and sex of respondents) correlated .11 with the actual association of each cue with deception, and .44 with the association of each cue with judgment of deception. The possibility that the correspondence between beliefs about deception and actual cues to deception is higher for some specific types of lie-telling was discussed.},
author = {Zuckerman, Miron and Koestner, Richard and Driver, Robert},
doi = {10.1007/BF00987286},
file = {:home/josiah/Documents/Mendeley Desktop/Zuckerman, Koestner, Driver - 1981 - Beliefs about cues associated with deception.pdf:pdf},
issn = {0191-5886},
journal = {Journal of Nonverbal Behavior},
keywords = {deception},
mendeley-tags = {deception},
number = {2},
pages = {105--114},
title = {{Beliefs about cues associated with deception}},
url = {http://link.springer.com/10.1007/BF00987286},
volume = {6},
year = {1981}
}
@article{Loy2017,
author = {Loy, Jia E. and Rohde, Hannah and Corley, Martin},
doi = {10.1111/cogs.12378},
file = {:home/josiah/Documents/Mendeley Desktop/Loy, Rohde, Corley - 2017 - Effects of Disfluency in Online Interpretation of Deception.pdf:pdf},
issn = {03640213},
journal = {Cognitive Science},
keywords = {Language understanding,Pragmatics,Psychology,deception,disfluency},
mendeley-tags = {deception,disfluency},
month = {may},
pages = {1434--1456},
title = {{Effects of Disfluency in Online Interpretation of Deception}},
url = {http://doi.wiley.com/10.1111/cogs.12378},
volume = {41},
year = {2017}
}
@article{Arnold2007,
abstract = {Eye-tracking and gating experiments examined reference comprehension with fluent (Click on the red. . .) and disfluent (Click on [pause] thee uh red . . .) instructions while listeners viewed displays with 2 familiar (e.g., ice cream cones) and 2 unfamiliar objects (e.g., squiggly shapes). Disfluent instructions made unfamiliar objects more expected, which influenced listeners' on-line hypotheses from the onset of the color word. The unfamiliarity bias was sharply reduced by instructions that the speaker had object agnosia, and thus difficulty naming familiar objects (Experiment 2), but was not affected by intermittent sources of speaker distraction (beeps and construction noises; Experiments 3). The authors conclude that listeners can make situation-specific inferences about likely sources of disfluency, but there are some limitations to these attributions.},
author = {Arnold, Jennifer E. and Kam, Carla L. Hudson and Tanenhaus, Michael K.},
doi = {10.1037/0278-7393.33.5.914},
file = {:home/josiah/Documents/Mendeley Desktop/Arnold, Kam, Tanenhaus - 2007 - If you say thee uh you are describing something hard The on-line attribution of disfluency during refere.pdf:pdf},
issn = {1939-1285},
journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition},
keywords = {attribution inferences,disfluency,eye-tracking,reference comprehension},
number = {5},
pages = {914--930},
pmid = {17723069},
title = {{If you say thee uh you are describing something hard: The on-line attribution of disfluency during reference comprehension.}},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/0278-7393.33.5.914},
volume = {33},
year = {2007}
}
@article{Lo2015,
abstract = {Linear mixed-effect models (LMMs) are being increasingly widely used in psychology to analyse multi-level research designs. This feature allows LMMs to address some of the problems identified by Speelman and McGann (2013) about the use of mean data, because they do not average across individual responses. However, recent guidelines for using LMM to analyse skewed reaction time (RT) data collected in many cognitive psychological studies recommend the application of non-linear transformations to satisfy assumptions of normality. Uncritical adoption of this recommendation has important theoretical implications which can yield misleading conclusions. For example, Balota et al. (2013) showed that analyses of raw RT produced additive effects of word frequency and stimulus quality on word identification, which conflicted with the interactive effects observed in analyses of transformed RT. Generalized linear mixed-effect models (GLMM) provide a solution to this problem by satisfying normality assumptions without the need for transformation. This allows differences between individuals to be properly assessed, using the metric most appropriate to the researcher's theoretical context. We outline the major theoretical decisions involved in specifying a GLMM, and illustrate them by reanalysing Balota et al.'s datasets. We then consider the broader benefits of using GLMM to investigate individual differences.},
author = {Lo, Steson and Andrews, Sally},
doi = {10.3389/fpsyg.2015.01171},
file = {:home/josiah/Documents/Mendeley Desktop/Lo, Andrews - 2015 - To transform or not to transform using generalized linear mixed models to analyse reaction time data.pdf:pdf},
issn = {1664-1078},
journal = {Frontiers in Psychology},
keywords = {RT transformations,additive factors,generalized linear mixed-effect models,glmer,interaction effects,mental chronometry,mixed,reaction times,regression,rt,rt transformations,statistics methodology,transform,transformation},
mendeley-tags = {glmer,mixed,reaction times,regression,rt,statistics methodology,transform,transformation},
month = {aug},
pages = {1--16},
pmid = {26300841},
title = {{To transform or not to transform: using generalized linear mixed models to analyse reaction time data}},
url = {http://journal.frontiersin.org/Article/10.3389/fpsyg.2015.01171/abstract},
volume = {6},
year = {2015}
}
@inproceedings{Swerts1996,
abstract = {The study aims to test quantitatively whether filled pauses (FPs) may highlight discourse structure. More specifically it is first investigated whether FPs are more typical in the vicinity of major discourse boundaries. Secondly, the FPs are analyzed acoustically, to check whether those occurring at major discourse boundaries are segmentally and prosodically different from those at shallower breaks. Analyses of twelve spontaneous monologues (Dutch) show that phrases following major discourse boundaries more often contain FPs. Additionally, FPs after stronger breaks tend to occur phrase-initially, whereas the majority of the FPs after weak boundaries are in phrase-internal position. Also, acoustic observations reveal that FPs at major discourse boundaries are both segmentally and prosodically distinct. They also differ with respect to the distribution of neighbouring silent pauses.},
author = {Swerts, M. and Wichmann, A. and Beun, R.-J.},
booktitle = {Proceedings of the Fourth International Conference on Spoken Language Processing (ICSLP '96)},
doi = {10.1109/ICSLP.1996.607780},
file = {:home/josiah/Documents/Mendeley Desktop/Swerts, Wichmann, Beun - 1996 - Filled pauses as markers of discourse structure.pdf:pdf},
isbn = {0-7803-3555-4},
issn = {03782166},
pages = {1033--1036},
publisher = {IEEE},
title = {{Filled pauses as markers of discourse structure}},
url = {http://ieeexplore.ieee.org/document/607780/},
volume = {2},
year = {1996}
}
@article{DePaulo1997,
author = {DePaulo, Bella M. and Charlton, Kelly and Cooper, Harris and Lindsay, James J. and Muhlenbruck, Laura},
doi = {10.1207/s15327957pspr0104_5},
file = {:home/josiah/Documents/Mendeley Desktop/DePaulo et al. - 1997 - The Accuracy-Confidence Correlation in the Detection of Deception.pdf:pdf},
issn = {1088-8683},
journal = {Personality and Social Psychology Review},
month = {nov},
number = {4},
pages = {346--357},
title = {{The Accuracy-Confidence Correlation in the Detection of Deception}},
url = {http://journals.sagepub.com/doi/10.1207/s15327957pspr0104{\_}5},
volume = {1},
year = {1997}
}
@article{Swerts2005,
abstract = {This paper describes two experiments on the role of audiovisual prosody for signalling and detecting meta-cognitive information in question answering. The first study consists of an experiment, in which participants are asked factual questions in a conversational setting, while they are being filmed. Statistical analyses bring to light that the speakers' Feeling of Knowing (FOK) is cued by a number of visual and verbal properties. It appears that answers tend to have a higher number of marked auditory and visual cues, including divergences from the neutral facial expression, when the FOK score is low, while the reverse is true for non-answers. The second study is a perception experiment, in which a selection of the utterances from the first study is presented to participants in one of three conditions: vision only, sound only, or vision + sound. Results reveal that human observers can reliably distinguish high FOK responses from low FOK responses in all three conditions, but that answers are easier than non-answers, and that a bimodal presentation of the stimuli is easier than the unimodal counterparts. {\textcopyright} 2005 Elsevier Inc. All rights reserved.},
author = {Swerts, Marc and Krahmer, Emiel},
doi = {10.1016/j.jml.2005.02.003},
file = {:home/josiah/Documents/Mendeley Desktop/Swerts, Krahmer - 2005 - Audiovisual prosody and feeling of knowing.pdf:pdf},
issn = {0749596X},
journal = {Journal of Memory and Language},
keywords = {Audiovisual prosody,Facial expressions,Feeling of another's knowing,Feeling of knowing,Question answering,Speech perception,Speech production,Tip of the tongue},
month = {jul},
number = {1},
pages = {81--94},
title = {{Audiovisual prosody and feeling of knowing}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0749596X05000227},
volume = {53},
year = {2005}
}
@article{Kelly1999,
author = {Kelly, Spencer D. and Barr, Dale J. and Church, R. Breckinridge and Lynch, Katheryn},
file = {:home/josiah/Documents/Mendeley Desktop/Kelly, Barr - 1999 - Offering a Hand to Pragmatic Understanding The Role of Speech and Gesture in Comprehension and Memory.pdf:pdf},
journal = {Journal of Memory and Language},
pages = {577--592},
title = {{Offering a Hand to Pragmatic Understanding: The Role of Speech and Gesture in Comprehension and Memory}},
volume = {40},
year = {1999}
}
@incollection{Ekman1989,
address = {Dordrecht},
author = {Ekman, Paul},
booktitle = {Credibility Assessment},
doi = {10.1007/978-94-015-7856-1_4},
file = {:home/josiah/Documents/Mendeley Desktop/Ekman - 1989 - Why Lies Fail and What Behaviors Betray a Lie.pdf:pdf},
isbn = {9048140439},
issn = {0258-123X},
keywords = {deception},
mendeley-tags = {deception},
pages = {71--81},
publisher = {Springer Netherlands},
title = {{Why Lies Fail and What Behaviors Betray a Lie}},
url = {http://link.springer.com/10.1007/978-94-015-7856-1{\_}4},
year = {1989}
}
@article{Huettig2011,
abstract = {We describe the key features of the visual world paradigm and review the main research areas where it has been used. In our discussion we highlight that the paradigm provides information about the way language users integrate linguistic information with information derived from the visual environment. Therefore the paradigm is well suited to study one of the key issues of current cognitive psychology, namely the interplay between linguistic and visual information processing. However, conclusions about linguistic processing (e.g., about activation, competition, and timing of access of linguistic representations) in the absence of relevant visual information must be drawn with caution. {\textcopyright} 2010 Elsevier B.V.},
author = {Huettig, Falk and Rommers, Joost and Meyer, Antje S.},
doi = {10.1016/j.actpsy.2010.11.003},
file = {:home/josiah/Documents/Mendeley Desktop/Huettig, Rommers, Meyer - 2011 - Using the visual world paradigm to study language processing A review and critical evaluation.pdf:pdf},
issn = {00016918},
journal = {Acta Psychologica},
keywords = {Attention,Eye movements,Language,Vision,Visual world paradigm},
month = {jun},
number = {2},
pages = {151--171},
pmid = {21288498},
publisher = {Elsevier B.V.},
title = {{Using the visual world paradigm to study language processing: A review and critical evaluation}},
url = {http://dx.doi.org/10.1016/j.actpsy.2010.11.003},
volume = {137},
year = {2011}
}
@inproceedings{Saryazdi2017,
abstract = {In conversation, speakers spontaneously produce manual gestures that can facilitate listeners' comprehension of speech. However, various factors may affect listeners' ability to use gesture cues. Here we examine a situation where a speaker is referring to physical objects in the contextual here-and-now. In this situation, objects for potential reference will compete with gestures for visual attention. In two experiments, a speaker provided instructions to pick up objects in the visual environment ("Pick up the candy"). On some trials, the speaker produced a "pick up" gesture that reflected the size/shape of the target object. Gaze position was recorded to evaluate how listeners allocated attention to scene elements. Experiment 1 showed that, although iconic gestures (when present) were rarely fixated directly, peripheral uptake of these cues speeded listeners' visual identification of intended referents as the instruction unfolded. However, the benefit was mild and occurred primarily for small/hard-to-identify objects. In Experiment 2, background noise was added to reveal whether challenging auditory environments lead listeners to allocate additional visual attention to gesture cues in a compensatory manner. Interestingly, background noise actually reduced listeners' use of gesture cues. Together the findings highlight how situational factors govern the use of visual cues during multimodal communication.},
author = {Saryazdi, Raheleh and Chambers, Craig G.},
booktitle = {Interspeech 2017},
doi = {10.21437/Interspeech.2017-1676},
file = {:home/josiah/Documents/Mendeley Desktop/Saryazdi, Chambers - 2017 - Attentional Factors in Listeners' Uptake of Gesture Cues During Speech Processing.pdf:pdf},
issn = {19909772},
keywords = {Attention,Co-Speech Gesture,Listening In Adverse Environments,Real-Time Language Comprehension,comprehension,eyetracking,gesture,noise,visual world},
mendeley-tags = {comprehension,eyetracking,gesture,noise,visual world},
month = {aug},
pages = {869--873},
publisher = {ISCA},
title = {{Attentional Factors in Listeners' Uptake of Gesture Cues During Speech Processing}},
url = {http://www.isca-speech.org/archive/Interspeech{\_}2017/abstracts/1676.html},
year = {2017}
}
@article{Mirman2008,
author = {Mirman, Daniel and Dixon, James A and Magnuson, James S},
doi = {10.1016/j.jml.2007.11.006},
file = {:home/josiah/Documents/Mendeley Desktop/Mirman, Dixon, Magnuson - 2008 - Statistical and computational models of the visual world paradigm Growth curves and individual differen.pdf:pdf},
issn = {0749596X},
journal = {Journal of Memory and Language},
keywords = {eye tracking,growth curve,individual differences,spoken language processing,statistics},
month = {nov},
number = {4},
pages = {475--494},
title = {{Statistical and computational models of the visual world paradigm: Growth curves and individual differences}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0749596X07001313},
volume = {59},
year = {2008}
}
@article{DePaulo1996,
abstract = {In 2 diary studies of lying, 77 college students reported telling 2 lies a day, and 70 community members told 1. Participants told more self-centered lies than other-oriented lies, except in dyads involving only women, in which other-oriented lies were as common as self-centered ones. Participants told relatively more self-centered lies to men and relatively more other-oriented lies to women. Consistent with the view of lying as an everyday social interaction process, participants said that they did not regard their lies as serious and did not plan them much or worry about being caught. Still, social interactions in which lies were told were less pleasant and less intimate than those in which no lies were told.},
author = {DePaulo, Bella M. and Kashy, Deborah A. and Kirkendol, Susan E. and Wyer, Melissa M. and Epstein, Jennifer A.},
doi = {10.1037/0022-3514.70.5.979},
file = {:home/josiah/Documents/Mendeley Desktop/DePaulo et al. - 1996 - Lying in everyday life.pdf:pdf},
issn = {1939-1315},
journal = {Journal of Personality and Social Psychology},
keywords = {deception},
mendeley-tags = {deception},
number = {5},
pages = {979--995},
pmid = {8656340},
title = {{Lying in everyday life.}},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/0022-3514.70.5.979},
volume = {70},
year = {1996}
}
@article{Cohen2010,
abstract = {This study explores the morphology of iconic gestures during deception. Participants narrated a static cartoon story twice. In one condition they provided an accurate account of the story, in the other they were instructed to introduce false details. Participants produced significantly fewer iconic gestures when describing plot-line events deceptively than when narrating comparable episode units truthfully. Deceptive gestures had significantly fewer post-stroke holds and shorter stroke phase durations than those produced alongside truthful utterances. Following Beattie (2003) three narrators in the deceptive condition produced gestures that in their morphology contradicted the semantic information encoded in their speech stream, and ultimately signaled possible deceit.},
author = {Cohen, Doron and Beattie, Geoffrey and Shovelton, Heather},
doi = {10.1515/semi.2010.055},
file = {:home/josiah/Documents/Mendeley Desktop/Cohen, Beattie, Shovelton - 2010 - Nonverbal indicators of deception How iconic gestures reveal thoughts that cannot be suppressed.pdf:pdf},
issn = {00371998},
journal = {Semiotica},
keywords = {Contradictory gestures,Deception,Gesture frequency,Gesture-speech mismatches,Iconic gestures,Nonverbal leakage,deception,gesture,iconic,production},
mendeley-tags = {deception,gesture,iconic,production},
number = {182},
pages = {133--174},
title = {{Nonverbal indicators of deception: How iconic gestures reveal thoughts that cannot be suppressed}},
volume = {2010},
year = {2010}
}
@article{DePaulo1998,
abstract = {In 2 diary studies, 77 undergraduates and 70 community members recorded their social interactions and lies for a week. Because lying violates the openness and authenticity that people value in their close relationships, we predicted (and found) that participants would tell fewer lies per social interaction to the people to whom they felt closer and would feel more uncomfortable when they did lie to those people. Because altruistic lies can communicate caring, we also predicted (and found) that relatively more of the lies told to best friends and friends would be altruistic than self-serving, whereas the reverse would be true of lies told to acquaintances and strangers. Also consistent with predictions, lies told to closer partners were more often discovered.},
author = {DePaulo, Bella M. and Kashy, Deborah A.},
doi = {10.1037/0022-3514.74.1.63},
file = {:home/josiah/Documents/Mendeley Desktop/DePaulo, Kashy - 1998 - Everyday lies in close and casual relationships.pdf:pdf},
issn = {1939-1315},
journal = {Journal of Personality and Social Psychology},
keywords = {deception},
mendeley-tags = {deception},
number = {1},
pages = {63--79},
pmid = {9457776},
title = {{Everyday lies in close and casual relationships.}},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/0022-3514.74.1.63},
volume = {74},
year = {1998}
}
@inproceedings{Busso2004,
abstract = {The interaction between human beings and computers will be more natural if computers are able to perceive and respond to human non-verbal communication such as emotions. Although several approaches have been proposed to recognize human emotions based on facial expressions or speech, relatively limited work has been done to fuse these two, and other, modalities to improve the accuracy and robustness of the emotion recognition system. This paper analyzes the strengths and the limitations of systems based only on facial expressions or acoustic information. It also discusses two approaches used to fuse these two modalities: decision level and feature level integration. Using a database recorded from an actress, four emotions were classified: sadness, anger, happiness, and neutral state. By the use of markers on her face, detailed facial motions were captured with motion capture, in conjunction with simultaneous speech recordings. The results reveal that the system based on facial expression gave better performance than the system based on just acoustic information for the emotions considered. Results also show the complementarity of the two modalities and that when these two modalities are fused, the performance and the robustness of the emotion recognition system improve measurably.},
address = {New York, New York, USA},
author = {Busso, Carlos and Deng, Zhigang and Yildirim, Serdar and Bulut, Murtaza and Lee, Chul Min and Kazemzadeh, Abe and Lee, Sungbok and Neumann, Ulrich and Narayanan, Shrikanth},
booktitle = {Proceedings of the 6th international conference on Multimodal interfaces - ICMI '04},
doi = {10.1145/1027933.1027968},
file = {:home/josiah/Downloads/3{\_} Analysis of Emotion Recognition using facial expression-speech.pdf:pdf},
isbn = {1581139950},
pages = {205},
publisher = {ACM Press},
title = {{Analysis of emotion recognition using facial expressions, speech and multimodal information}},
url = {http://portal.acm.org/citation.cfm?doid=1027933.1027968},
year = {2004}
}
@article{Corley2011,
abstract = {Several studies suggest that speech understanding can sometimes benefit from the presence of filled pauses (uh, um, and the like), and that words following such filled pauses are recognised more quickly. Three experiments examined whether this is because filled pauses serve to delay the onset of upcoming words and these delays facilitate auditory word recognition, or whether the fillers themselves serve to signal upcoming delays in a way which informs listeners' reactions. Participants viewed pairs of images on a computer screen, and followed recorded instructions to press buttons corresponding to either an easy (unmanipulated, with a high-frequency name) or a difficult (visually blurred, low-frequency) image. In all three experiments, participants were faster to respond to easy images. In 50{\%} of trials in each experiment, the name of the image was directly preceded by a delay; in the remaining trials an equivalent delay was included earlier in the instruction. Participants were quicker to respond when a name was directly preceded by a delay, regardless of whether this delay was filled with a spoken um, was silent, or contained an artificial tone. This effect did not interact with the effect of image difficulty, nor did it change over the course of each experiment. Taken together, our consistent finding that delays of any kind help word recognition indicates that natural delays such as fillers need not be seen as 'signals' to explain the benefits they have to listeners' ability to recognise and respond to the words which follow them.},
author = {Corley, Martin and Hartsuiker, Robert J.},
doi = {10.1371/journal.pone.0019792},
editor = {Perc, Matjaz},
file = {:home/josiah/.local/share/data/Mendeley Ltd./Mendeley Desktop/Downloaded/Corley, Hartsuiker - 2011 - Why Um Helps Auditory Word Recognition The Temporal Delay Hypothesis.pdf:pdf},
issn = {1932-6203},
journal = {PLoS ONE},
month = {may},
number = {5},
pages = {e19792},
pmid = {21611164},
title = {{Why Um Helps Auditory Word Recognition: The Temporal Delay Hypothesis}},
url = {http://dx.plos.org/10.1371/journal.pone.0019792},
volume = {6},
year = {2011}
}
@article{Schlenker2001,
author = {Schlenker, Barry R. and Pontari, Beth A. and Christopher, Andrew N.},
doi = {10.1207/S15327957PSPR0501\_2},
journal = {Personality and Social Psychology Review},
number = {1},
pages = {15--32},
title = {{Excuses and Character: Personal and Social Implications of Excuses}},
url = {https://doi.org/10.1207/S15327957PSPR0501{\_}2},
volume = {5},
year = {2001}
}
@book{ekman2009,
author = {Ekman, Paul},
note = {Revised edition; originally published 1985},
publisher = {WW Norton \& Company},
title = {{Telling lies: Clues to deceit in the marketplace, politics, and marriage}},
year = {2009}
}
@article{Akehurst1996,
abstract = {Research on the detection of deception, via non-verbal cues, has shown that people's ability to successfully discriminate between truth and deception is only slightly better than chance level. One of the reasons for these disappointing findings possibly lies in people's inappropriate beliefs regarding 'lying behaviour'. A 64-item questionnaire originally used in Germany, which targets participants' beliefs regarding truthful and deceptive behaviour, was used. The present study differed from previous research in three ways: (i) instead of a student population, police officers and lay people were sampled, (ii) both people's beliefs regarding others' deceptive behaviour and their beliefs regarding their own deceptive behaviour were examined, and (iii) both non-verbal cues to, and content characteristics of, deceptive statements were examined. Results were consistent with previous studies, which found significant differences between people's beliefs regarding deceptive behaviour and experimental observations of actual deceptive behaviour. Further, police officers held as many false beliefs as did lay people and finally, participants were more accurate in their beliefs regarding their own deceptive behaviour than they were in their beliefs regarding others' behaviour.},
author = {Akehurst, Lucy and Kohnken, Gunter and Vrij, Aldert and Bull, Ray},
doi = {10.1002/(SICI)1099-0720(199612)10:6<461::AID-ACP413>3.0.CO;2-2},
file = {:home/josiah/Documents/Mendeley Desktop/Akehurst et al.{\_}1996{\_}Lay Persons' and Police Officers' Beliefs Regarding Deceptive Behaviour.pdf:pdf},
issn = {0888-4080},
journal = {Applied Cognitive Psychology},
number = {6},
pages = {461--471},
title = {{Lay Persons' and Police Officers' Beliefs Regarding Deceptive Behaviour}},
url = {http://doi.wiley.com/10.1002/(SICI)1099-0720(199612)10:6{\%}3C461::AID-ACP413{\%}3E3.0.CO;2-2},
volume = {10},
year = {1996}
}
@article{Hartwig2011,
author = {Hartwig, Maria and Bond, Charles F.},
journal = {Psychological Bulletin},
number = {4},
pages = {643},
publisher = {American Psychological Association},
title = {{Why do lie-catchers fail? A lens model meta-analysis of human lie judgments.}},
volume = {137},
year = {2011}
}
@article{Gullberg2006,
author = {Gullberg, Marianne and Holmqvist, Kenneth},
doi = {10.1075/pc.14.1.05gul},
issn = {0929-0907},
journal = {Pragmatics {\&} Cognition},
number = {1},
pages = {53--82},
title = {{What speakers do and what addressees look at: Visual attention to gestures in human interaction live and on video}},
url = {http://www.jbe-platform.com/content/journals/10.1075/pc.14.1.05gul},
volume = {14},
year = {2006}
}
@article{Holle2007,
author = {Holle, Henning and Gunter, Thomas C},
doi = {10.1162/jocn.2007.19.7.1175},
file = {:home/josiah/Documents/Mendeley Desktop/Holle, Gunter{\_}2007{\_}The Role of Iconic Gestures in Speech Disambiguation ERP Evidence.pdf:pdf},
issn = {0898-929X},
journal = {Journal of Cognitive Neuroscience},
month = {jul},
number = {7},
pages = {1175--1192},
title = {{The Role of Iconic Gestures in Speech Disambiguation: ERP Evidence}},
url = {http://www.mitpressjournals.org/doi/10.1162/jocn.2007.19.7.1175},
volume = {19},
year = {2007}
}
@article{Ekman1969,
author = {Ekman, Paul and Friesen, Wallace V},
journal = {Semiotica},
number = {1},
pages = {49--98},
publisher = {De Gruyter Mouton},
title = {{The repertoire of nonverbal behavior: Categories, origins, usage, and coding}},
volume = {1},
year = {1969}
}
@book{Mcneill1992,
author = {McNeill, David},
publisher = {University of Chicago Press},
title = {{Hand and mind: What gestures reveal about thought}},
year = {1992}
}
@article{Ozyurek2007,
abstract = {During language comprehension, listeners use the global semantic representation from previous sentence or discourse context to immediately integrate the meaning of each upcoming word into the unfolding message-level representation. Here we investigate whether communicative gestures that often spontaneously co-occur with speech are processed in a similar fashion and integrated to previous sentence context in the same way as lexical meaning. Event-related potentials were measured while subjects listened to spoken sentences with a critical verb (e.g., knock), which was accompanied by an iconic co-speech gesture (i.e., KNOCK). Verbal and/or gestural semantic content matched or mismatched the content of the preceding part of the sentence. Despite the difference in the modality and in the specificity of meaning conveyed by spoken words and gestures, the latency, amplitude, and topographical distribution of both word and gesture mismatches are found to be similar, indicating that the brain integrates both types of information simultaneously. This provides evidence for the claim that neural processing in language comprehension involves the simultaneous incorporation of information coming from a broader domain of cognition than only verbal semantics. The neural evidence for similar integration of information from speech and gesture emphasizes the tight interconnection between speech and co-speech gestures.},
author = {{\"{O}}zy{\"{u}}rek, Aslı and Willems, Roel M. and Kita, Sotaro and Hagoort, Peter},
doi = {10.1162/jocn.2007.19.4.605},
file = {:home/josiah/Documents/Mendeley Desktop/{\"{O}}zy{\"{u}}rek et al.{\_}2007{\_}On-line Integration of Semantic Information from Speech and Gesture Insights from Event-related Brain Potentials.pdf:pdf},
issn = {0898-929X},
journal = {Journal of Cognitive Neuroscience},
number = {4},
pages = {605--616},
pmid = {17381252},
title = {{On-line Integration of Semantic Information from Speech and Gesture: Insights from Event-related Brain Potentials}},
url = {http://www.mitpressjournals.org/doi/10.1162/jocn.2007.19.4.605},
volume = {19},
year = {2007}
}
@article{Kelly2004,
abstract = {The present study examined the neural correlates of speech and hand gesture comprehension in a naturalistic context. Fifteen participants watched audiovisual segments of speech and gesture while event-related potentials (ERPs) were recorded to the speech. Gesture influenced the ERPs to the speech. Specifically, there was a right-lateralized N400 effect - reflecting semantic integration - when gestures mismatched versus matched the speech. In addition, early sensory components in bilateral occipital and frontal sites differentiated speech accompanied by matching versus non-matching gestures. These results suggest that hand gestures may be integrated with speech at early and late stages of language processing. {\textcopyright} 2003 Elsevier Inc. All rights reserved.},
author = {Kelly, Spencer D. and Kravitz, Corinne and Hopkins, Michael},
doi = {10.1016/S0093-934X(03)00335-3},
file = {:home/josiah/Documents/Mendeley Desktop/Kelly, Kravitz, Hopkins{\_}2004{\_}Neural correlates of bimodal speech and gesture comprehension(2).pdf:pdf},
issn = {0093934X},
journal = {Brain and Language},
number = {1},
pages = {253--260},
pmid = {15010257},
title = {{Neural correlates of bimodal speech and gesture comprehension}},
volume = {89},
year = {2004}
}
@article{Habets2011,
abstract = {During face-to-face communication, one does not only hear speech but also see a speaker's communicative hand movements. It has been shown that such hand gestures play an important role in communication where the two modalities influence each other's interpretation. A gesture typically temporally overlaps with coexpressive speech, but the gesture is often initiated before (but not after) the coexpressive speech. The present ERP study investigated what degree of asynchrony in the speech and gesture onsets is optimal for semantic integration of the concurrent gesture and speech. Videos of a person gesturing were combined with speech segments that were either semantically congruent or incongruent with the gesture. Although gesture and speech always overlapped in time, gesture and speech were presented with three different degrees of asynchrony. In the SOA 0 condition, the gesture onset and the speech onset were simultaneous. In the SOA 160 and 360 conditions, speech was delayed by 160 and 360 msec, respectively. ERPs time locked to speech onset showed a significant difference between semantically congruent versus incongruent gesture-speech combinations on the N400 for the SOA 0 and 160 conditions. No significant difference was found for the SOA 360 condition. These results imply that speech and gesture are integrated most efficiently when the differences in onsets do not exceed a certain time span because of the fact that iconic gestures need speech to be disambiguated in a way relevant to the speech context.},
author = {Habets, Boukje and Kita, Sotaro and Shao, Zeshu and Ozy{\"{u}}rek, Asli and Hagoort, Peter},
doi = {10.1162/jocn.2010.21462},
file = {:home/josiah/Documents/Mendeley Desktop/Habets et al.{\_}2011{\_}The role of synchrony and ambiguity in speech-gesture integration during comprehension(2).pdf:pdf},
issn = {1530-8898},
journal = {Journal of cognitive neuroscience},
keywords = {Adolescent,Analysis of Variance,Brain Mapping,Comprehension,Comprehension: physiology,Electroencephalography,Electroencephalography Phase Synchronization,Electroencephalography Phase Synchronization: physiology,Electroencephalography: methods,Female,Gestures,Humans,Male,Photic Stimulation,Reaction Time,Speech,Speech: physiology,Young Adult},
number = {8},
pages = {1845--1854},
pmid = {20201632},
title = {{The role of synchrony and ambiguity in speech-gesture integration during comprehension.}},
url = {http://www.mitpressjournals.org/doi/abs/10.1162/jocn.2010.21462},
volume = {23},
year = {2011}
}
@article{Kelly2001,
abstract = {Recently, much research has explored the role that nonverbal pointing behaviours play in children's early acquisition of language, for example during word learning. However, few researchers have considered the possibility that these behaviours may continue to play a role in language comprehension as children develop more sophisticated language skills. The present study investigates the role that eye gaze and pointing gestures play in three- to five-year-olds' understanding of complex pragmatic communication. Experiment 1 demonstrates that children (N = 29) better understand videotapes of a mother making indirect requests to a child when the requests are accompanied by nonverbal pointing behaviours. Experiment 2 uses a different methodology in which children (N = 27) are actual participants rather than observers in order to generalize the findings to naturalistic, face-to-face interactions. The results from both experiments suggest that broader units of analysis beyond the verbal message may be needed in studying children's continuing understanding of pragmatic processes.},
author = {Kelly, Spencer D.},
doi = {10.1017/S0305000901004664},
file = {:home/josiah/Documents/Mendeley Desktop/Kelly{\_}2001{\_}Broadening the units of analysis in communication Speech and nonverbal behaviours in pragmatic comprehension.pdf:pdf},
issn = {03050009},
journal = {Journal of Child Language},
number = {2},
pages = {325--349},
pmid = {11449942},
title = {{Broadening the units of analysis in communication: Speech and nonverbal behaviours in pragmatic comprehension}},
volume = {28},
year = {2001}
}