Publications
Search
Karkada, Deepthi; Manuvinakurike, Ramesh; Paetzel-Prüsmann, Maike; Georgila, Kallirroi
Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task Proceedings Article
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5768–5777, European Language Resources Association, Marseille, France, 2022.
@inproceedings{karkada_strategy-level_2022,
title = {Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task},
author = {Deepthi Karkada and Ramesh Manuvinakurike and Maike Paetzel-Prüsmann and Kallirroi Georgila},
url = {https://aclanthology.org/2022.lrec-1.620},
year = {2022},
date = {2022-06-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
pages = {5768--5777},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {In this work, we study entrainment of users playing a creative reference resolution game with an autonomous dialogue system. The language understanding module in our dialogue system leverages annotated human-wizard conversational data, openly available knowledge graphs, and crowd-augmented data. Unlike previous entrainment work, our dialogue system does not attempt to make the human conversation partner adopt lexical items in their dialogue, but rather to adapt their descriptive strategy to one that is simpler to parse for our natural language understanding unit. By deploying this dialogue system through a crowd-sourced study, we show that users indeed entrain on a “strategy-level” without the change of strategy impinging on their creativity. Our work thus presents a promising future research direction for developing dialogue management systems that can strategically influence people's descriptive strategy to ease the system's language understanding in creative tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Probabilistic Models of Annotation Book Section
In: Paun, Silviu; Artstein, Ron; Poesio, Massimo (Ed.): Statistical Methods for Annotation Analysis, pp. 105–145, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03763-4.
@incollection{paun_probabilistic_2022-1,
title = {Probabilistic Models of Annotation},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
editor = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://doi.org/10.1007/978-3-031-03763-4_5},
doi = {10.1007/978-3-031-03763-4_5},
isbn = {978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
booktitle = {Statistical Methods for Annotation Analysis},
pages = {105--145},
publisher = {Springer International Publishing},
address = {Cham},
series = {Synthesis Lectures on Human Language Technologies},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Using Agreement Measures for CL Annotation Tasks Book Section
In: Paun, Silviu; Artstein, Ron; Poesio, Massimo (Ed.): Statistical Methods for Annotation Analysis, pp. 47–78, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03763-4.
@incollection{paun_using_2022,
title = {Using Agreement Measures for {CL} Annotation Tasks},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
editor = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://doi.org/10.1007/978-3-031-03763-4_3},
doi = {10.1007/978-3-031-03763-4_3},
isbn = {978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
booktitle = {Statistical Methods for Annotation Analysis},
pages = {47--78},
publisher = {Springer International Publishing},
address = {Cham},
series = {Synthesis Lectures on Human Language Technologies},
abstract = {We will now review the use of intercoder agreement measures in CL since Carletta’s original paper in the light of the discussion in the previous sections. We begin with a summary of Krippendorff’s recommendations about measuring reliability (Krippendorff, 2004a, Chapter 11), then discuss how coefficients of agreement have been used in CL to measure the reliability of annotation, focusing in particular on the types of annotation where there has been some debate concerning the most appropriate measures of agreement.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Probabilistic Models of Agreement Book Section
In: Paun, Silviu; Artstein, Ron; Poesio, Massimo (Ed.): Statistical Methods for Annotation Analysis, pp. 79–101, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03763-4.
@incollection{paun_probabilistic_2022,
title = {Probabilistic Models of Agreement},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
editor = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://doi.org/10.1007/978-3-031-03763-4_4},
doi = {10.1007/978-3-031-03763-4_4},
isbn = {978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
booktitle = {Statistical Methods for Annotation Analysis},
pages = {79--101},
publisher = {Springer International Publishing},
address = {Cham},
series = {Synthesis Lectures on Human Language Technologies},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Learning from Multi-Annotated Corpora Book Section
In: Paun, Silviu; Artstein, Ron; Poesio, Massimo (Ed.): Statistical Methods for Annotation Analysis, pp. 147–165, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03763-4.
@incollection{paun_learning_2022,
title = {Learning from Multi-Annotated Corpora},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
editor = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://doi.org/10.1007/978-3-031-03763-4_6},
doi = {10.1007/978-3-031-03763-4_6},
isbn = {978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
booktitle = {Statistical Methods for Annotation Analysis},
pages = {147--165},
publisher = {Springer International Publishing},
address = {Cham},
series = {Synthesis Lectures on Human Language Technologies},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Hoegen, Jessie; DeVault, David; Gratch, Jonathan
Exploring the Function of Expressions in Negotiation: the DyNego-WOZ Corpus Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2022, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
@article{hoegen_exploring_2022,
title = {Exploring the Function of Expressions in Negotiation: the {DyNego-WOZ} Corpus},
author = {Jessie Hoegen and David DeVault and Jonathan Gratch},
doi = {10.1109/TAFFC.2022.3223030},
issn = {1949-3045},
year = {2022},
date = {2022-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1--12},
abstract = {For affective computing to have an impact outside the laboratory, facial expressions must be studied in rich naturalistic situations. We argue negotiations are one such situation as they are ubiquitous in daily life, often evoke strong emotions, and perceived emotion shapes decisions and outcomes. Negotiations are a growing focus in AI research and applications, including agents that negotiate directly with people and attempt to use affective information. We introduce the DyNego-WOZ Corpus, which includes dyadic negotiation between participants and wizard-controlled virtual humans. We demonstrate the value of this corpus to the affective computing community by examining participants' facial expressions in response to a virtual human negotiation partner. We show that people's facial expressions typically co-occur with the end of their partner's speech (suggesting they reflect a reaction to the content of this speech), that these reactions do not correspond to prototypical emotional expressions, and that these reactions can help predict the expresser's subsequent action. We highlight challenges in working with such naturalistic data, including difficulties of expression recognition during speech, and the extreme variability of expressions, both across participants and within a negotiation. Our findings reinforce arguments that facial expressions convey more than emotional state but serve important communicative functions.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Statistical Methods for Annotation Analysis Book
Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03753-5 978-3-031-03763-4.
@book{paun_statistical_2022,
title = {Statistical Methods for Annotation Analysis},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://link.springer.com/10.1007/978-3-031-03763-4},
doi = {10.1007/978-3-031-03763-4},
isbn = {978-3-031-03753-5, 978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-28},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Marge, Matthew; Espy-Wilson, Carol; Ward, Nigel G.; Alwan, Abeer; Artzi, Yoav; Bansal, Mohit; Blankenship, Gil; Chai, Joyce; Daumé, Hal; Dey, Debadeepta; Harper, Mary; Howard, Thomas; Kennington, Casey; Kruijff-Korbayová, Ivana; Manocha, Dinesh; Matuszek, Cynthia; Mead, Ross; Mooney, Raymond; Moore, Roger K.; Ostendorf, Mari; Pon-Barry, Heather; Rudnicky, Alexander I.; Scheutz, Matthias; Amant, Robert St.; Sun, Tong; Tellex, Stefanie; Traum, David; Yu, Zhou
Spoken language interaction with robots: Recommendations for future research Journal Article
In: Computer Speech & Language, vol. 71, pp. 101255, 2022, ISSN: 08852308.
@article{marge_spoken_2022,
title = {Spoken language interaction with robots: Recommendations for future research},
author = {Matthew Marge and Carol Espy-Wilson and Nigel G. Ward and Abeer Alwan and Yoav Artzi and Mohit Bansal and Gil Blankenship and Joyce Chai and Hal Daumé and Debadeepta Dey and Mary Harper and Thomas Howard and Casey Kennington and Ivana Kruijff-Korbayová and Dinesh Manocha and Cynthia Matuszek and Ross Mead and Raymond Mooney and Roger K. Moore and Mari Ostendorf and Heather Pon-Barry and Alexander I. Rudnicky and Matthias Scheutz and Robert {St. Amant} and Tong Sun and Stefanie Tellex and David Traum and Zhou Yu},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0885230821000620},
doi = {10.1016/j.csl.2021.101255},
issn = {0885-2308},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-23},
journal = {Computer Speech \& Language},
volume = {71},
pages = {101255},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hernandez, Stephanie; Artstein, Ron
Annotating low-confidence questions improves classifier performance Proceedings Article
In: Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, 2021.
@inproceedings{hernandez_annotating_2021,
title = {Annotating low-confidence questions improves classifier performance},
author = {Stephanie Hernandez and Ron Artstein},
url = {https://par.nsf.gov/biblio/10313591-annotating-low-confidence-questions-improves-classifier-performance},
year = {2021},
date = {2021-09-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
abstract = {This paper compares methods to select data for annotation in order to improve a classifier used in a question-answering dialogue system. With a classifier trained on 1,500 questions, adding 300 training questions on which the classifier is least confident results in consistently improved performance, whereas adding 300 arbitrarily selected training questions does not yield consistent improvement, and sometimes even degrades performance. The paper uses a new method for comparative evaluation of classifiers for dialogue, which scores each classifier based on the number of appropriate responses retrieved.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; Boberg, Jill; DeVault, David; Kim, Peter; Lucas, Gale
Using Intelligent Agents to Examine Gender in Negotiations Proceedings Article
In: Proceedings of the 21th ACM International Conference on Intelligent Virtual Agents, pp. 90–97, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{johnson_using_2021,
title = {Using Intelligent Agents to Examine Gender in Negotiations},
author = {Emmanuel Johnson and Jonathan Gratch and Jill Boberg and David DeVault and Peter Kim and Gale Lucas},
url = {https://dl.acm.org/doi/10.1145/3472306.3478348},
doi = {10.1145/3472306.3478348},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21th ACM International Conference on Intelligent Virtual Agents},
pages = {90--97},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Lucas, Gale
Rapport Between Humans and Socially Interactive Agents Book Section
In: Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.): The Handbook on Socially Interactive Agents, pp. 433–462, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
@incollection{gratch_rapport_2021,
title = {Rapport Between Humans and Socially Interactive Agents},
author = {Jonathan Gratch and Gale Lucas},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/10.1145/3477322.3477335},
doi = {10.1145/3477322.3477335},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {The Handbook on Socially Interactive Agents},
pages = {433--462},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Bonial, Claire; Abrams, Mitchell; Baker, Anthony L.; Hudson, Taylor; Lukin, Stephanie; Traum, David; Voss, Clare
Context is key: Annotating situated dialogue relations in multi-floor dialogue Proceedings Article
In: 2021.
@inproceedings{bonial_context_2021,
title = {Context is key: Annotating situated dialogue relations in multi-floor dialogue},
author = {Claire Bonial and Mitchell Abrams and Anthony L. Baker and Taylor Hudson and Stephanie Lukin and David Traum and Clare Voss},
url = {http://semdial.org/anthology/papers/Z/Z21/Z21-3006/},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue},
internal-note = {NOTE(review): booktitle was missing (required for inproceedings); reconstructed from the SemDial Z21 anthology path shared with the other 2021 SemDial entries in this file -- confirm against the published proceedings},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Traum, David
Identity models for role-play dialogue characters Proceedings Article
In: 2021.
@inproceedings{chaffey_identity_2021,
title = {Identity models for role-play dialogue characters},
author = {Patricia Chaffey and David Traum},
url = {http://semdial.org/anthology/papers/Z/Z21/Z21-4022/},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
internal-note = {NOTE(review): booktitle was missing (required for inproceedings); reconstructed from the SemDial Z21-4 (poster) anthology numbering, matching the pattern of gordon_wizard_2019 -- confirm against the published proceedings},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.)
The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition Book
1, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
@book{lugrin_handbook_2021,
  title     = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition},
  editor    = {Birgit Lugrin and Catherine Pelachaud and David Traum},
  url       = {https://dl.acm.org/doi/book/10.1145/3477322},
  doi       = {10.1145/3477322},
  isbn      = {978-1-4503-8720-0},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-23},
  publisher = {ACM},
  address   = {New York, NY, USA},
  edition   = {1},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {book}
}
Bonial, Claire; Abrams, Mitchell; Traum, David; Voss, Clare
Builder, we have done it: Evaluating & Extending Dialogue-AMR NLU Pipeline for Two Collaborative Domains Proceedings Article
In: Proceedings of the 14th International Conference on Computational Semantics (IWCS), pp. 173–183, Association for Computational Linguistics, Groningen, The Netherlands (online), 2021.
@inproceedings{bonial_builder_2021,
title = {Builder, we have done it: Evaluating \& Extending Dialogue-{AMR} {NLU} Pipeline for Two Collaborative Domains},
author = {Claire Bonial and Mitchell Abrams and David Traum and Clare Voss},
url = {https://aclanthology.org/2021.iwcs-1.17},
year = {2021},
date = {2021-06-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 14th International Conference on Computational Semantics (IWCS)},
pages = {173--183},
publisher = {Association for Computational Linguistics},
address = {Groningen, The Netherlands (online)},
abstract = {We adopt, evaluate, and improve upon a two-step natural language understanding (NLU) pipeline that incrementally tames the variation of unconstrained natural language input and maps to executable robot behaviors. The pipeline first leverages Abstract Meaning Representation (AMR) parsing to capture the propositional content of the utterance, and second converts this into “Dialogue-AMR,” which augments standard AMR with information on tense, aspect, and speech acts. Several alternative approaches and training datasets are evaluated for both steps and corresponding components of the pipeline, some of which outperform the original. We extend the Dialogue-AMR annotation schema to cover a different collaborative instruction domain and evaluate on both domains. With very little training data, we achieve promising performance in the new domain, demonstrating the scalability of this approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 2021.
@article{gervits_classication-based_2021,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
year = {2021},
date = {2021-03-01},
pages = {13},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
internal-note = {NOTE(review): appears to duplicate entry gervits_classification-based_2021 (same title, authors, and DOI); the pages value 13 looks like a page count rather than a page range (duplicate entry has 115--127) -- confirm and consider merging},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kawano, Seiya; Yoshino, Koichiro; Traum, David; Nakamura, Satoshi
Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning Proceedings Article
In: 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction, pp. 21–29, ISCA, 2021.
@inproceedings{kawano_dialogue_2021,
title = {Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning},
author = {Seiya Kawano and Koichiro Yoshino and David Traum and Satoshi Nakamura},
url = {http://www.isca-speech.org/archive/RobotDial_2021/abstracts/4.html},
doi = {10.21437/RobotDial.2021-4},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction},
pages = {21--29},
publisher = {ISCA},
abstract = {A multi-floor dialogue consists of multiple sets of dialogue participants, each conversing within their own floor, but also at least one multicommunicating member who is a participant of multiple floors and coordinating each to achieve a shared dialogue goal. The structure of such dialogues can be complex, involving intentional structure and relations that are within or across floors. In this study, we propose a neural dialogue structure parser based on multi-task learning and an attention mechanism on multi-floor dialogues in a collaborative robot navigation domain. Our experimental results show that our proposed model improved the dialogue structure parsing performance more than those of single models, which are trained on each dialogue structure parsing task in multi-floor dialogues.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Book Section
In: Marchi, Erik; Siniscalchi, Sabato Marco; Cumani, Sandro; Salerno, Valerio Mario; Li, Haizhou (Ed.): Increasing Naturalness and Flexibility in Spoken Dialogue Interaction: 10th International Workshop on Spoken Dialogue Systems, pp. 115–127, Springer, Singapore, 2021, ISBN: 978-981-15-9323-9.
@incollection{gervits_classification-based_2021,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
editor = {Erik Marchi and Sabato Marco Siniscalchi and Sandro Cumani and Valerio Mario Salerno and Haizhou Li},
url = {https://doi.org/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
isbn = {978-981-15-9323-9},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-23},
booktitle = {Increasing Naturalness and Flexibility in Spoken Dialogue Interaction: 10th International Workshop on Spoken Dialogue Systems},
pages = {115--127},
publisher = {Springer},
address = {Singapore},
series = {Lecture Notes in Electrical Engineering},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multi-floor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English bilingual chatbot Book Section
In: Conversational Dialogue Systems for the Next Decade, pp. 41–50, Springer, Switzerland, 2020.
@incollection{brixey_masheli_2020,
title = {Masheli: A {Choctaw-English} bilingual chatbot},
author = {Jacqueline Brixey and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-8395-7_4},
year = {2020},
date = {2020-10-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
pages = {41--50},
publisher = {Springer},
address = {Switzerland},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice conversational skills. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Book Section
In: Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2020, ISBN: 978-981-15-8394-0 978-981-15-8395-7.
@incollection{gordon_towards_2020,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
isbn = {978-981-15-8394-0, 978-981-15-8395-7},
year = {2020},
date = {2020-09-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145--160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Filter
2019
Tavabi, Leili; Stefanov, Kalin; Gilani, Setareh Nasihati; Traum, David; Soleymani, Mohammad
Multimodal Learning for Identifying Opportunities for Empathetic Responses Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction, pp. 95–104, ACM, Suzhou China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags:
@inproceedings{tavabi_multimodal_2019,
title = {Multimodal Learning for Identifying Opportunities for Empathetic Responses},
author = {Leili Tavabi and Kalin Stefanov and Setareh Nasihati Gilani and David Traum and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3340555.3353750},
doi = {10.1145/3340555.3353750},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction},
pages = {95--104},
publisher = {ACM},
address = {Suzhou China},
abstract = {Embodied interactive agents possessing emotional intelligence and empathy can create natural and engaging social interactions. Providing appropriate responses by interactive virtual agents requires the ability to perceive users’ emotional states. In this paper, we study and analyze behavioral cues that indicate an opportunity to provide an empathetic response. Emotional tone in language in addition to facial expressions are strong indicators of dramatic sentiment in conversation that warrant an empathetic response. To automatically recognize such instances, we develop a multimodal deep neural network for identifying opportunities when the agent should express positive or negative empathetic responses. We train and evaluate our model using audio, video and language from human-agent interactions in a wizard-of-Oz setting, using the wizard’s empathetic responses and annotations collected on Amazon Mechanical Turk as ground-truth labels. Our model outperforms a textbased baseline achieving F1-score of 0.71 on a three-class classification. We further investigate the results and evaluate the capability of such a model to be deployed for real-world human-agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Yanov, Volodymyr; Traum, David; Georgila, Kallirroi
A Wizard of Oz Data Collection Framework for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, pp. 3, SEMDIAL, London, UK, 2019.
Abstract | Links | BibTeX | Tags:
@inproceedings{gordon_wizard_2019,
title = {A {Wizard of Oz} Data Collection Framework for {Internet of Things} Dialogues},
author = {Carla Gordon and Volodymyr Yanov and David Traum and Kallirroi Georgila},
url = {http://semdial.org/anthology/papers/Z/Z19/Z19-4024/},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
pages = {3},
publisher = {SEMDIAL},
address = {London, UK},
abstract = {We describe a novel Wizard of Oz dialogue data collection framework in the Internet of Things domain. Our tool is designed for collecting dialogues between a human user, and 8 different system profiles, each with a different communication strategy. We then describe the data collection conducted with this tool, as well as the dialogue corpus that was generated.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Lukin, Stephanie M.; Tratz, Stephen; Artstein, Ron; Traum, David; Voss, Clare R.
Augmenting Abstract Meaning Representation for Human-Robot Dialogue Proceedings Article
In: Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pp. 199–210, Association of Computational Linguistics, Florence, Italy, 2019.
Abstract | Links | BibTeX | Tags:
@inproceedings{bonial_augmenting_2019,
title = {Augmenting Abstract Meaning Representation for Human-Robot Dialogue},
author = {Claire Bonial and Lucia Donatelli and Stephanie M. Lukin and Stephen Tratz and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/W19-3322},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the First International Workshop on Designing Meaning Representations (DMR)},
pages = {199--210},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lycan, Bethany; Artstein, Ron
Direct and Mediated Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems, pp. 161–167, Springer, Cham, Switzerland, 2019.
Abstract | Links | BibTeX | Tags:
@inproceedings{lycan_direct_2019,
  author    = {Bethany Lycan and Ron Artstein},
  title     = {Direct and Mediated Interaction with a Holocaust Survivor},
  booktitle = {Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems},
  series    = {Lecture Notes in Electrical Engineering},
  volume    = {510},
  pages     = {161–167},
  publisher = {Springer},
  address   = {Cham, Switzerland},
  year      = {2019},
  date      = {2019-08-01},
  url       = {https://doi.org/10.1007/978-3-319-92108-2_17},
  doi       = {10.1007/978-3-319-92108-2_17},
  abstract  = {The New Dimensions in Testimony dialogue system was placed in two museums under two distinct conditions: docent-led group interaction, and free interaction with visitors. Analysis of the resulting conversations shows that docent-led interactions have a lower vocabulary and a higher proportion of user utterances that directly relate to the system’s subject matter, while free interaction is more personal in nature. Under docent-led interaction the system gives a higher proportion of direct appropriate responses, but overall correct system behavior is about the same in both conditions because the free interaction condition has more instances where the correct system behavior is to avoid a direct response.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-Lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-Ann
Can a Signing Virtual Human Engage a Baby's Attention? Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 162–169, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags:
@inproceedings{nasihati_gilani_can_2019,
  author    = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-Lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-Ann Petitto},
  title     = {Can a Signing Virtual Human Engage a Baby's Attention?},
  booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
  pages     = {162–169},
  publisher = {ACM Press},
  address   = {Paris, France},
  year      = {2019},
  date      = {2019-07-01},
  url       = {http://dl.acm.org/citation.cfm?doid=3308532.3329463},
  doi       = {10.1145/3308532.3329463},
  isbn      = {978-1-4503-6672-4},
  abstract  = {The child developmental period of ages 6-12 months marks a widely understood “critical period” for healthy language learning, during which, failure to receive exposure to language can place babies at risk for language and reading problems spanning life. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period. Technology has been used to augment linguistic input (e.g., auditory devices; language videotapes) but research finds limitations in learning. We evaluated an AI system that uses an Avatar (provides language and socially contingent interactions) and a robot (aids attention to the Avatar) to facilitate infants’ ability to learn aspects of American Sign Language (ASL), and asked three questions: (1) Can babies with little/no exposure to ASL distinguish among the Avatar’s different conversational modes (Linguistic Nursery Rhymes; Social Gestures; Idle/nonlinguistic postures; 3rd person observer)? (2) Can an Avatar stimulate babies’ production of socially contingent responses, and crucially, nascent language responses? (3) What is the impact of parents’ presence/absence of conversational participation? Surprisingly, babies (i) spontaneously distinguished among Avatar conversational modes, (ii) produced varied socially contingent responses to Avatar’s modes, and (iii) parents influenced an increase in babies’ response tokens to some Avatar modes, but the overall categories and pattern of babies’ behavioral responses remained proportionately similar irrespective of parental participation. Of note, babies produced the greatest percentage of linguistic responses to the Avatar’s Linguistic Nursery Rhymes versus other Avatar conversational modes. This work demonstrates the potential for Avatars to facilitate language learning in young babies.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shapiro, Ari; Leuski, Anton; Marsella, Stacy
UBeBot: voice-driven, personalized, avatar-based communicative video content in A/R Proceedings Article
In: ACM SIGGRAPH 2019 Appy Hour, pp. 1–2, ACM, Los Angeles California, 2019, ISBN: 978-1-4503-6306-8.
@inproceedings{shapiro_ubebot_2019,
title = {UBeBot: voice-driven, personalized, avatar-based communicative video content in A/R},
author = {Ari Shapiro and Anton Leuski and Stacy Marsella},
url = {https://dl.acm.org/doi/10.1145/3305365.3329734},
doi = {10.1145/3305365.3329734},
isbn = {978-1-4503-6306-8},
year = {2019},
date = {2019-07-01},
urldate = {2024-11-01},
booktitle = {ACM SIGGRAPH 2019 Appy Hour},
pages = {1–2},
publisher = {ACM},
address = {Los Angeles, California},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Kyusong; Zhao, Tiancheng; Ultes, Stefan; Rojas-Barahona, Lina; Pincus, Eli; Traum, David; Eskenazi, Maxine
An Assessment Framework for DialPort Book Section
In: Advanced Social Interaction with Agents, vol. 510, pp. 79–85, Springer International Publishing, Cham, 2019, ISBN: 978-3-319-92107-5 978-3-319-92108-2.
Abstract | Links | BibTeX | Tags:
@incollection{lee_assessment_2019,
title = {An Assessment Framework for DialPort},
author = {Kyusong Lee and Tiancheng Zhao and Stefan Ultes and Lina Rojas-Barahona and Eli Pincus and David Traum and Maxine Eskenazi},
url = {http://link.springer.com/10.1007/978-3-319-92108-2_10},
doi = {10.1007/978-3-319-92108-2_10},
isbn = {978-3-319-92107-5},
internal-note = {original field held two space-separated ISBNs; 978-3-319-92107-5 is the print ISBN, 978-3-319-92108-2 the eBook ISBN},
year = {2019},
date = {2019-06-01},
urldate = {2019-10-28},
booktitle = {Advanced Social Interaction with Agents},
volume = {510},
pages = {79–85},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {Collecting a large amount of real human-computer interaction data in various domains is a cornerstone in the development of better data-driven spoken dialog systems. The DialPort project is creating a portal to collect a constant stream of real user conversational data on a variety of topics. In order to keep real users attracted to DialPort, it is crucial to develop a robust evaluation framework to monitor and maintain high performance. Different from earlier spoken dialog systems, DialPort has a heterogeneous set of spoken dialog systems gathered under one outward-looking agent. In order to access this new structure, we have identified some unique challenges that DialPort will encounter so that it can appeal to real users and have created a novel evaluation scheme that quantitatively assesses their performance in these situations. We look at assessment from the point of view of the system developer as well as that of the end user.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Sohail, Usman; Traum, David
A Blissymbolics Translation System Proceedings Article
In: Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies, pp. 32–36, Association for Computational Linguistics, Minneapolis, Minnesota, 2019.
Abstract | Links | BibTeX | Tags:
@inproceedings{sohail_blissymbolics_2019,
  author    = {Usman Sohail and David Traum},
  title     = {A Blissymbolics Translation System},
  booktitle = {Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies},
  pages     = {32–36},
  publisher = {Association for Computational Linguistics},
  address   = {Minneapolis, Minnesota},
  year      = {2019},
  date      = {2019-06-01},
  url       = {http://aclweb.org/anthology/W19-1705},
  doi       = {10.18653/v1/W19-1705},
  abstract  = {Blissymbolics (Bliss) is a pictographic writing system that is used by people with communication disorders. Bliss attempts to create a writing system that makes words easier to distinguish by using pictographic symbols that encapsulate meaning rather than sound, as the English alphabet does for example. Users of Bliss rely on human interpreters to use Bliss. We created a translation system from Bliss to natural English with the hopes of decreasing the reliance on human interpreters by the Bliss community. We first discuss the basic rules of Blissymbolics. Then we point out some of the challenges associated with developing computer assisted tools for Blissymbolics. Next we talk about our ongoing work in developing a translation system, including current limitations, and future work. We conclude with a set of examples showing the current capabilities of our translation system.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-Lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-Ann
Can a Virtual Human Facilitate Language Learning in a Young Baby? Proceedings Article
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, ACM, Montreal, Canada, 2019, ISBN: 978-1-4503-6309-9.
Abstract | Links | BibTeX | Tags:
@inproceedings{gilani_can_2019,
title = {Can a Virtual Human Facilitate Language Learning in a Young Baby?},
author = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-Lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-Ann Petitto},
url = {https://dl.acm.org/citation.cfm?id=3332035},
isbn = {978-1-4503-6309-9},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
publisher = {ACM},
address = {Montreal, Canada},
abstract = {There is a significant paucity of work on language learning systems for young infants [2, 5, 19] despite the widely understood critical importance that this developmental period has for healthy language and cognitive growth, and related reading and academic success [6, 14]. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period [18]. This causes potentially devastating impact on children's linguistic, cognitive, and social skills [9, 10, 15, 16, 20]. We introduced an AI system, called RAVE (Robot, AVatar, thermal Enhanced language learning tool), designed specifically for babies within the age range of 6-12 months [8, 17]. RAVE consists of two agents: a virtual human (provides language and socially contingent interactions) and an embodied robot (provides socially engaging physical cues to babies and directs babies' attention to the virtual human). Detailed description of the system's constituent components and dialogue algorithms are presented in [17] and [8].},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Traum, David
Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies Proceedings Article
In: Proceedings of the 9th Language and Technology Conference, LTC, Poznań, Poland, 2019.
Abstract | Links | BibTeX | Tags:
@inproceedings{chaffey_developing_2019,
title = {Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and David Traum},
url = {http://www-scf.usc.edu/~nasihati/publications/HLTCEM_2019.pdf},
internal-note = {url originally contained a space after usc.edu/; tilde restored (personal USC page) — verify link resolves},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 9th Language and Technology Conference},
publisher = {LTC},
address = {Poznań, Poland},
abstract = {Search and rescue missions involving robots face multiple challenges. The ratio of operators to robots is frequently one to one or higher, operators tasked with robots must contend with cognitive overload for long periods, and the robots themselves may be discomfiting to located survivors. To improve on the current state, we propose a swarm of robots equipped with natural language abilities and guided by a central virtual “spokesperson” able to access “plays”. The spokesperson may assist the operator with tasking the robots in their exploration of a zone, which allows the operator to maintain a safe distance. The use of multiple robots enables rescue personnel to cover a larger swath of ground, and the natural language component allows the robots to communicate with survivors located on site. This capability frees the operator to handle situations requiring personal attention, and overall can accelerate the location and assistance of survivors. In order to develop this system, we are creating a virtual reality simulation, in order to conduct a study and analysis of how humans communicate with these swarms of robots. The data collected from this experiment will inform how to best design emergency response swarm robots that are effectively able to communicate with the humans around them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Core, Mark G.; Nye, Benjamin D.; Karumbaiah, Shamya; Auerbach, Daniel; Ram, Maya
Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training Proceedings Article
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 9, IFAAMAS, Montreal, Canada, 2019.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgila_using_2019,
title = {Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training},
author = {Kallirroi Georgila and Mark G. Core and Benjamin D. Nye and Shamya Karumbaiah and Daniel Auerbach and Maya Ram},
url = {http://www.ifaamas.org/Proceedings/aamas2019/pdfs/p737.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {9},
internal-note = {pages value looks like a page count rather than a range; the proceedings PDF starts at p. 737 (see url) — verify actual page range},
publisher = {IFAAMAS},
address = {Montreal, Canada},
abstract = {Reinforcement Learning (RL) has been applied successfully to Intelligent Tutoring Systems (ITSs) in a limited set of well-defined domains such as mathematics and physics. This work is unique in using a large state space and for applying RL to tutoring interpersonal skills. Interpersonal skills are increasingly recognized as critical to both social and economic development. In particular, this work enhances an ITS designed to teach basic counseling skills that can be applied to challenging issues such as sexual harassment and workplace conflict. An initial data collection was used to train RL policies for the ITS, and an evaluation with human participants compared a hand-crafted ITS which had been used for years with students (control) versus the new ITS guided by RL policies. The RL condition differed from the control condition most notably in the strikingly large quantity of guidance it provided to learners. Both systems were effective and there was an overall significant increase from pre- to post-test scores. Although learning gains did not differ significantly between conditions, learners had a significantly higher self-rating of confidence in the RL condition. Confidence and learning gains were both part of the reward function used to train the RL policies, and it could be the case that there was the most room for improvement in confidence, an important learner emotion. Thus, RL was successful in improving an ITS for teaching interpersonal skills without the need to prune the state space (as previously done).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Woo, Simon S.; Artstein, Ron; Kaiser, Elsi; Le, Xiao; Mirkovic, Jelena
Using Episodic Memory for User Authentication Journal Article
In: ACM Transactions on Privacy and Security, vol. 22, no. 2, pp. Article 11, 2019.
Abstract | Links | BibTeX | Tags:
@article{woo_using_2019,
  author    = {Simon S. Woo and Ron Artstein and Elsi Kaiser and Xiao Le and Jelena Mirkovic},
  title     = {Using Episodic Memory for User Authentication},
  journal   = {ACM Transactions on Privacy and Security},
  volume    = {22},
  number    = {2},
  pages     = {Article 11},
  year      = {2019},
  date      = {2019-04-01},
  url       = {https://doi.org/10.1145/3308992},
  doi       = {10.1145/3308992},
  abstract  = {Passwords are widely used for user authentication, but they are often difficult for a user to recall, easily cracked by automated programs, and heavily reused. Security questions are also used for secondary authentication. They are more memorable than passwords, because the question serves as a hint to the user, but they are very easily guessed. We propose a new authentication mechanism, called “life-experience passwords (LEPs).” Sitting somewhere between passwords and security questions, an LEP consists of several facts about a user-chosen life event—such as a trip, a graduation, a wedding, and so on. At LEP creation, the system extracts these facts from the user’s input and transforms them into questions and answers. At authentication, the system prompts the user with questions and matches the answers with the stored ones. We show that question choice and design make LEPs much more secure than security questions and passwords, while the question-answer format promotes low password reuse and high recall. Specifically, we find that: (1) LEPs are 109–1014 × stronger than an ideal, randomized, eight-character password; (2) LEPs are up to 3 × more memorable than passwords and on par with security questions; and (3) LEPs are reused half as often as passwords. While both LEPs and security questions use personal experiences for authentication, LEPs use several questions that are closely tailored to each user. This increases LEP security against guessing attacks. In our evaluation, only 0.7% of LEPs were guessed by casual friends, and 9.5% by family members or close friends—roughly half of the security question guessing rate. On the downside, LEPs take around 5 × longer to input than passwords. So, these qualities make LEPs suitable for multi-factor authentication at high-value servers, such as financial or sensitive work servers, where stronger authentication strength is needed.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Artstein, Ron; Gordon, Carla; Sohail, Usman; Merchant, Chirag; Jones, Andrew; Campbell, Julia; Trimmer, Matthew; Bevington, Jeffrey; Engen, COL Christopher; Traum, David
Digital Survivor of Sexual Assault Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 417–425, ACM, Marina del Rey, California, 2019, ISBN: 978-1-4503-6272-6.
Abstract | Links | BibTeX | Tags:
@inproceedings{artstein_digital_2019,
  author    = {Ron Artstein and Carla Gordon and Usman Sohail and Chirag Merchant and Andrew Jones and Julia Campbell and Matthew Trimmer and Jeffrey Bevington and COL Christopher Engen and David Traum},
  title     = {Digital Survivor of Sexual Assault},
  booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
  pages     = {417–425},
  publisher = {ACM},
  address   = {Marina del Rey, California},
  year      = {2019},
  date      = {2019-03-01},
  url       = {https://doi.org/10.1145/3301275.3302303},
  doi       = {10.1145/3301275.3302303},
  isbn      = {978-1-4503-6272-6},
  abstract  = {The Digital Survivor of Sexual Assault (DS2A) is an interface that allows a user to have a conversational experience with a survivor of sexual assault, using Artificial Intelligence technology and recorded videos. The application uses a statistical classifier to retrieve contextually appropriate pre-recorded video utterances by the survivor, together with dialogue management policies which enable users to conduct simulated conversations with the survivor about the sexual assault, its aftermath, and other pertinent topics. The content in the application has been specifically elicited to support the needs for the training of U.S. Army professionals in the Sexual Harassment/Assault Response and Prevention (SHARP) Program, and the application comes with an instructional support package. The system has been tested with approximately 200 users, and is presently being used in the SHARP Academy's capstone course.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Carla; Leuski, Anton; Benn, Grace; Klassen, Eric; Fast, Edward; Liewer, Matt; Hartholt, Arno; Traum, David
PRIMER: An Emotionally Aware Virtual Agent Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 10, ACM, Los Angeles, CA, 2019.
Abstract | Links | BibTeX | Tags:
@inproceedings{gordon_primer_2019,
title = {PRIMER: An Emotionally Aware Virtual Agent},
author = {Carla Gordon and Anton Leuski and Grace Benn and Eric Klassen and Edward Fast and Matt Liewer and Arno Hartholt and David Traum},
url = {https://www.research.ibm.com/haifa/Workshops/user2agent2019/},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {10},
internal-note = {address normalized to agree with artstein_digital_2019 (same IUI 2019 proceedings); pages value looks like a page count and the url points to the user2agent workshop site — verify venue details},
publisher = {ACM},
address = {Marina del Rey, California},
abstract = {PRIMER is a proof-of-concept system designed to show the potential of immersive dialogue agents and virtual environments that adapt and respond to both direct verbal input and indirect emotional input. The system has two novel interfaces: (1) for the user, an immersive VR environment and an animated virtual agent both of which adapt and react to the user’s direct input as well as the user’s perceived emotional state, and (2) for an observer, an interface that helps track the perceived emotional state of the user, with visualizations to provide insight into the system’s decision making process. While the basic system architecture can be adapted for many potential real world applications, the initial version of this system was designed to assist clinical social workers in helping children cope with bullying. The virtual agent produces verbal and non-verbal behaviors guided by a plan for the counseling session, based on in-depth discussions with experienced counselors, but is also reactive to both initiatives that the user takes, e.g. asking their own questions, and the user’s perceived emotional state.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Lucas, Gale M; Boberg, Jill; Traum, David; Artstein, Ron; Gratch, Jonathan; Gainer, Alesia; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Culture, Errors, and Rapport-building Dialogue in Social Agents Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 51–58, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags:
@inproceedings{lucas_culture_2018,
  author    = {Gale M Lucas and Jill Boberg and David Traum and Ron Artstein and Jonathan Gratch and Alesia Gainer and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
  title     = {Culture, Errors, and Rapport-building Dialogue in Social Agents},
  booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
  pages     = {51–58},
  publisher = {ACM},
  address   = {Sydney, Australia},
  year      = {2018},
  date      = {2018-11-01},
  url       = {https://dl.acm.org/citation.cfm?id=3267887},
  doi       = {10.1145/3267851.3267887},
  isbn      = {978-1-4503-6013-5},
  abstract  = {This work explores whether culture impacts the extent to which social dialogue can mitigate (or exacerbate) the loss of trust caused when agents make conversational errors. Our study uses an agent designed to persuade users to agree with its rankings on two tasks. Participants from the U.S. and Japan completed our study. We perform two manipulations: (1) The presence of conversational errors – the agent exhibited errors in the second task or not; (2) The presence of social dialogue – between the two tasks, users either engaged in a social dialogue with the agent or completed a control task. Replicating previous research, conversational errors reduce the agent’s influence. However, we found that culture matters: there was a marginally significant three-way interaction with culture, presence of social dialogue, and presence of errors. The pattern of results suggests that, for American participants, social dialogue backfired if it is followed by errors, presumably because it extends the period of good performance, creating a stronger contrast effect with the subsequent errors. However, for Japanese participants, social dialogue if anything mitigates the detrimental effect of errors; the negative effect of errors is only seen in the absence of a social dialogue. Agent design should therefore take the culture of the intended users into consideration when considering use of social dialogue to bolster agents against conversational errors.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Carla; Georgila, Kallirroi; Choi, Hyungtak; Boberg, Jill; Traum, David
Evaluating Subjective Feedback for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 22nd Workshop on the Semantics and Pragmatics of Dialogue, pp. 64–72, Aix-en-Provence, France, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{gordon_evaluating_2018,
  author    = {Carla Gordon and Kallirroi Georgila and Hyungtak Choi and Jill Boberg and David Traum},
  title     = {Evaluating Subjective Feedback for Internet of Things Dialogues},
  booktitle = {Proceedings of the 22nd Workshop on the Semantics and Pragmatics of Dialogue},
  pages     = {64–72},
  address   = {Aix-en-Provence, France},
  year      = {2018},
  date      = {2018-11-01},
  url       = {https://amubox.univ-amu.fr/s/6YcAg3TpLpfzGEn#pdfviewer},
  abstract  = {This paper discusses the process of determining which subjective features are seen as ideal in a dialogue system, and linking these features to objectively quantifiable behaviors. A corpus of simulated system-user dialogues in the Internet of Things domain was manually annotated with a set of system communicative and action responses, and crowd-sourced ratings and qualitative feedback of these dialogues were collected. This corpus of subjective feedback was analyzed, revealing that raters described top ranked dialogues as Intelligent, Natural, Pleasant, and as having Personality. Additionally, certain communicative and action responses were statistically more likely to be present in dialogues described as having these features. There was also found to be a lack of agreement among raters as to whether a direct communication style, or a conversational one was preferred, suggesting that future research and development should consider creating models for different communication styles.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale; Mozgai, Sharon; Boberg, Jill; Artstein, Ron; Gratch, Jonathan
Towards a Repeated Negotiating Agent that Treats People Individually: Cooperation, Social Value Orientation, & Machiavellianism Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 125–132, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags:
@inproceedings{mell_towards_2018,
title = {Towards a Repeated Negotiating Agent that Treats People Individually: Cooperation, Social Value Orientation, & Machiavellianism},
author = {Johnathan Mell and Gale Lucas and Sharon Mozgai and Jill Boberg and Ron Artstein and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3267910},
doi = {10.1145/3267851.3267910},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
pages = {125–132},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {We present the results of a study in which humans negotiate with computerized agents employing varied tactics over a repeated number of economic ultimatum games. We report that certain agents are highly effective against particular classes of humans: several individual difference measures for the human participant are shown to be critical in determining which agents will be successful. Asking for favors works when playing with pro-social people but backfires with more selfish individuals. Further, making poor offers invites punishment from Machiavellian individuals. These factors may be learned once and applied over repeated negotiations, which means user modeling techniques that can detect these differences accurately will be more successful than those that don’t. Our work additionally shows that a significant benefit of cooperation is also present in repeated games—after sufficient interaction. These results have deep significance to agent designers who wish to design agents that are effective in negotiating with a broad swath of real human opponents. Furthermore, it demonstrates the effectiveness of techniques which can reason about negotiation over time.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Traum, David; Merla, Arcangelo; Hee, Eugenia; Walker, Zoey; Manini, Barbara; Gallagher, Grady; Petitto, Laura-Ann
Multimodal Dialogue Management for Multiparty Interaction with Infants Proceedings Article
In: Proceedings of the 2018 on International Conference on Multimodal Interaction - ICMI '18, pp. 5–13, ACM Press, Boulder, CO, USA, 2018, ISBN: 978-1-4503-5692-3.
Abstract | Links | BibTeX | Tags:
@inproceedings{nasihati_gilani_multimodal_2018,
  author    = {Setareh Nasihati Gilani and David Traum and Arcangelo Merla and Eugenia Hee and Zoey Walker and Barbara Manini and Grady Gallagher and Laura-Ann Petitto},
  title     = {Multimodal Dialogue Management for Multiparty Interaction with Infants},
  booktitle = {Proceedings of the 2018 on International Conference on Multimodal Interaction - ICMI '18},
  pages     = {5–13},
  publisher = {ACM Press},
  address   = {Boulder, CO, USA},
  year      = {2018},
  date      = {2018-10-01},
  url       = {http://dl.acm.org/citation.cfm?doid=3242969.3243029},
  doi       = {10.1145/3242969.3243029},
  isbn      = {978-1-4503-5692-3},
  abstract  = {We present dialogue management routines for a system to engage in multiparty agent-infant interaction. The ultimate purpose of this research is to help infants learn a visual sign language by engaging them in naturalistic and socially contingent conversations during an early-life critical period for language development (ages 6 to 12 months) as initiated by an artificial agent. As a first step, we focus on creating and maintaining agent-infant engagement that elicits appropriate and socially contingent responses from the baby. Our system includes two agents, a physical robot and an animated virtual human. The system's multimodal perception includes an eye-tracker (measures attention) and a thermal infrared imaging camera (measures patterns of emotional arousal). A dialogue policy is presented that selects individual actions and planned multiparty sequences based on perceptual inputs about the baby's internal changing states of emotional engagement. The present version of the system was evaluated in interaction with 8 babies. All babies demonstrated spontaneous and sustained engagement with the agents for several minutes, with patterns of conversationally relevant and socially contingent behaviors. We further performed a detailed case-study analysis with annotation of all agent and baby behaviors. Results show that the baby's behaviors were generally relevant to agent conversations and contained direct evidence for socially contingent responses by the baby to specific linguistic samples produced by the avatar. This work demonstrates the potential for language learning from agents in very young babies and has especially broad implications regarding the use of artificial agents with babies who have minimal language exposure in early life.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Lukin, Stephanie M.; Hayes, Cory J.; Foots, Ashley; Artstein, Ron; Henry, Cassidy; Pollard, Kimberly A.; Gordon, Carla; Gervits, Felix; Leuski, Anton; Hill, Susan G.; Voss, Clare R.; Traum, David
Balancing Efficiency and Coverage in Human-Robot Dialogue Collection Proceedings Article
In: Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction, arXiv, Arlington, Virginia, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{marge_balancing_2018,
title = {Balancing Efficiency and Coverage in Human-Robot Dialogue Collection},
author = {Matthew Marge and Claire Bonial and Stephanie M. Lukin and Cory J. Hayes and Ashley Foots and Ron Artstein and Cassidy Henry and Kimberly A. Pollard and Carla Gordon and Felix Gervits and Anton Leuski and Susan G. Hill and Clare R. Voss and David Traum},
url = {https://arxiv.org/abs/1810.02017},
year = {2018},
date = {2018-10-01},
booktitle = {Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction},
publisher = {arXiv},
address = {Arlington, Virginia},
abstract = {We describe a multi-phased Wizard-of-Oz approach to collecting human-robot dialogue in a collaborative search and navigation task. The data is being used to train an initial automated robot dialogue system to support collaborative exploration tasks. In the first phase, a wizard freely typed robot utterances to human participants. For the second phase, this data was used to design a GUI that includes buttons for the most common communications, and templates for communications with varying parameters. Comparison of the data gathered in these phases show that the GUI enabled a faster pace of dialogue while still maintaining high coverage of suitable responses, enabling more efficient targeted data collection, and improvements in natural language understanding using GUI-collected data. As a promising first step towards interactive learning, this work shows that our approach enables the collection of useful training data for navigation-based HRI tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Brixey, Jacqueline; Bui, Trung; Chang, Walter; Artstein, Ron; Georgila, Kallirroi
DialEdit: Annotations for Spoken Conversational Image Editing Proceedings Article
In: Proceedings of the 14th Joint ACL - ISO Workshop on Interoperable Semantic Annotation, Association for Computational Linguistics, Santa Fe, New Mexico, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{manuvinakurike_dialedit_2018,
  title     = {DialEdit: Annotations for Spoken Conversational Image Editing},
  author    = {Ramesh Manuvinakurike and Jacqueline Brixey and Trung Bui and Walter Chang and Ron Artstein and Kallirroi Georgila},
  url       = {https://aclanthology.info/papers/W18-4701/w18-4701},
  year      = {2018},
  date      = {2018-08-01},
  booktitle = {Proceedings of the 14th Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
  publisher = {Association for Computational Linguistics},
  address   = {Santa Fe, New Mexico},
  abstract  = {We present a spoken dialogue corpus and annotation scheme for conversational image editing, where people edit an image interactively through spoken language instructions. Our corpus contains spoken conversations between two human participants: users requesting changes to images and experts performing these modifications in real time. Our annotation scheme consists of 26 dialogue act labels covering instructions, requests, and feedback, together with actions and entities for the content of the edit requests. The corpus supports research and development in areas such as incremental intent recognition, visual reference resolution, image-grounded dialogue modeling, dialogue state tracking, and user modeling.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Bharadwaj, Sumanth; Georgila, Kallirroi
A Dialogue Annotation Scheme for Weight Management Chat using the Trans-Theoretical Model of Health Behavior Change Proceedings Article
In: Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation, arxiv.org, Santa Fe, New Mexico, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{manuvinakurike_dialogue_2018,
title = {A Dialogue Annotation Scheme for Weight Management Chat using the Trans-Theoretical Model of Health Behavior Change},
author = {Ramesh Manuvinakurike and Sumanth Bharadwaj and Kallirroi Georgila},
url = {https://arxiv.org/abs/1807.03948},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
publisher = {arxiv.org},
address = {Santa Fe, New Mexico},
abstract = {A dialogue annotation scheme for weight management chat using the trans-theoretical model of health behavior change},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Karkada, Deepthi; Manuvinakurike, Ramesh; Georgila, Kallirroi
Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario Proceedings Article
In: Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation, arxiv.org, Santa Fe, New Mexico, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{karkada_towards_2018,
title = {Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario},
author = {Deepthi Karkada and Ramesh Manuvinakurike and Kallirroi Georgila},
url = {https://arxiv.org/abs/1807.03950},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
publisher = {arxiv.org},
address = {Santa Fe, New Mexico},
abstract = {We introduce a dataset containing human-authored descriptions of target locations in an “end-of-trip in a taxi ride” scenario. We describe our data collection method and a novel annotation scheme that supports understanding of such descriptions of target locations. Our dataset contains target location descriptions for both synthetic and real-world images as well as visual annotations (ground truth labels, dimensions of vehicles and objects, coordinates of the target location, distance and direction of the target location from vehicles and objects) that can be used in various visual and language tasks. We also perform a pilot experiment on how the corpus could be applied to visual reference resolution in this domain.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Bui, Trung; Chang, Walter; Georgila, Kallirroi
Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task Proceedings Article
In: Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pp. 284–295, Association for Computational Linguistics, Melbourne, Australia, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{manuvinakurike_conversational_2018,
  title     = {Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task},
  author    = {Ramesh Manuvinakurike and Trung Bui and Walter Chang and Kallirroi Georgila},
  url       = {https://aclanthology.info/papers/W18-5033/w18-5033},
  year      = {2018},
  date      = {2018-07-01},
  booktitle = {Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue},
  pages     = {284–295},
  publisher = {Association for Computational Linguistics},
  address   = {Melbourne, Australia},
  abstract  = {We present “conversational image editing”, a novel real-world application domain combining dialogue, visual information, and the use of computer vision. We discuss the importance of dialogue incrementality in this task, and build various models for incremental intent identification based on deep learning and traditional classification algorithms. We show how our model based on convolutional neural networks outperforms models based on random forests, long short term memory networks, and conditional random fields. By training embeddings based on image-related dialogue corpora, we outperform pre-trained out-of-the-box embeddings, for intention identification tasks. Our experiments also provide evidence that incremental intent processing may be more efficient for the user and could save time in accomplishing tasks.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Muessig, Kathryn E.; Knudtson, Kelly A.; Soni, Karina; Larsen, Margo Adams; Traum, David; Dong, Willa; Conserve, Donaldson F.; Leuski, Anton; Artstein, Ron; Hightow-Weidman, Lisa B.
“I Didn't Tell You Sooner Because I Didn't Know How to Handle it Myself”: Developing a Virtual Reality Program to Support HIV-Status Disclosure Decisions Journal Article
In: Digital Culture and Education, vol. 10, pp. 22–48, 2018, ISSN: 1836-8301.
Abstract | Links | BibTeX | Tags:
@article{muessig_i_2018,
  title     = {“I Didn't Tell You Sooner Because I Didn't Know How to Handle it Myself”: Developing a Virtual Reality Program to Support HIV-Status Disclosure Decisions},
  author    = {Kathryn E. Muessig and Kelly A. Knudtson and Karina Soni and Margo Adams Larsen and David Traum and Willa Dong and Donaldson F. Conserve and Anton Leuski and Ron Artstein and Lisa B. Hightow-Weidman},
  url       = {http://www.digitalcultureandeducation.com/s/Muessig-et-al-July-2018.pdf},
  issn      = {1836-8301},
  year      = {2018},
  date      = {2018-07-01},
  journal   = {Digital Culture and Education},
  volume    = {10},
  pages     = {22–48},
  abstract  = {HIV status disclosure is associated with increased social support and protective behaviors against HIV transmission. Yet disclosure poses significant challenges in the face of persistent societal stigma. Few interventions focus on decision-making, self-efficacy, and communication skills to support disclosing HIV status to an intimate partner. Virtual reality (VR) and artificial intelligence (AI) technologies offer powerful tools to address this gap. Informed by Social Cognitive Theory, we created the Tough Talks VR program for HIV-positive young men who have sex with men (YMSM) to practice status disclosure safely and confidentially. Fifty-eight YMSM (ages 18 – 30, 88% HIV-positive) contributed 132 disclosure dialogues to develop the prototype through focus groups, usability testing, and a technical pilot. The prototype includes three disclosure scenarios (neutral, sympathetic, and negative response) and a database of 125 virtual character utterances. Participants select a VR scenario and realistic virtual character with whom to practice. In a pilot test of the fully automated neutral response scenario, the AI system responded appropriately to 71% of participant utterances. Most pilot study participants agreed Tough Talks was easy to use (9/11) and that they would like to use the system frequently (9/11). Tough Talks demonstrates that VR can be used to practice HIV status disclosure and lessons learned from program development offer insights for the use of AI systems for other areas of health and education.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Marge, Matthew; Henry, Cassidy; Artstein, Ron; Traum, David; Voss, Clare R.
Consequences and Factors of Stylistic Differences in Human-Robot Dialogue Proceedings Article
In: Proceedings of the SIGDIAL 2018 Conference, pp. 110–118, Association for Computational Linguistics, Melbourne, Australia, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{lukin_consequences_2018,
  title     = {Consequences and Factors of Stylistic Differences in Human-Robot Dialogue},
  author    = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Matthew Marge and Cassidy Henry and Ron Artstein and David Traum and Clare R. Voss},
  url       = {https://www.aclweb.org/anthology/papers/W/W18/W18-5012/},
  doi       = {10.18653/v1/W18-5012},
  year      = {2018},
  date      = {2018-07-01},
  booktitle = {Proceedings of the SIGDIAL 2018 Conference},
  pages     = {110–118},
  publisher = {Association for Computational Linguistics},
  address   = {Melbourne, Australia},
  abstract  = {This paper identifies stylistic differences in instruction-giving observed in a corpus of human-robot dialogue. Differences in verbosity and structure (i.e., single-intent vs. multi-intent instructions) arose naturally without restrictions or prior guidance on how users should speak with the robot. Different styles were found to produce different rates of miscommunication, and correlations were found between style differences and individual user variation, trust, and interaction experience with the robot. Understanding potential consequences and factors that influence style can inform design of dialogue systems that are robust to natural variation from human users.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nye, Benjamin D.; Karumbaiah, Shamya; Tokel, S. Tugba; Core, Mark G.; Stratou, Giota; Auerbach, Daniel; Georgila, Kallirroi
Engaging with the Scenario: Affect and Facial Patterns from a Scenario-Based Intelligent Tutoring System Proceedings Article
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 352–366, Springer International Publishing, London, UK, 2018, ISBN: 978-3-319-93842-4 978-3-319-93843-1.
Abstract | Links | BibTeX | Tags:
@inproceedings{nye_engaging_2018,
title = {Engaging with the Scenario: Affect and Facial Patterns from a Scenario-Based Intelligent Tutoring System},
author = {Benjamin D. Nye and Shamya Karumbaiah and S. Tugba Tokel and Mark G. Core and Giota Stratou and Daniel Auerbach and Kallirroi Georgila},
url = {http://link.springer.com/10.1007/978-3-319-93843-1_26},
doi = {10.1007/978-3-319-93843-1_26},
isbn = {978-3-319-93842-4 978-3-319-93843-1},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
volume = {10947},
pages = {352–366},
publisher = {Springer International Publishing},
address = {London, UK},
abstract = {Facial expression trackers output measures for facial action units (AUs), and are increasingly being used in learning technologies. In this paper, we compile patterns of AUs seen in related work as well as use factor analysis to search for categories implicit in our corpus. Although there was some overlap between the factors in our data and previous work, we also identified factors seen in the broader literature but not previously reported in the context of learning environments. In a correlational analysis, we found evidence for relationships between factors and self-reported traits such as academic effort, study habits, and interest in the subject. In addition, we saw differences in average levels of factors between a video watching activity, and a decision making activity. However, in this analysis, we were not able to isolate any facial expressions having a significant positive or negative relationship with either learning gain, or performance once question difficulty and related factors were also considered. Given the overall low levels of facial affect in the corpus, further research will explore different populations and learning tasks to test the possible hypothesis that learners may have been in a pattern of “Over-Flow” in which they were engaged with the system, but not deeply thinking about the content or their errors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Boberg, Jill; Gainer, Alesia; Gratch, Jonathan; Johnson, Emmanuel; Leuski, Anton; Lucas, Gale; Traum, David
The Niki and Julie Corpus: Collaborative Multimodal Dialogues between Humans, Robots, and Virtual Agents Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), European Language Resources Association (ELRA), Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
Abstract | Links | BibTeX | Tags:
@inproceedings{artstein_niki_2018,
  title     = {The Niki and Julie Corpus: Collaborative Multimodal Dialogues between Humans, Robots, and Virtual Agents},
  author    = {Ron Artstein and Jill Boberg and Alesia Gainer and Jonathan Gratch and Emmanuel Johnson and Anton Leuski and Gale Lucas and David Traum},
  url       = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/482.pdf},
  isbn      = {979-10-95546-00-9},
  year      = {2018},
  date      = {2018-05-01},
  booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Miyazaki, Japan},
  abstract  = {The Niki and Julie corpus contains more than 600 dialogues between human participants and a human-controlled robot or virtual agent, engaged in a series of collaborative item-ranking tasks designed to measure influence. Some of the dialogues contain deliberate conversational errors by the robot, designed to simulate the kinds of conversational breakdown that are typical of present-day automated agents. Data collected include audio and video recordings, the results of the ranking tasks, and questionnaire responses; some of the recordings have been transcribed and annotated for verbal and nonverbal feedback. The corpus has been used to study influence and grounding in dialogue. All the dialogues are in American English.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Xiao, Gang; Georgila, Kallirroi
A Comparison of Reinforcement Learning Methodologies in Two-Party and Three-Party Negotiation Dialogue Proceedings Article
In: Proceedings of the Thirty-First International Florida Artificial Intelligence Research Society Conference (FLAIRS-31), AAAI, Melbourne, FL, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{xiao_comparison_2018,
title = {A Comparison of Reinforcement Learning Methodologies in Two-Party and Three-Party Negotiation Dialogue},
author = {Gang Xiao and Kallirroi Georgila},
url = {https://aaai.org/ocs/index.php/FLAIRS/FLAIRS18/paper/view/17687},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Thirty-First International Florida Artificial Intelligence Research Society Conference (FLAIRS-31)},
publisher = {AAAI},
address = {Melbourne, FL},
abstract = {We use reinforcement learning to learn dialogue policies in a collaborative furniture layout negotiation task. We employ a variety of methodologies (i.e., learning against a simulated user versus co-learning) and algorithms. Our policies achieve the best solution or a good solution to this problem for a variety of settings and initial conditions, including in the presence of noise (e.g., due to speech recognition or natural language understanding errors). Also, our policies perform well even in situations not observed during training. Policies trained against a simulated user perform well while interacting with policies trained through co-learning, and vice versa. Furthermore, policies trained in a two-party setting are successfully applied to a three-party setting, and vice versa.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Choi, Hyungtak; Boberg, Jill; Jeon, Heesik; Traum, David
Toward Low-Cost Automated Evaluation Metrics for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 9th International Workshop on Spoken Dialogue Systems Technology (IWSDS), IWSDS, Singapore, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgila_toward_2018,
  title     = {Toward Low-Cost Automated Evaluation Metrics for Internet of Things Dialogues},
  author    = {Kallirroi Georgila and Carla Gordon and Hyungtak Choi and Jill Boberg and Heesik Jeon and David Traum},
  url       = {http://www.colips.org/conferences/iwsds2018/wp/wp-content/uploads/2018/03/IWSDS-2018_paper_18.pdf},
  year      = {2018},
  date      = {2018-05-01},
  booktitle = {Proceedings of the 9th International Workshop on Spoken Dialogue Systems Technology (IWSDS)},
  publisher = {IWSDS},
  address   = {Singapore},
  abstract  = {We analyze a corpus of system-user dialogues in the Internet of Things domain. Our corpus is automatically, semi-automatically, and manually annotated with a variety of features both on the utterance level and the full dialogue level. The corpus also includes human ratings of dialogue quality collected via crowdsourcing. We calculate correlations between features and human ratings to identify which features are highly associated with human perceptions about dialogue quality in this domain. We also perform linear regression and derive a variety of dialogue quality evaluation functions. These evaluation functions are then applied to a heldout portion of our corpus, and are shown to be highly predictive of human ratings and outperform standard reward-based evaluation functions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Brixey, Jacqueline; Bui, Trung; Chang, Walter; Kim, Doo Soon; Artstein, Ron; Georgila, Kallirroi
Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing Proceedings Article
In: Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC), LREC, Miyazaki, Japan, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{manuvinakurike_edit_2018,
  title     = {Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing},
  author    = {Ramesh Manuvinakurike and Jacqueline Brixey and Trung Bui and Walter Chang and Doo Soon Kim and Ron Artstein and Kallirroi Georgila},
  url       = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/481.pdf},
  year      = {2018},
  date      = {2018-05-01},
  booktitle = {Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC)},
  publisher = {LREC},
  address   = {Miyazaki, Japan},
  abstract  = {This paper introduces the task of interacting with an image editing program through natural language. We present a corpus of image edit requests which were elicited for real world images, and an annotation framework for understanding such natural language instructions and mapping them to actionable computer commands. Finally, we evaluate crowd-sourced annotation as a means of efficiently creating a sizable corpus at a reasonable cost.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Henry, Cassidy; Lukin, Stephanie; Artstein, Ron; Gervitz, Felix; Pollard, Kim; Bonial, Claire; Lei, Su; Voss, Clare R.; Marge, Matthew; Hayes, Cory J.; Hill, Susan G.
Dialogue Structure Annotation for Multi-Floor Interaction Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 104–111, ELRA, Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
Abstract | Links | BibTeX | Tags:
@inproceedings{traum_dialogue_2018,
  title     = {Dialogue Structure Annotation for Multi-Floor Interaction},
  author    = {David Traum and Cassidy Henry and Stephanie Lukin and Ron Artstein and Felix Gervitz and Kim Pollard and Claire Bonial and Su Lei and Clare R. Voss and Matthew Marge and Cory J. Hayes and Susan G. Hill},
  url       = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/672.html},
  isbn      = {979-10-95546-00-9},
  year      = {2018},
  date      = {2018-05-01},
  booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
  pages     = {104–111},
  publisher = {ELRA},
  address   = {Miyazaki, Japan},
  abstract  = {We present an annotation scheme for meso-level dialogue structure, specifically designed for multi-floor dialogue. The scheme includes a transaction unit that clusters utterances from multiple participants and floors into units according to realization of an initiator’s intent, and relations between individual utterances within the unit. We apply this scheme to annotate a corpus of multi-floor human-robot interaction dialogues. We examine the patterns of structure observed in these dialogues and present inter-annotator statistics and relative frequencies of types of relations and transaction units. Finally, some example applications of these annotations are introduced.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Brixey, Jacqueline; Pincus, Eli; Artstein, Ron
Chahta Anumpa: A Multimodal Corpus of the Choctaw Language Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 3371–3376, ELRA, Miyazaki, Japan, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{brixey_chahta_2018,
  title     = {Chahta Anumpa: A Multimodal Corpus of the Choctaw Language},
  author    = {Jacqueline Brixey and Eli Pincus and Ron Artstein},
  url       = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/822.html},
  year      = {2018},
  date      = {2018-05-01},
  booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
  pages     = {3371–3376},
  publisher = {ELRA},
  address   = {Miyazaki, Japan},
  abstract  = {This paper presents a general use corpus for the Native American indigenous language Choctaw. The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for the threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bonial, Claire; Lukin, Stephanie M.; Foots, Ashley; Henry, Cassidy; Marge, Matthew; Pollard, Kimberly A.; Artstein, Ron; Traum, David; Voss, Clare R.
Human-Robot Dialogue and Collaboration in Search and Navigation Proceedings Article
In: Proceedings of the AREA Workshop: Annotation, Recognition, and Evaluation of Actions, AREA 2018, Miyazaki, Japan, 2018.
Abstract | Links | BibTeX | Tags:
@inproceedings{bonial_human-robot_2018,
title = {Human-Robot Dialogue and Collaboration in Search and Navigation},
author = {Claire Bonial and Stephanie M. Lukin and Ashley Foots and Cassidy Henry and Matthew Marge and Kimberly A. Pollard and Ron Artstein and David Traum and Clare R. Voss},
url = {http://www.areaworkshop.org/wp-content/uploads/2018/05/4.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the AREA Workshop: Annotation, Recognition, and Evaluation of Actions},
publisher = {AREA 2018},
address = {Miyazaki, Japan},
abstract = {Collaboration with a remotely located robot in tasks such as disaster relief and search and rescue can be facilitated by grounding natural language task instructions into actions executable by the robot in its current physical context. The corpus we describe here provides insight into the translation and interpretation a natural language instruction undergoes starting from verbal human intent, to understanding and processing, and ultimately, to robot execution. We use a ‘Wizard-of-Oz’ methodology to elicit the corpus data in which a participant speaks freely to instruct a robot on what to do and where to move through a remote environment to accomplish collaborative search and navigation tasks. This data offers the potential for exploring and evaluating action models by connecting natural language instructions to execution by a physical robot (controlled by a human ‘wizard’). In this paper, a description of the corpus (soon to be openly available) and examples of actions in the dialogue are provided.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Scassellati, Brian; Shapiro, Ari; Traum, David; Petitto, Laura-Ann; Brawer, Jake; Tsui, Katherine; Gilani, Setareh Nasihati; Malzkuhn, Melissa; Manini, Barbara; Stone, Adam; Kartheiser, Geo; Merla, Arcangelo
Teaching Language to Deaf Infants with a Robot and a Virtual Human Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 1–13, ACM Press, Montreal, Canada, 2018, ISBN: 978-1-4503-5620-6.
Abstract | Links | BibTeX | Tags:
@inproceedings{scassellati_teaching_2018,
  title     = {Teaching Language to Deaf Infants with a Robot and a Virtual Human},
  author    = {Brian Scassellati and Ari Shapiro and David Traum and Laura-Ann Petitto and Jake Brawer and Katherine Tsui and Setareh Nasihati Gilani and Melissa Malzkuhn and Barbara Manini and Adam Stone and Geo Kartheiser and Arcangelo Merla},
  url       = {http://dl.acm.org/citation.cfm?doid=3173574.3174127},
  doi       = {10.1145/3173574.3174127},
  isbn      = {978-1-4503-5620-6},
  year      = {2018},
  date      = {2018-04-01},
  booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
  pages     = {1–13},
  publisher = {ACM Press},
  address   = {Montreal, Canada},
  abstract  = {Children with insufficient exposure to language during critical developmental periods in infancy are at risk for cognitive, language, and social deficits [55]. This is especially difficult for deaf infants, as more than 90% are born to hearing parents with little sign language experience [48]. We created an integrated multi-agent system involving a robot and virtual human designed to augment language exposure for 6-12 month old infants. Human-machine design for infants is challenging, as most screen-based media are unlikely to support learning in [33]. While presently, robots are incapable of the dexterity and expressiveness required for signing, even if it existed, developmental questions remain about the capacity for language from artificial agents to engage infants. Here we engineered the robot and avatar to provide visual language to effect socially contingent human conversational exchange. We demonstrate the successful engagement of our technology through case studies of deaf and hearing infants.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pincus, Eli; Lei, Su; Lucas, Gale; Johnson, Emmanuel; Tsang, Michael; Gratch, Jonathan; Traum, David
The Importance of Regulatory Fit & Early Success in a Human-Machine Game Proceedings Article
In: Proceedings of the first APA ACM Technology, Mind and Society Conference, pp. 1–6, ACM Press, Washington D.C., 2018, ISBN: 978-1-4503-5420-2.
Abstract | Links | BibTeX | Tags:
@inproceedings{pincus_importance_2018,
  title     = {The Importance of Regulatory Fit & Early Success in a Human-Machine Game},
  author    = {Eli Pincus and Su Lei and Gale Lucas and Emmanuel Johnson and Michael Tsang and Jonathan Gratch and David Traum},
  url       = {http://dl.acm.org/citation.cfm?doid=3183654.3183661},
  doi       = {10.1145/3183654.3183661},
  isbn      = {978-1-4503-5420-2},
  year      = {2018},
  date      = {2018-04-01},
  booktitle = {Proceedings of the first APA ACM Technology, Mind and Society Conference},
  pages     = {1–6},
  publisher = {ACM Press},
  address   = {Washington D.C.},
  abstract  = {In this paper, we explore the potential of regulatory focus theory as a framework for personalizing human-machine interactions. We manipulate framing (gain or loss) of a collaborative word-guessing game where a fully-automated virtual human gives clues. Consistent with previous work on regulatory focus, we find evidence of significantly higher perceived task-success when participants have regulatory fit. Inconsistent with previous work, however, fit did not increase task-enjoyment (nor performance). Participants with gain framing had marginally higher enjoyment, regardless of their regulatory focus. We operationalize motivation by number of optional rounds played but failed to find a "fit" effect. Instead, players who achieved early success (scoring more points in initial rounds) were more motivated. Early success was significantly correlated with number of optional rounds played. This finding calls to attention the need for the literature to more thoroughly investigate the relationship between success-timing and total player playtime in the game.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale M.; Boberg, Jill; Traum, David; Artstein, Ron; Gratch, Jonathan; Gainer, Alesia; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Getting to Know Each Other: The Role of Social Dialogue in Recovery from Errors in Social Robots Proceedings Article
In: Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction, pp. 344–351, ACM Press, Chicago, IL, 2018, ISBN: 978-1-4503-4953-6.
Abstract | Links | BibTeX | Tags:
@inproceedings{lucas_getting_2018,
title = {Getting to Know Each Other: The Role of Social Dialogue in Recovery from Errors in Social Robots},
author = {Gale M. Lucas and Jill Boberg and David Traum and Ron Artstein and Jonathan Gratch and Alesia Gainer and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {http://dl.acm.org/citation.cfm?doid=3171221.3171258},
doi = {10.1145/3171221.3171258},
isbn = {978-1-4503-4953-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction},
pages = {344--351},
publisher = {ACM Press},
address = {Chicago, IL},
abstract = {This work explores the extent to which social dialogue can mitigate (or exacerbate) the loss of trust caused when robots make conversational errors. Our study uses a NAO robot programmed to persuade users to agree with its rankings on two tasks. We perform two manipulations: (1) The timing of conversational errors - the robot exhibited errors either in the first task, the second task, or neither; (2) The presence of social dialogue - between the two tasks, users either engaged in a social dialogue with the robot or completed a control task. We found that the timing of the errors matters: replicating previous research, conversational errors reduce the robot's influence in the second task, but not on the first task. Social dialogue interacts with the timing of errors, acting as an intensifier: social dialogue helps the robot recover from prior errors, and actually boosts subsequent influence; but social dialogue backfires if it is followed by errors, because it extends the period of good performance, creating a stronger contrast effect with the subsequent errors. The design of social robots should therefore be more careful to avoid errors after periods of good performance than early on in a dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2017
Bunt, Harry; Petukhova, Volha; Traum, David; Alexandersson, Jan
Dialogue Act Annotation with the ISO 24617-2 Standard Book Section
In: Multimodal Interaction with W3C Standards, pp. 109–135, Springer International Publishing, Cham, Switzerland, 2017, ISBN: 978-3-319-42814-7 978-3-319-42816-1.
Abstract | Links | BibTeX | Tags:
@incollection{bunt_dialogue_2017,
title = {Dialogue Act Annotation with the {ISO} 24617-2 Standard},
author = {Harry Bunt and Volha Petukhova and David Traum and Jan Alexandersson},
url = {http://link.springer.com/10.1007/978-3-319-42816-1_6},
isbn = {978-3-319-42814-7 978-3-319-42816-1},
year = {2017},
date = {2017-11-01},
booktitle = {Multimodal Interaction with W3C Standards},
pages = {109--135},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {This chapter describes recent and ongoing annotation efforts using the ISO 24617-2 standard for dialogue act annotation. Experimental studies are reported on the annotation by human annotators and by annotation machines of some of the specific features of the ISO annotation scheme, such as its multidimensional annotation of communicative functions, the recognition of each of its nine dimensions, and the recognition of dialogue act qualifiers for certainty, conditionality, and sentiment. The construction of corpora of dialogues, annotated according to ISO 24617-2, is discussed, including the recent DBOX and DialogBank corpora.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Swanson, Reid William; Gordon, Andrew S.; Khooshabeh, Peter; Sagae, Kenji; Huskey, Richard; Mangus, Michael; Amir, Ori; Weber, Rene
An Empirical Analysis of Subjectivity and Narrative Levels in Weblog Storytelling Across Cultures Journal Article
In: Dialogue & Discourse, vol. 8, no. 2, pp. 105–128, 2017.
Abstract | Links | BibTeX | Tags:
@article{swanson_empirical_2017,
title = {An Empirical Analysis of Subjectivity and Narrative Levels in Weblog Storytelling Across Cultures},
author = {Reid William Swanson and Andrew S. Gordon and Peter Khooshabeh and Kenji Sagae and Richard Huskey and Michael Mangus and Ori Amir and Rene Weber},
url = {https://www.researchgate.net/publication/321170929_An_Empirical_Analysis_of_Subjectivity_and_Narrative_Levels_in_Personal_Weblog_Storytelling_Across_Cultures?_sg=Ck1pqxhW1uuTUe54DX5BLVYey6L6DkwTpjnes1ctAEuGQDHxoEOr887eKWjHIA0_-kk4ya9dXwEZ4OM},
doi = {10.5087/dad.2017.205},
year = {2017},
date = {2017-11-01},
journal = {Dialogue \& Discourse},
volume = {8},
number = {2},
pages = {105--128},
abstract = {Storytelling is a universal activity, but the way in which discourse structure is used to persuasively convey ideas and emotions may depend on cultural factors. Because first-person accounts of life experiences can have a powerful impact in how a person is perceived, the storyteller may instinctively employ specific strategies to shape the audience’s perception. Hypothesizing that some of the differences in storytelling can be captured by the use of narrative levels and subjectivity, we analyzed over one thousand narratives taken from personal weblogs. First, we compared stories from three different cultures written in their native languages: English, Chinese and Farsi. Second, we examined the impact of these two discourse properties on a reader’s attitude and behavior toward the narrator. We found surprising similarities and differences in how stories are structured along these two dimensions across cultures. These discourse properties have a small but significant impact on a reader’s behavioral response toward the narrator.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lucas, Gale M.; Boberg, Jill; Traum, David; Artstein, Ron; Gratch, Jon; Gainer, Alesia; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
The Role of Social Dialogue and Errors in Robots Proceedings Article
In: Proceedings of the 5th International Conference on Human Agent Interaction, pp. 431–433, ACM Press, Bielefeld, Germany, 2017, ISBN: 978-1-4503-5113-3.
Abstract | Links | BibTeX | Tags:
@inproceedings{lucas_role_2017,
title = {The Role of Social Dialogue and Errors in Robots},
author = {Gale M. Lucas and Jill Boberg and David Traum and Ron Artstein and Jon Gratch and Alesia Gainer and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {http://dl.acm.org/citation.cfm?doid=3125739.3132617},
doi = {10.1145/3125739.3132617},
isbn = {978-1-4503-5113-3},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the 5th International Conference on Human Agent Interaction},
pages = {431--433},
publisher = {ACM Press},
address = {Bielefeld, Germany},
abstract = {Social robots establish rapport with human users. This work explores the extent to which rapport-building can benefit (or harm) conversations with robots, and under what circumstances this occurs. For example, previous work has shown that agents that make conversational errors are less capable of influencing people than agents that do not make errors [1]. Some work has shown this effect with robots, but prior research has not considered additional factors such as the level of rapport between the person and the robot. We predicted that building rapport through a social dialogue (such as an ice-breaker) could mitigate the detrimental effect of a robot's errors on influence. Our study used a Nao robot programmed to persuade users to agree with its rankings on two "survival tasks" (e.g., lunar survival task). We manipulated both errors and social dialogue: the robot either exhibited errors in the second survival task or not, and users either engaged in an ice-breaker with the robot between the two survival tasks or completed a control task. Replicating previous research, errors tended to reduce the robot's influence in the second survival task. Contrary to our prediction, results revealed that the ice-breaker did not mitigate the effect of errors, and if anything, errors were more harmful after the ice-breaker (intended to build rapport) than in the control condition. This backfiring of attempted rapport-building may be due to a contrast effect, suggesting that the design of social robots should avoid introducing dialogues of incongruent quality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Foots, Ashley; Hayes, Cory; Henry, Cassidy; Pollard, Kimberly; Artstein, Ron; Voss, Clare; Traum, David
Exploring Variation of Natural Human Commands to a Robot in a Collaborative Navigation Task Proceedings Article
In: Proceedings of the First Workshop on Language Grounding for Robotics, pp. 58–66, Association for Computational Linguistics, Vancouver, Canada, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{marge_exploring_2017,
title = {Exploring Variation of Natural Human Commands to a Robot in a Collaborative Navigation Task},
author = {Matthew Marge and Claire Bonial and Ashley Foots and Cory Hayes and Cassidy Henry and Kimberly Pollard and Ron Artstein and Clare Voss and David Traum},
url = {http://www.aclweb.org/anthology/W17-2808},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the First Workshop on Language Grounding for Robotics},
pages = {58--66},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Robot-directed communication is variable, and may change based on human perception of robot capabilities. To collect training data for a dialogue system and to investigate possible communication changes over time, we developed a Wizard-of-Oz study that (a) simulates a robot’s limited understanding, and (b) collects dialogues where human participants build a progressively better mental model of the robot’s understanding. With ten participants, we collected ten hours of human-robot dialogue. We analyzed the structure of instructions that participants gave to a remote robot before it responded. Our findings show a general initial preference for including metric information (e.g., move forward 3 feet) over landmarks (e.g., move to the desk) in motion commands, but this decreased over time, suggesting changes in perception.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Hoegen, Rens; Lan, Wei; Rusow, Joshua; Singla, Karan; Yin, Xusen; Artstein, Ron; Leuski, Anton
SHIHbot: A Facebook chatbot for Sexual Health Information on HIV/AIDS Proceedings Article
In: Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 370–373, Association for Computational Linguistics, Saarbruecken, Germany, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{brixey_shihbot_2017,
title = {{SHIHbot}: A {Facebook} chatbot for Sexual Health Information on {HIV/AIDS}},
author = {Jacqueline Brixey and Rens Hoegen and Wei Lan and Joshua Rusow and Karan Singla and Xusen Yin and Ron Artstein and Anton Leuski},
url = {http://www.sigdial.org/workshops/conference18/proceedings/index.html},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {370--373},
publisher = {Association for Computational Linguistics},
address = {Saarbruecken, Germany},
abstract = {We present the implementation of an autonomous chatbot, SHIHbot, deployed on Facebook, which answers a wide variety of sexual health questions on HIV/AIDS. The chatbot's response database is compiled from professional medical and public health resources in order to provide reliable information to users. The system's backend is NPCEditor, a response selection platform trained on linked questions and answers; to our knowledge this is the first retrieval-based chatbot deployed on a large public social network.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Artstein, Ron
Lessons in Dialogue System Deployment Proceedings Article
In: Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 352–355, Association for Computational Linguistics, Saarbruecken, Germany, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{leuski_lessons_2017,
title = {Lessons in Dialogue System Deployment},
author = {Anton Leuski and Ron Artstein},
url = {http://www.sigdial.org/workshops/conference18/proceedings/index.html},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {352--355},
publisher = {Association for Computational Linguistics},
address = {Saarbruecken, Germany},
abstract = {We analyze deployment of an interactive dialogue system in an environment where deep technical expertise might not be readily available. The initial version was created using a collection of research tools. We summarize a number of challenges with its deployment at two museums and describe a new system that simplifies the installation and user interface; reduces reliance on 3rd-party software; and provides a robust data collection mechanism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Kyusong; Zhao, Tiancheng; Du, Yulun; Cai, Edward; Lu, Allen; Pincus, Eli; Traum, David; Ultes, Stefan; Rojas-Barahona, Lina M.; Gasic, Milica; Young, Steve; Eskenazi, Maxine
DialPort, Gone Live: An Update After A Year of Development Proceedings Article
In: Proceedings of the SIGDIAL 2017 Conference, Association for Computational Linguistics, Saarbruecken, Germany, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{lee_dialport_2017,
title = {{DialPort}, Gone Live: An Update After A Year of Development},
author = {Kyusong Lee and Tiancheng Zhao and Yulun Du and Edward Cai and Allen Lu and Eli Pincus and David Traum and Stefan Ultes and Lina M. Rojas-Barahona and Milica Gasic and Steve Young and Maxine Eskenazi},
url = {http://www.sigdial.org/workshops/conference18/proceedings/index.html},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGDIAL 2017 Conference},
publisher = {Association for Computational Linguistics},
address = {Saarbruecken, Germany},
abstract = {DialPort collects user data for connected spoken dialog systems. At present six systems are linked to a central portal that directs the user to the applicable system and suggests systems that the user may be interested in. User data has started to flow into the system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pincus, Eli; Traum, David
An Incremental Response Policy in an Automatic Word-Game Proceedings Article
In: Proceedings of IVA 2017 Workshop on Conversational Interruptions in Human-Agent Interactions, Stockholm, Sweden, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{pincus_incremental_2017,
title = {An Incremental Response Policy in an Automatic Word-Game},
author = {Eli Pincus and David Traum},
url = {http://people.ict.usc.edu/~traum/Papers/pincus_traum-cihai2017.pdf},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of IVA 2017 Workshop on Conversational Interruptions in Human-Agent Interactions},
address = {Stockholm, Sweden},
abstract = {Turn-taking is an important aspect of human-human and human-computer interaction. Rapid turn-taking is a feature of human-human interaction that is difficult for today’s dialogue systems to emulate. For example, typical human-human interactions can involve an original sending interlocutor changing or stopping their speech mid-utterance as a result of overlapping speech from the other interlocutor. The overlapping utterances from the other interlocutor are typically called barge-in utterances. An example of this phenomena is seen in the two turns of dialogue in the top half of Figure 1. In this dialogue segment Student A first reveals his test score in the original utterance. Student A then begins to tell student B that he had heard Student B got a perfect score. Student B interrupts Student A with a barge-in utterance that contains new information (that actually he had not performed well on the test) causing Student A to halt his speech and not finish his original utterance. We call the unspoken part of student A’s original utterance Student A’s originally intended utterance. Student A then makes a decision based on the new information to not say his originally intended utterance. This is likely due to the originally intended utterance no longer being appropriate considering the new information made available to Student A. Student A then makes an intelligent next choice of what to say which can be seen in Student A’s updated utterance which takes into account the new information contained in Student B’s barge-in utterance. In this work we refer to Student A’s dialogue move as an intelligent update.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; DeVault, David; Georgila, Kallirroi
Using Reinforcement Learning to Model Incrementality in a Fast-Paced Dialogue Game Proceedings Article
In: Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, SIGDIAL, Saarbruecken, Germany, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{manuvinakurike_using_2017,
title = {Using Reinforcement Learning to Model Incrementality in a Fast-Paced Dialogue Game},
author = {Ramesh Manuvinakurike and David DeVault and Kallirroi Georgila},
url = {http://www.manuvinakurike.com/papers/eve-2017.pdf},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue},
publisher = {SIGDIAL},
address = {Saarbruecken, Germany},
abstract = {We apply Reinforcement Learning (RL) to the problem of incremental dialogue policy learning in the context of a fast-paced dialogue game. We compare the policy learned by RL with a high performance baseline policy which has been shown to perform very efficiently (nearly as well as humans) in this dialogue game. The RL policy outperforms the baseline policy in offline simulations (based on real user data). We provide a detailed comparison of the RL policy and the baseline policy, including information about how much effort and time it took to develop each one of them. We also highlight the cases where the RL policy performs better, and show that understanding the RL policy can provide valuable insights which can inform the creation of an even better rule-based policy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Henry, Cassidy; Moolchandani, Pooja; Pollard, Kimberly A.; Bonial, Claire; Foots, Ashley; Artstein, Ron; Hayes, Cory; Voss, Claire R.; Traum, David; Marge, Matthew
Towards Efficient Human-Robot Dialogue Collection: Moving Fido into the Virtual World Proceedings Article
In: Proceedings of the WiNLP workshop, Vancouver, Canada, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{cassidy_towards_2017,
title = {Towards Efficient Human-Robot Dialogue Collection: Moving {Fido} into the Virtual World},
author = {Cassidy Henry and Pooja Moolchandani and Kimberly A. Pollard and Claire Bonial and Ashley Foots and Ron Artstein and Cory Hayes and Claire R. Voss and David Traum and Matthew Marge},
url = {http://www.winlp.org/wp-content/uploads/2017/final_papers_2017/52_Paper.pdf},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the WiNLP workshop},
address = {Vancouver, Canada},
abstract = {Our research aims to develop a natural dialogue interface between robots and humans. We describe two focused efforts to increase data collection efficiency towards this end: creation of an annotated corpus of interaction data, and a robot simulation, allowing greater flexibility in when and where we can run experiments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Kyusong; Zhao, Tiancheng; Ultes, Stefan; Rojas-Barahona, Lina; Pincus, Eli; Traum, David; Eskenazi, Maxine
An Assessment Framework for DialPort Proceedings Article
In: Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS), IWSDS, Farmington, PA, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{lee_assessment_2017,
title = {An Assessment Framework for {DialPort}},
author = {Kyusong Lee and Tiancheng Zhao and Stefan Ultes and Lina Rojas-Barahona and Eli Pincus and David Traum and Maxine Eskenazi},
url = {https://www.uni-ulm.de/fileadmin/website_uni_ulm/iui.iwsds2017/papers/IWSDS2017_paper_1.pdf},
year = {2017},
date = {2017-06-01},
booktitle = {Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS)},
publisher = {IWSDS},
address = {Farmington, PA},
abstract = {Collecting a large amount of real human-computer interaction data in various domains is a cornerstone in the development of better data-driven spoken dialog systems. The DialPort project is creating a portal to collect a constant stream of real user conversational data on a variety of topics. In order to keep real users attracted to DialPort, it is crucial to develop a robust evaluation framework to monitor and maintain high performance. Different from earlier spoken dialog systems, DialPort has a heterogeneous set of spoken dialog systems gathered under one outward-looking agent. In order to access this new structure, we have identified some unique challenges that DialPort will encounter so that it can appeal to real users and have created a novel evaluation scheme that quantitatively assesses their performance in these situations. We look at assessment from the point of view of the system developer as well as that of the end user.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lycan, Bethany; Artstein, Ron
Direct and Mediated Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS), IWSDS, Farmington, PA, 2017.
Abstract | Links | BibTeX | Tags:
@inproceedings{lycan_direct_2017,
  author    = {Bethany Lycan and Ron Artstein},
  title     = {Direct and Mediated Interaction with a Holocaust Survivor},
  booktitle = {Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS)},
  publisher = {IWSDS},
  address   = {Farmington, PA},
  year      = {2017},
  date      = {2017-06-01},
  url       = {http://www.uni-ulm.de/fileadmin/website_uni_ulm/iui.iwsds2017/papers/IWSDS2017_paper_13.pdf},
  abstract  = {The New Dimensions in Testimony dialogue system was placed in two museums under two distinct conditions: docent-led group interaction, and free interaction with visitors. Analysis of the resulting conversations shows that docent-led interactions have a lower vocabulary and a higher proportion of user utterances that directly relate to the system’s subject matter, while free interaction is more personal in nature. Under docent-led interaction the system gives a higher proportion of direct appropriate responses, but overall correct system behavior is about the same in both conditions because the free interaction condition has more instances where the correct system behavior is to avoid a direct response.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nye, Benjamin; Karumbaiah, Shamya; Tokel, S. Tugba; Core, Mark G.; Stratou, Giota; Auerbach, Daniel; Georgila, Kallirroi
Analyzing Learner Affect in a Scenario-Based Intelligent Tutoring System Proceedings Article
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 544–547, Springer, Wuhan, China, 2017, ISBN: 978-3-319-61425-0.
Abstract | Links | BibTeX | Tags:
@inproceedings{nye_analyzing_2017,
title = {Analyzing Learner Affect in a Scenario-Based Intelligent Tutoring System},
author = {Benjamin Nye and Shamya Karumbaiah and S. Tugba Tokel and Mark G. Core and Giota Stratou and Daniel Auerbach and Kallirroi Georgila},
url = {https://link.springer.com/chapter/10.1007/978-3-319-61425-0_60},
doi = {10.1007/978-3-319-61425-0_60},
isbn = {978-3-319-61425-0},
year = {2017},
date = {2017-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
pages = {544--547},
publisher = {Springer},
address = {Wuhan, China},
abstract = {Scenario-based tutoring systems influence affective states due to two distinct mechanisms during learning: 1) reactions to performance feedback and 2) responses to the scenario context or events. To explore the role of affect and engagement, a scenario-based ITS was instrumented to support unobtrusive facial affect detection. Results from a sample of university students showed relatively few traditional academic affective states such as confusion or frustration, even at decision points and after poor performance (e.g., incorrect responses). This may show evidence of "over-flow," with a high level of engagement and interest but insufficient confusion/disequilibrium for optimal learning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron
Inter-annotator Agreement Book Section
In: Handbook of Linguistic Annotation, pp. 297–313, Springer Netherlands, Dordrecht, Netherlands, 2017, ISBN: 978-94-024-0879-9 978-94-024-0881-2.
Abstract | Links | BibTeX | Tags:
@incollection{artstein_inter-annotator_2017,
title = {Inter-annotator Agreement},
author = {Ron Artstein},
url = {http://link.springer.com/10.1007/978-94-024-0881-2_11},
doi = {10.1007/978-94-024-0881-2_11},
isbn = {978-94-024-0879-9 978-94-024-0881-2},
year = {2017},
date = {2017-06-01},
booktitle = {Handbook of Linguistic Annotation},
pages = {297--313},
publisher = {Springer Netherlands},
address = {Dordrecht, Netherlands},
abstract = {This chapter touches upon several issues in the calculation and assessment of interannotator agreement. It gives an introduction to the theory behind agreement coefficients and examples of their application to linguistic annotation tasks. Specific examples explore variation in annotator performance due to heterogeneous data, complex labels, item difficulty, and annotator differences, showing how global agreement coefficients may mask these sources of variation, and how detailed agreement studies can give insight into both the annotation process and the nature of the underlying data. The chapter also reviews recent work on using machine learning to exploit the variation among annotators and learn detailed models from which accurate labels can be inferred. I therefore advocate an approach where agreement studies are not used merely as a means to accept or reject a particular annotation scheme, but as a tool for exploring patterns in the data that are being annotated.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}