Publications
Search
Manuvinakurike, Ramesh; Paetzel, Maike; DeVault, David
Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection Proceedings Article
In: Proceedings of SEMDIAL 2015 goDIAL, pp. 113 – 121, Gothenburg, Sweden, 2015.
@inproceedings{manuvinakurike_reducing_2015,
  title     = {Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection},
  author    = {Ramesh Manuvinakurike and Maike Paetzel and David DeVault},
  url       = {http://ict.usc.edu/pubs/Reducing%20the%20Cost%20of%20Dialogue%20System%20Training%20and%20Evaluation%20with%20Online,%20Crowd-Sourced%20Dialogue%20Data%20Collection.pdf},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of SEMDIAL 2015 goDIAL},
  pages     = {113--121},
  address   = {Gothenburg, Sweden},
  abstract  = {This paper presents and analyzes an approach to crowd-sourced spoken dialogue data collection. Our approach enables low cost collection of browser-based spoken dialogue interactions between two remote human participants (human-human condition) as well as one remote human participant and an automated dialogue system (human-agent condition). We present a case study in which 200 remote participants were recruited to participate in a fast-paced image matching game, and which included both human-human and human-agent conditions. We discuss several technical challenges encountered in achieving this crowd-sourced data collection, and analyze the costs in time and money of carrying out the study. Our results suggest the potential of crowdsourced spoken dialogue data to lower costs and facilitate a range of research in dialogue modeling, dialogue system design, and system evaluation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Hill, Susan; Morency, Louis-Philippe; Pynadath, David; Traum, David
Exploring the Implications of Virtual Human Research for Human-Robot Teams Proceedings Article
In: Virtual, Augmented and Mixed Reality, pp. 186–196, Springer International Publishing, Los Angeles, CA, 2015, ISBN: 978-3-319-21066-7 978-3-319-21067-4.
@inproceedings{gratch_exploring_2015,
  title     = {Exploring the Implications of Virtual Human Research for Human-Robot Teams},
  author    = {Jonathan Gratch and Susan Hill and Louis-Philippe Morency and David Pynadath and David Traum},
  url       = {http://ict.usc.edu/pubs/Exploring%20the%20Implications%20of%20Virtual%20Human%20Research%20for%20Human-Robot%20Teams.pdf},
  doi       = {10.1007/978-3-319-21067-4_20},
  isbn      = {978-3-319-21066-7, 978-3-319-21067-4},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Virtual, Augmented and Mixed Reality},
  volume    = {9179},
  pages     = {186--196},
  publisher = {Springer International Publishing},
  address   = {Los Angeles, CA},
  abstract  = {This article briefly explores potential synergies between the fields of virtual human and human-robot interaction research. We consider challenges in advancing the effectiveness of human-robot teams makes recommendations for enhancing this by facilitating synergies between robotics and virtual human research.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; DeVault, David; Lucas, Gale M.; Marsella, Stacy
Negotiation as a Challenge Problem for Virtual Humans Proceedings Article
In: Brinkman, Willem-Paul; Broekens, Joost; Heylen, Dirk (Ed.): Intelligent Virtual Agents, pp. 201–215, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
@inproceedings{gratch_negotiation_2015,
  title     = {Negotiation as a Challenge Problem for Virtual Humans},
  author    = {Jonathan Gratch and David DeVault and Gale M. Lucas and Stacy Marsella},
  editor    = {Willem-Paul Brinkman and Joost Broekens and Dirk Heylen},
  url       = {http://ict.usc.edu/pubs/Negotiation%20as%20a%20Challenge%20Problem%20for%20Virtual%20Humans.pdf},
  doi       = {10.1007/978-3-319-21996-7_21},
  isbn      = {978-3-319-21995-0, 978-3-319-21996-7},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Intelligent Virtual Agents},
  volume    = {9238},
  pages     = {201--215},
  publisher = {Springer International Publishing},
  address   = {Delft, Netherlands},
  abstract  = {We argue for the importance of negotiation as a challenge problem for virtual human research, and introduce a virtual conversational agent that allows people to practice a wide range of negotiation skills. We describe the multi-issue bargaining task, which has become a de facto standard for teaching and research on negotiation in both the social and computer sciences. This task is popular as it allows scientists or instructors to create a variety of distinct situations that arise in real-life negotiations, simply by manipulating a small number of mathematical parameters. We describe the development of a virtual human that will allow students to practice the interpersonal skills they need to recognize and navigate these situations. An evaluation of an early wizard-controlled version of the system demonstrates the promise of this technology for teaching negotiation and supporting scientific research on social intelligence.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nouri, Elnaz; Traum, David
Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game Proceedings Article
In: Proceedings of AHFE 2015, Las Vegas, NV, 2015.
@inproceedings{nouri_cross_2015,
  title     = {Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game},
  author    = {Elnaz Nouri and David Traum},
  url       = {http://ict.usc.edu/pubs/Cross%20cultural%20report%20of%20values%20and%20decisions%20in%20the%20multi%20round%20ultimatum%20game%20and%20the%20centipede%20game.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Proceedings of AHFE 2015},
  address   = {Las Vegas, NV},
  abstract  = {This paper investigates the cultural differences in decision making behavior of people from the US and India. We study players from these cultures playing the Multi Round Ultimatum Game and the Centipede Game online. In order to study how people from different cultures evaluate decisions we use criteria from the Multi Attribute Relational Values (MARV) survey. Our results confirm the existence of cultural differences in how people from US and India make decisions in the Ultimatum and Centipede games. We also observe differences in responses to survey questions implying differences in the amount of importance that the two cultures assign to the MARV decision making criteria.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chatterjee, Moitreya; Leuski, Anton
CRMActive: An Active Learning Based Approach for Effective Video Annotation and Retrieval Proceedings Article
In: Proceedings of ACM International Conference on Multimedia Retrieval (ICMR), pp. 535–538, ACM, Shanghai, China, 2015.
@inproceedings{chatterjee_crmactive_2015,
  title     = {CRMActive: An Active Learning Based Approach for Effective Video Annotation and Retrieval},
  author    = {Moitreya Chatterjee and Anton Leuski},
  url       = {http://ict.usc.edu/pubs/CRMActive%20-%20An%20Active%20Learning%20Based%20Approach%20for%20Effective%20Video%20Annotation%20and%20Retrieval.pdf},
  doi       = {10.1145/2671188.2749342},
  year      = {2015},
  date      = {2015-06-01},
  booktitle = {Proceedings of ACM International Conference on Multimedia Retrieval (ICMR)},
  pages     = {535--538},
  publisher = {ACM},
  address   = {Shanghai, China},
  abstract  = {Conventional multimedia annotation/retrieval systems such as Normalized Continuous Relevance Model (NormCRM) [7] require a fully labeled training data for a good performance. Active Learning, by determining an order for labeling the training data, allows for a good performance even before the training data is fully annotated. In this work we propose an active learning algorithm, which combines a novel measure of sample uncertainty with a novel clustering-based approach for determining sample density and diversity and integrate it with NormCRM. The clusters are also iteratively refined to ensure both feature and label-level agreement among samples. We show that our approach outperforms multiple baselines both on a new, open dataset and on the popular TRECVID corpus at both the tasks of annotation and text-based retrieval of videos.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Garten, Justin; Sagae, Kenji; Ustun, Volkan; Dehghani, Morteza
Combining Distributed Vector Representations for Words Proceedings Article
In: Proceedings of NAACL-HLT 2015, pp. 95–101, Association for Computational Linguistics, Denver, Colorado, 2015.
@inproceedings{garten_combining_2015,
  title     = {Combining Distributed Vector Representations for Words},
  author    = {Justin Garten and Kenji Sagae and Volkan Ustun and Morteza Dehghani},
  url       = {http://ict.usc.edu/pubs/Combining%20Distributed%20Vector%20Representations%20for%20Words.pdf},
  year      = {2015},
  date      = {2015-06-01},
  booktitle = {Proceedings of NAACL-HLT 2015},
  pages     = {95--101},
  publisher = {Association for Computational Linguistics},
  address   = {Denver, Colorado},
  abstract  = {Recent interest in distributed vector representations for words has resulted in an increased diversity of approaches, each with strengths and weaknesses. We demonstrate how diverse vector representations may be inexpensively composed into hybrid representations, effectively leveraging strengths of individual components, as evidenced by substantial improvements on a standard word analogy task. We further compare these results over different sizes of training sets and find these advantages are more pronounced when training data is limited. Finally, we explore the relative impacts of the differences in the learning methods themselves and the size of the contexts they access.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Artstein, Ron; Leuski, Anton; Maio, Heather; Mor-Barak, Tomer; Gordon, Carla; Traum, David
How Many Utterances Are Needed to Support Time-Offset Interaction? Proceedings Article
In: Proceedings of FLAIRS 28, pp. 144–149, AAAI Press, Hollywood, FL, 2015, ISBN: 978-1-57735-730-8.
@inproceedings{artstein_how_2015,
  title     = {How Many Utterances Are Needed to Support Time-Offset Interaction?},
  author    = {Ron Artstein and Anton Leuski and Heather Maio and Tomer Mor-Barak and Carla Gordon and David Traum},
  url       = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS15/paper/view/10442},
  isbn      = {978-1-57735-730-8},
  year      = {2015},
  date      = {2015-05-01},
  booktitle = {Proceedings of FLAIRS 28},
  pages     = {144--149},
  publisher = {AAAI Press},
  address   = {Hollywood, FL},
  abstract  = {A set of several hundred recorded statements by a single speaker is sufficient to address unrestricted questions and sustain short conversations on a circumscribed topic. Statements were recorded by Pinchas Gutter, a Holocaust survivor, talking about his personal experiences before, during and after the Holocaust. These statements were delivered to participants in conversation, using a “Wizard of Oz” system, where live operators select an appropriate reaction to each user utterance in real time. Even though participants were completely unconstrained in the questions they could ask, the recorded statements were able to directly address at least 58% of user questions. The unanswered questions were then analyzed to identify gaps, and additional statements were recorded to fill the gaps. The statements will be put in an automated system using existing language understanding technology, to create the first full working system of time-offset interaction, allowing a live conversation with a real human who is not present for the conversation in real time.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shim, Han Suk; Park, Sunghyun; Chatterjee, Moitreya; Scherer, Stefan; Sagae, Kenji; Morency, Louis-Philippe
ACOUSTIC AND PARA-VERBAL INDICATORS OF PERSUASIVENESS IN SOCIAL MULTIMEDIA Proceedings Article
In: Proceedings of ICASSP 2015, pp. 2239 – 2243, IEEE, Brisbane, Australia, 2015.
@inproceedings{shim_acoustic_2015,
  title     = {Acoustic and Para-Verbal Indicators of Persuasiveness in Social Multimedia},
  author    = {Han Suk Shim and Sunghyun Park and Moitreya Chatterjee and Stefan Scherer and Kenji Sagae and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/ACOUSTIC%20AND%20PARA-VERBAL%20INDICATORS%20OF%20PERSUASIVENESS%20IN%20SOCIAL%20MULTIMEDIA.pdf},
  year      = {2015},
  date      = {2015-04-01},
  booktitle = {Proceedings of ICASSP 2015},
  pages     = {2239--2243},
  publisher = {IEEE},
  address   = {Brisbane, Australia},
  abstract  = {Persuasive communication and interaction play an important and pervasive role in many aspects of our lives. With the rapid growth of social multimedia websites such as YouTube, it has become more important and useful to understand persuasiveness in the context of online social multimedia content. In this paper, we present our results of conducting various analyses of persuasiveness in speech with our multimedia corpus of 1,000 movie review videos obtained from ExpoTV.com, a popular social multimedia website. Our experiments firstly show that a speaker’s level of persuasiveness can be predicted from acoustic characteristics and para-verbal cues related to speech fluency. Secondly, we show that taking acoustic cues in different time periods of a movie review can improve the performance of predicting a speaker’s level of persuasiveness. Lastly, we show that a speaker’s positive or negative attitude toward a topic influences the prediction performance as well.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
DeVault, David; Mell, Jonathan; Gratch, Jonathan
Toward Natural Turn-Taking in a Virtual Human Negotiation Agent Proceedings Article
In: AAAI Spring Symposium on Turn-taking and Coordination in Human-Machine Interaction, pp. 2–9, AAAI Press, Palo Alto, California, 2015.
@inproceedings{devault_toward_2015,
  title     = {Toward Natural Turn-Taking in a Virtual Human Negotiation Agent},
  author    = {David DeVault and Jonathan Mell and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Toward%20Natural%20Turn-Taking%20in%20a%20Virtual%20Human%20Negotiation%20Agent.pdf},
  year      = {2015},
  date      = {2015-03-01},
  booktitle = {AAAI Spring Symposium on Turn-taking and Coordination in Human-Machine Interaction},
  pages     = {2--9},
  publisher = {AAAI Press},
  address   = {Palo Alto, California},
  abstract  = {In this paper we assess our progress toward creating a virtual human negotiation agent with fluid turn-taking skills. To facilitate the design of this agent, we have collected a corpus of human-human negotiation roleplays as well as a corpus of Wizard-controlled human-agent negotiations in the same roleplay scenario. We compare the natural turn-taking behavior in our human-human corpus with that achieved in our Wizard-of-Oz corpus, and quantify our virtual human’s turn-taking skills using a combination of subjective and objective metrics. We also discuss our design for a Wizard user interface to support real-time control of the virtual human’s turntaking and dialogue behavior, and analyze our wizard’s usage of this interface.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ward, Nigel G.; DeVault, David
Ten Challenges in Highly-Interactive Dialog Systems Proceedings Article
In: Proceedings of AAAI 2015 Spring Symposium, Palo Alto, CA, 2015.
@inproceedings{ward_ten_2015,
  title     = {Ten Challenges in Highly-Interactive Dialog Systems},
  author    = {Nigel G. Ward and David DeVault},
  url       = {http://ict.usc.edu/pubs/Ten%20Challenges%20in%20Highly-Interactive%20Dialog%20Systems.pdf},
  year      = {2015},
  date      = {2015-03-01},
  booktitle = {Proceedings of AAAI 2015 Spring Symposium},
  address   = {Palo Alto, CA},
  abstract  = {Systems capable of highly-interactive dialog have recently been developed in several domains. This paper considers how to build on these successes to make systems more robust, easier to develop, more adaptable, and more scientifically significant.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Stratou, Giota; DeVault, David; Hartholt, Arno; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert "Skip"
SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications Proceedings Article
In: Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI), Austin, Texas, 2015.
@inproceedings{morency_simsensei_2015,
  title     = {SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications},
  author    = {Louis-Philippe Morency and Giota Stratou and David DeVault and Arno Hartholt and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert "Skip" Rizzo},
  url       = {http://ict.usc.edu/pubs/SimSensei%20Demonstration%20A%20Perceptive%20Virtual%20Human%20Interviewer%20for%20Healthcare%20Applications.pdf},
  year      = {2015},
  date      = {2015-01-01},
  booktitle = {Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI)},
  address   = {Austin, Texas},
  abstract  = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. We emphasize on the perception part of the system, a multimodal framework which captures and analyzes user state for both behavioral understanding and interactional purposes.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Corbin, Carina; Morbini, Fabrizio; Traum, David
Creating a Virtual Neighbor Proceedings Article
In: Proceedings of International Workshop on Spoken Dialogue Systems, Busan, South Korea, 2015.
@inproceedings{corbin_creating_2015,
  title     = {Creating a Virtual Neighbor},
  author    = {Carina Corbin and Fabrizio Morbini and David Traum},
  url       = {http://ict.usc.edu/pubs/Creating%20a%20Virtual%20Neighbor.pdf},
  year      = {2015},
  date      = {2015-01-01},
  booktitle = {Proceedings of International Workshop on Spoken Dialogue Systems},
  address   = {Busan, South Korea},
  abstract  = {We present the first version of our Virtual Neighbor, who can talk with users about people employed in the same institution. The Virtual Neighbor can discuss information about employees in a medium sized company or institute with users. The system acquires information from three sources: a personnel directory database, public web pages, and through dialogue interaction. Users can interact through face to face spoken dialogue, using components from the ICT Virtual human toolkit, or via a chat interface.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; DeVault, David
Pair Me Up: A Web Framework for Crowd-Sourced Spoken Dialogue Collection Proceedings Article
In: Proceedings of IWSDS 2015, pp. 1 –12, Busan, South Korea, 2015.
@inproceedings{manuvinakurike_pair_2015,
  title     = {Pair Me Up: A Web Framework for Crowd-Sourced Spoken Dialogue Collection},
  author    = {Ramesh Manuvinakurike and David DeVault},
  url       = {http://ict.usc.edu/pubs/Pair%20Me%20Up-%20A%20Web%20Framework%20for%20Crowd-Sourced%20Spoken%20Dialogue%20Collection.pdf},
  year      = {2015},
  date      = {2015-01-01},
  booktitle = {Proceedings of IWSDS 2015},
  pages     = {1--12},
  address   = {Busan, South Korea},
  abstract  = {We describe and analyze a new web-based spoken dialogue data collection framework. The framework enables the capture of conversational speech from two remote users who converse with each other and play a dialogue game entirely through their web browsers. We report on the substantial improvements in the speed and cost of data capture we have observed with this crowd-sourced paradigm. We also analyze a range of data quality factors by comparing a crowd-sourced data set involving 196 remote users to a smaller but more quality controlled lab-based data set. We focus our comparison on aspects that are especially important in our spoken dialogue research, including audio quality, the effect of communication latency on the interaction, our ability to synchronize the collected data, our ability to collect examples of excellent game play, and the naturalness of the resulting interactions. This analysis illustrates some of the current trade-offs between lab-based and crowd-sourced spoken dialogue data.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ron; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis-Philippe
Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent Proceedings Article
In: Proceedings of ICDVRAT 2014, International Journal of Disability and Human Development, Gothenburg, Sweden, 2014.
@inproceedings{rizzo_detection_2014,
  title     = {Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent},
  author    = {Albert Rizzo and Stefan Scherer and David DeVault and Jonathan Gratch and Ron Artstein and Arno Hartholt and Gale Lucas and Stacy Marsella and Fabrizio Morbini and Angela Nazarian and Giota Stratou and David Traum and Rachel Wood and Jill Boberg and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/Detection%20and%20Computational%20Analysis%20of%20Psychological%20Signals%20Using%20a%20Virtual%20Human%20Interviewing%20Agent.pdf},
  year      = {2014},
  date      = {2014-12-01},
  booktitle = {Proceedings of ICDVRAT 2014},
  publisher = {International Journal of Disability and Human Development},
  address   = {Gothenburg, Sweden},
  abstract  = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded “Detection and Computational Analysis of Psychological Signals” project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human to human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Park, Sunghyun; Shim, Han Suk; Chatterjee, Moitreya; Sagae, Kenji; Morency, Louis-Philippe
Computational Analysis of Persuasiveness in Social Multimedia: A Novel Dataset and Multimodal Prediction Approach Proceedings Article
In: Proceedings of the 16th International Conference on Multimodal Interaction, pp. 50–57, ACM Press, 2014, ISBN: 978-1-4503-2885-2.
@inproceedings{park_computational_2014,
  title     = {Computational Analysis of Persuasiveness in Social Multimedia: A Novel Dataset and Multimodal Prediction Approach},
  author    = {Sunghyun Park and Han Suk Shim and Moitreya Chatterjee and Kenji Sagae and Louis-Philippe Morency},
  url       = {http://dl.acm.org/citation.cfm?doid=2663204.2663260},
  doi       = {10.1145/2663204.2663260},
  isbn      = {978-1-4503-2885-2},
  year      = {2014},
  date      = {2014-11-01},
  booktitle = {Proceedings of the 16th International Conference on Multimodal Interaction},
  pages     = {50--57},
  publisher = {ACM Press},
  abstract  = {Our lives are heavily influenced by persuasive communication, and it is essential in almost any types of social interactions from business negotiation to conversation with our friends and family. With the rapid growth of social multimedia websites, it is becoming ever more important and useful to understand persuasiveness in the context of social multimedia content online. In this paper, we introduce our newly created multimedia corpus of 1,000 movie review videos obtained from a social multimedia website called ExpoTV.com, which will be made freely available to the research community. Our research results presented here revolve around the following 3 main research hypotheses. Firstly, we show that computational descriptors derived from verbal and nonverbal behavior can be predictive of persuasiveness. We further show that combining descriptors from multiple communication modalities (audio, text and visual) improve the prediction performance compared to using those from single modality alone. Secondly, we investigate if having prior knowledge of a speaker expressing a positive or negative opinion helps better predict the speaker's persuasiveness. Lastly, we show that it is possible to make comparable prediction of persuasiveness by only looking at thin slices (shorter time windows) of a speaker's behavior.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pincus, Eli; DeVault, David; Traum, David
Mr. Clue - A Virtual Agent that can Play Word-Guessing Games Proceedings Article
In: Proceedings of the 3rd Workshop on Games and NLP (GAMNLP-14), Raleigh, NC, 2014.
@inproceedings{pincus_mr_2014,
  title     = {Mr. Clue - A Virtual Agent that can Play Word-Guessing Games},
  author    = {Eli Pincus and David DeVault and David Traum},
  url       = {http://ict.usc.edu/pubs/Mr.%20Clue%20-%20A%20Virtual%20Agent%20that%20can%20Play%20Word-Guessing%20Games.pdf},
  year      = {2014},
  date      = {2014-10-01},
  booktitle = {Proceedings of the 3rd Workshop on Games and NLP (GAMNLP-14)},
  address   = {Raleigh, NC},
  abstract  = {This demonstration showcases a virtual agent, Mr. Clue, capable of acting in the role of clue-giver in a wordguessing game. The agent has the ability to automatically generate clues and update its dialogue policy dynamically based on user input.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nazarian, Angela; Nouri, Elnaz; Traum, David
Initiative Patterns in Dialogue Genres Proceedings Article
In: Proceedings of Semdial 2014, Edinburgh, UK, 2014.
@inproceedings{nazarian_initiative_2014,
  title     = {Initiative Patterns in Dialogue Genres},
  author    = {Angela Nazarian and Elnaz Nouri and David Traum},
  url       = {http://ict.usc.edu/pubs/Initiative%20Patterns%20in%20Dialogue%20Genres.pdf},
  year      = {2014},
  date      = {2014-09-01},
  booktitle = {Proceedings of Semdial 2014},
  address   = {Edinburgh, UK},
  abstract  = {One of the ways of distinguishing different dialogue genres is the differences in patterns of interactions between the participants. Morbini et al (2013) informally define dialogue genres on the basis of features like user vs system initiative, amongst other criteria. In this paper, we apply the multi-label initiative annotation scheme and related features from (Nouri and Traum, 2014) to a set of dialogue corpora from different domains. In our initial study, we examine two questionanswering domains, a “slot-filling” service application domain, and several human-human negotiation domains.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul S.; Sagae, Kenji; Demski, Abram
Distributed Vector Representations of Words in the Sigma Cognitive Architecture Proceedings Article
In: Proceedings of the 7th Conference on Artificial General Intelligence 2014, Québec City, Canada, 2014.
@inproceedings{ustun_distributed_2014,
  title     = {Distributed Vector Representations of Words in the Sigma Cognitive Architecture},
  author    = {Volkan Ustun and Paul S. Rosenbloom and Kenji Sagae and Abram Demski},
  url       = {http://ict.usc.edu/pubs/Distributed%20Vector%20Representations%20of%20Words%20in%20the%20Sigma%20Cognitive%20Architecture.pdf},
  year      = {2014},
  date      = {2014-08-01},
  booktitle = {Proceedings of the 7th Conference on Artificial General Intelligence 2014},
  address   = {Qu{\'e}bec City, Canada},
  abstract  = {Recently reported results with distributed-vector word representations in natural language processing make them appealing for incorporation into a general cognitive architecture like Sigma. This paper describes a new algorithm for learning such word representations from large, shallow information resources, and how this algorithm can be implemented via small modifications to Sigma. The effectiveness and speed of the algorithm are evaluated via a comparison of an external simulation of it with state-of-the-art algorithms. The results from more limited experiments with Sigma are also promising, but more work is required for it to reach the effectiveness and speed of the simulation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lubetich, Shannon; Sagae, Kenji
Data-driven Measurement of Child Language Development with Simple Syntactic Templates Proceedings Article
In: Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pp. 2151 – 2160, Dublin, Ireland, 2014.
@inproceedings{lubetich_data-driven_2014,
  title     = {Data-driven Measurement of Child Language Development with Simple Syntactic Templates},
  author    = {Shannon Lubetich and Kenji Sagae},
  url       = {http://ict.usc.edu/pubs/Data-driven%20Measurement%20of%20Child%20Language%20Development%20with%20Simple%20Syntactic%20Templates.pdf},
  year      = {2014},
  date      = {2014-08-01},
  booktitle = {Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers},
  pages     = {2151--2160},
  address   = {Dublin, Ireland},
  abstract  = {When assessing child language development, researchers have traditionally had to choose between easily computable metrics focused on superficial aspects of language, and more expressive metrics that are carefully designed to cover specific syntactic structures and require substantial and tedious labor. Recent work has shown that existing expressive metrics for child language development can be automated and produce accurate results. We go a step further and propose that measurement of syntactic development can be performed automatically in a completely data-driven way without the need for definition of language-specific inventories of grammatical structures. As a crucial step in that direction, we show that four simple feature templates are as expressive of language development as a carefully crafted standard inventory of grammatical structures that is commonly used and has been validated empirically.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zadeh, AmirAli B.; Sagae, Kenji; Morency, Louis Philippe
Towards Learning Nonverbal Identities from the Web: Automatically Identifying Visually Accentuated Words Proceedings Article
In: Intelligent Virtual Agents, pp. 496–503, Springer, Boston, MA, 2014.
@inproceedings{zadeh_towards_2014,
title = {Towards Learning Nonverbal Identities from the Web: Automatically Identifying Visually Accentuated Words},
author = {AmirAli B. Zadeh and Kenji Sagae and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Towards%20Learning%20Nonverbal%20Identities%20from%20the%20Web%20-%20Automatically%20Identifying%20Visually-Accentuated%20Words.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {496--503},
publisher = {Springer},
address = {Boston, MA},
abstract = {This paper presents a novel long-term idea to learn automatically from online multimedia content, such as videos from YouTube channels, a portfolio of nonverbal identities in the form of computational representation of prototypical gestures of a speaker. As a first step towards this vision, this paper presents proof-of-concept experiments to automatically identify visually accentuated words from a collection of online videos of the same person. The experimental results are promising with many accentuated words automatically identified and specific head motion patterns were associated with these words.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2005
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
Virtual Humans for non-team interaction training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids, Utrecht, Netherlands, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{traum_virtual_2005,
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
  title     = {Virtual Humans for non-team interaction training},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids},
  address   = {Utrecht, Netherlands},
  year      = {2005},
  date      = {2005-07-01},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20non-team%20interaction%20training.pdf},
  abstract  = {We describe a model of virtual humans to be used in training for non-team interactions, such as negotiating with people from other organizations. The virtual humans build on existing task, dialogue, and emotion models, with an added model of trust, which are used to understand and produce interactional moves. The model has been implemented within an agent in the SASO-ST system, and some example dialogues are given, illustrating the necessity for building social bonds.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ettaile, Emil; Gandhe, Sudeep; Georgiou, Panayiotis G.; Knight, Kevin; Marcu, Daniel; Narayanan, Shrikanth; Traum, David; Belvin, Robert
Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues Proceedings Article
In: Proceedings of the ACL Interactive Poster and Demonstration Sessions, pp. 89–92, Ann Arbor, MI, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{ettaile_transonics_2005,
title = {Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues},
author = {Emil Ettaile and Sudeep Gandhe and Panayiotis G. Georgiou and Kevin Knight and Daniel Marcu and Shrikanth Narayanan and David Traum and Robert Belvin},
url = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
year = {2005},
date = {2005-06-01},
booktitle = {Proceedings of the ACL Interactive Poster and Demonstration Sessions},
pages = {89--92},
address = {Ann Arbor, MI},
abstract = {We briefly describe a two-way speech-to-speech English-Farsi translation system prototype developed for use in doctor-patient interactions. The overarching philosophy of the developers has been to create a system that enables effective communication, rather than focusing on maximizing component-level performance. The discussion focuses on the general approach and evaluation of the system by an independent government evaluation team.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
A Computational Model of Dynamic Perceptual Attention for Virtual Humans Proceedings Article
In: Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation, Universal City, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{kim_computational_2005,
  author    = {Youngjun Kim and Randall W. Hill and David Traum},
  title     = {A Computational Model of Dynamic Perceptual Attention for Virtual Humans},
  booktitle = {Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Universal City, CA},
  year      = {2005},
  date      = {2005-05-01},
  url       = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Dynamic%20Perceptual%20Attention%20for%20Virtual%20Humans.pdf},
  abstract  = {An important characteristic of a virtual human is the ability to direct its perceptual attention to objects and locations in a virtual environment in a manner that looks believable and serves a functional purpose. We have developed a computational model of perceptual attention that mediates top-down and bottom-up attention processes of virtual humans in virtual environments. In this paper, we propose a perceptual attention model that will integrate perceptual attention toward objects and locations in the environment with the need to look at other parties in a social context.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nijholt, Anton; Traum, David
The Virtuality Continuum Revisited Proceedings Article
In: CHI 2005 Workshop on the Virtuality Continuum Revisited, Portland, OR, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{nijholt_virtuality_2005,
  author    = {Anton Nijholt and David Traum},
  title     = {The Virtuality Continuum Revisited},
  booktitle = {CHI 2005 Workshop on the Virtuality Continuum Revisited},
  address   = {Portland, OR},
  year      = {2005},
  date      = {2005-04-01},
  url       = {http://ict.usc.edu/pubs/The%20Virtuality%20Continuum%20Revisited.pdf},
  abstract  = {We survey the themes and the aims of a workshop devoted to the state-of-the-art virtuality continuum. In this continuum, ranging from fully virtual to real physical environments, allowing for mixed, augmented and desktop virtual reality, several perspectives can be taken. Originally, the emphasis was on display technologies. Here we take the perspective of the inhabited environment, that is, environments positioned somewhere on this continuum that are inhabited by virtual (embodied) agents, that interact with each other and with their human partners. Hence, we look at it from the multi-party interaction perspective. In this workshop we will investigate the current state of the art, its shortcomings and a future research agenda.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy and its linguistic manifestations Proceedings Article
In: Proceedings of Conference on Formal and Informal Negotiation (FINEXIN), Ottawa, Canada, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{martinovski_rejection_2005,
title = {Rejection of empathy and its linguistic manifestations},
author = {Bilyana Martinovski and David Traum and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20and%20its%20linguistic%20manifestations.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of Conference on Formal and Informal Negotiation (FINEXIN)},
address = {Ottawa, Canada},
abstract = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires whereas others have mainly strategic functions gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation not a breakdown.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dialog Simulation for Background Characters Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{jan_dialog_2005,
title = {Dialog Simulation for Background Characters},
author = {Dusan Jan and David Traum},
url = {http://ict.usc.edu/pubs/Dialog%20Simulation%20for%20Background%20Characters.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {5th International Working Conference on Intelligent Virtual Agents},
address = {Kos, Greece},
abstract = {Background characters in virtual environments do not require the same amount of processing that is usually required by main characters, however we want simulation that is more believable than random behavior. We describe an algorithm that generates behavior for background characters involved in conversation that supports dynamic changes to conversation group structure. We present an evaluation of this algorithm and make suggestions on how to further improve believability of the simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
Controlling the Focus of Perceptual Attention in Embodied Conversational Agents Proceedings Article
In: Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems, 2005, ISBN: 1-59593-093-0.
Abstract | Links | BibTeX | Tags:
@inproceedings{kim_controlling_2005,
  author    = {Youngjun Kim and Randall W. Hill and David Traum},
  title     = {Controlling the Focus of Perceptual Attention in Embodied Conversational Agents},
  booktitle = {Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems},
  year      = {2005},
  date      = {2005-01-01},
  isbn      = {1-59593-093-0},
  doi       = {10.1145/1082473.1082641},
  url       = {http://ict.usc.edu/pubs/Controlling%20the%20Focus%20of%20Perceptual%20Attention%20in%20Embodied%20Conversational%20Agents.pdf},
  abstract  = {In this paper, we present a computational model of dynamic perceptual attention for virtual humans. The computational models of perceptual attention that we surveyed fell into one of two camps: top-down and bottom-up. Biologically inspired computational models [2] typically focus on the bottom-up aspects of attention, while most virtual humans [1,3,7] implement a top-down form of attention. Bottom-up attention models only consider the sensory information without taking into consideration the saliency based on tasks or goals. As a result, the outcome of a purely bottom-up model will not consistently match the behavior of real humans in certain situations. Modeling perceptual attention as a purely top-down process, however, is also not sufficient for implementing a virtual human. A purely top-down model does not take into account the fact that virtual humans need to react to perceptual stimuli vying for attention. Top-down systems typically handle this in an ad hoc manner by encoding special rules to catch certain conditions in the environment. The problem with this approach is that it does not provide a principled way of integrating the ever-present bottom-up perceptual stimuli with top-down control of attention. This model extends the prior model [7] with perceptual resolution based on psychological theories of human perception [4]. This model allows virtual humans to dynamically interact with objects and other individuals, balancing the demands of goal-directed behavior with those of attending to novel stimuli. This model has been implemented and tested with the MRE Project [5].},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2004
Patel, Jigish; Parker, Robert; Traum, David
Simulation of Small Group Discussions for Middle Level of Detail Crowds Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{patel_simulation_2004,
  author    = {Jigish Patel and Robert Parker and David Traum},
  title     = {Simulation of Small Group Discussions for Middle Level of Detail Crowds},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  year      = {2004},
  date      = {2004-12-01},
  url       = {http://ict.usc.edu/pubs/Simulation%20of%20Small%20Group%20Discussions%20for%20Middle%20Level%20of%20Detail%20Crowds.pdf},
  abstract  = {We present an algorithm for animating middle level of detail crowds engaged in conversation. Based on previous work from Padilha and Carletta, this algorithm is used to provide gestures for group characters in an embedded virtual world. The algorithm is implemented and used within the Mission Rehearsal Exercise project at ICT to control Bosnian crowd members.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Leuski, Anton; Traum, David
First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{gandhe_first_2004,
title = {First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers},
author = {Sudeep Gandhe and Andrew S. Gordon and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/First%20Steps%20Toward%20Linking%20Dialogues-%20Mediating%20Between%20Free-text%20Questions%20and%20Pre-recorded%20Video%20Answers.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {Pre-recorded video segments can be very compelling for a variety of immersive training purposes, including providing answers to questions in after-action reviews. Answering questions fluently using pre-recorded video poses challenges, however. When humans interact, answers are constructed after questions are posed. When answers are pre-recorded, even if a correct answer exists in a library of video segments, the answer may be phrased in a way that is not coherent with the question. This paper reports on basic research experiments with short "linking dialogues" that mediate between the question and answer to reduce (or eliminate) the incoherence, resulting in more natural human-system interaction. A set of experiments were performed in which links were elicited to bridge between questions from users of an existing training application and selected answers from the system, and then comparisons made with unlinked answers. The results show that a linking dialogue can significantly increase the perceived relevance of the system's answers.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Muller, T. J.; Hartholt, Arno; Marsella, Stacy C.; Gratch, Jonathan; Traum, David
Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{muller_you_2004,
title = {Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue},
author = {T. J. Muller and Arno Hartholt and Stacy C. Marsella and Jonathan Gratch and David Traum},
url = {http://ict.usc.edu/pubs/Do%20you%20want%20to%20talk%20about%20it.pdf},
year = {2004},
date = {2004-08-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Kloster Irsee, Germany},
abstract = {In this paper, we describe an implemented system for emotion-referring dialogue. An agent can engage in emotion-referring dialogue if it first has a model of its own emotions, and secondly has a way of talking about them. We create this facility in MRE Project's virtual humans, building upon the existing emotion and dialogue facilities of these agents.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Patel, Jigish; Parker, Robert; Traum, David
Small group discussion simulation for middle Level of Detail Crowds Proceedings Article
In: 8th Workshop on Semantics and Pragmatics of Dialogue, Barcelona, Spain, 2004.
@inproceedings{patel_small_2004,
  author    = {Jigish Patel and Robert Parker and David Traum},
  title     = {Small group discussion simulation for middle Level of Detail Crowds},
  booktitle = {8th Workshop on Semantics and Pragmatics of Dialogue},
  address   = {Barcelona, Spain},
  year      = {2004},
  date      = {2004-07-01},
  url       = {http://ict.usc.edu/pubs/Small%20group%20discussion%20simulation%20for%20middle%20Level%20of%20Detail%20Crowds.pdf},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Robinson, Susan; Stephan, Jens
Evaluation of multi-party virtual reality dialogue interaction Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{traum_evaluation_2004,
  author    = {David Traum and Susan Robinson and Jens Stephan},
  title     = {Evaluation of multi-party virtual reality dialogue interaction},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Lisbon, Portugal},
  year      = {2004},
  date      = {2004-05-01},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20multi-party%20virtual%20reality%20dialogue%20interaction.pdf},
  abstract  = {We describe a dialogue evaluation plan for a multi-character virtual reality training simulation. A multi-component evaluation plan is presented, including user satisfaction, intended task completion, recognition rate, and a new annotation scheme for appropriateness. Preliminary results for formative tests are also presented.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Garg, Saurabh; Martinovski, Bilyana; Robinson, Susan; Stephan, Jens; Tetreault, Joel; Traum, David
Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{garg_evaluation_2004,
title = {Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus},
author = {Saurabh Garg and Bilyana Martinovski and Susan Robinson and Jens Stephan and Joel Tetreault and David Traum},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Transcription%20and%20Annotation%20tools%20for%20a%20Multi-modal,%20Multi-party%20dialogue%20corpus.pdf},
year = {2004},
date = {2004-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {This paper reviews nine available transcription and annotation tools, considering in particular the special difficulties arising from transcribing and annotating multi-party, multi-modal dialogue. Tools are evaluated as to the ability to support the user's annotation scheme, ability to visualize the form of the data, compatibility with other tools, flexibility of data representation, and general user-friendliness.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Marsella, Stacy C.; Gratch, Jonathan
Emotion and Dialogue in the MRE Virtual Humans Proceedings Article
In: Lecture Notes in Computer Science, pp. 117–127, Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{traum_emotion_2004,
title = {Emotion and Dialogue in the MRE Virtual Humans},
author = {David Traum and Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emotion%20and%20Dialogue%20in%20the%20MRE%20Virtual%20Humans.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {Lecture Notes in Computer Science},
volume = {3068},
pages = {117--127},
address = {Kloster Irsee, Germany},
abstract = {We describe the emotion and dialogue aspects of the virtual agents used in the MRE project at USC. The models of emotion and dialogue started independently, though each makes crucial use of a central task model. In this paper we describe the task model, dialogue model, and emotion model, and the interactions between them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Issues in Multiparty Dialogues Journal Article
In: Advances in Agent Communication, 2004.
Abstract | Links | BibTeX | Tags:
@article{traum_issues_2004,
title = {Issues in Multiparty Dialogues},
author = {David Traum},
editor = {F. Dignum},
url = {http://ict.usc.edu/pubs/Issues%20in%20Multiparty%20Dialogues.pdf},
year = {2004},
date = {2004-01-01},
journal = {Advances in Agent Communication},
abstract = {This article examines some of the issues in representation of, processing, and automated agent participation in natural language dialogue, considering expansion from two-party dialogue to multi-party dialogue. These issues include some regarding the roles agents play in dialogue, interactive factors, and content management factors.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Robinson, Susan; Martinovski, Bilyana; Garg, Saurabh; Stephan, Jens; Traum, David
Issues in corpus development for multi-party multi-modal task-oriented dialogue Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{robinson_issues_2004,
  author    = {Susan Robinson and Bilyana Martinovski and Saurabh Garg and Jens Stephan and David Traum},
  title     = {Issues in corpus development for multi-party multi-modal task-oriented dialogue},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Lisbon, Portugal},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Issues%20in%20corpus%20development%20for%20multi-party%20multi-modal%20task-oriented%20dialogue.pdf},
  abstract  = {This paper describes the development of a multi-modal corpus based on multi-party multi-task driven common goal oriented spoken language interaction. The data consists of approximately 10 hours of audio human simulation radio data and nearly 5 hours of video and audio face-to-face sessions between human trainees and virtual agents.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2003
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Traum, David; Wang, D.
Transonics: A Speech to Speech System for English-Persian Interactions Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop, U.S. Virgin Islands, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{narayanan_transonics_2003,
title = {Transonics: A Speech to Speech System for English-Persian Interactions},
author = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and David Traum and D. Wang},
url = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
year = {2003},
date = {2003-12-01},
booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop},
address = {U.S. Virgin Islands},
abstract = {In this paper we describe the first phase of development of our speech-to-speech system between English and Modern Persian under the DARPA Babylon program. We give an overview of the various system components: the front end ASR, the machine translation system and the speech generation system. Challenges such as the sparseness of available spoken language data and solutions that have been employed to maximize the obtained benefits from using these limited resources are examined. Efforts in the creation of the user interface and the underlying dialog management system for mediated communication are described.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Robinson, Susan; Garg, Saurabh
Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio Proceedings Article
In: Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue), Saarbruecken Germany, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{martinovski_functions_2003,
title = {Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio},
author = {Bilyana Martinovski and David Traum and Susan Robinson and Saurabh Garg},
url = {http://ict.usc.edu/pubs/Functions%20and%20Patterns%20of%20Speaker%20and%20Addressee%20Identifications%20in%20Distributed%20Complex%20Organizational%20Tasks%20Over%20Radio.pdf},
year = {2003},
date = {2003-09-01},
booktitle = {Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue)},
address = {Saarbruecken Germany},
abstract = {In multiparty dialogue speakers must identify who they are addressing (at least to the addressee, and perhaps to overhearers as well). In non face-to-face situations, even the speaker's identity can be unclear. For talk within organizational teams working on critical tasks, such miscommunication must be avoided, and so organizational conventions have been adopted to signal addressee and speaker, (e.g., military radio communications). However, explicit guidelines, such as provided by the military are not always exactly followed (see also (Churcher et al., 1996)). Moreover, even simple actions like identifications of speaker and hearer can be performed in a variety of ways, for a variety of purposes. The purpose of this paper is to contribute to the understanding and predictability of identifications of speaker and addressee in radio mediated organization of work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Gratch, Jonathan; Marsella, Stacy C.; Swartout, William; Traum, David
Virtual Humans in the Mission Rehearsal Exercise System Proceedings Article
In: Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents), 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{hill_virtual_2003,
title = {Virtual Humans in the Mission Rehearsal Exercise System},
author = {Randall W. Hill and Jonathan Gratch and Stacy C. Marsella and William Swartout and David Traum},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20in%20the%20Mission%20Rehearsal%20Exercise%20System.pdf},
year = {2003},
date = {2003-06-01},
booktitle = {K{\"u}nstliche Intelligenz (KI) (special issue on Embodied Conversational Agents)},
abstract = {How can simulation be made more compelling and effective as a tool for learning? This is the question that the Institute for Creative Technologies (ICT) set out to answer when it was formed at the University of Southern California in 1999, to serve as a nexus between the simulation and entertainment communities. The ultimate goal of the ICT is to create the Experience Learning System (ELS), which will advance the state of the art in virtual reality immersion through use of high-resolution graphics, immersive audio, virtual humans and story-based scenarios. Once fully realized, ELS will make it possible for participants to enter places in time and space where they can interact with believable characters capable of conversation and action, and where they can observe and participate in events that are accessible only through simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Fleischman, Michael; Hovy, Eduard
NL Generation for Virtual Humans in a Complex Social Environment Proceedings Article
In: AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue, pp. 151–158, 2003.
@inproceedings{traum_nl_2003,
title = {NL Generation for Virtual Humans in a Complex Social Environment},
author = {David Traum and Michael Fleischman and Eduard Hovy},
url = {http://ict.usc.edu/pubs/NL%20Generation%20for%20Virtual%20Humans%20in%20a%20Complex%20Social%20Environment.pdf},
year = {2003},
date = {2003-03-01},
booktitle = {AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue},
pages = {151--158},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Habash, Nizar; Dorr, Bonnie; Traum, David
Hybrid Natural Language Generation from Lexical Conceptual Structures Journal Article
In: Machine Translation, vol. 18, pp. 81–127, 2003.
Abstract | Links | BibTeX | Tags:
@article{habash_hybrid_2003,
title = {Hybrid Natural Language Generation from Lexical Conceptual Structures},
author = {Nizar Habash and Bonnie Dorr and David Traum},
url = {http://ict.usc.edu/pubs/Hybrid%20Natural%20Language%20Generation%20from%20Lexical%20%20Conceptual%20Structures.pdf},
year = {2003},
date = {2003-01-01},
journal = {Machine Translation},
volume = {18},
pages = {81--127},
abstract = {This paper describes Lexogen, a system for generating natural-language sentences from Lexical Conceptual Structure, an interlingual representation. The system has been developed as part of a Chinese–English Machine Translation (MT) system; however, it is designed to be used for many other MT language pairs and natural language applications. The contributions of this work include: (1) development of a large-scale Hybrid Natural Language Generation system with language-independent components; (2) enhancements to an interlingual representation and associated algorithm for generation from ambiguous input; (3) development of an efficient reusable language-independent linearization module with a grammar description language that can be used with other systems; (4) improvements to an earlier algorithm for hierarchically mapping thematic roles to surface positions; and (5) development of a diagnostic tool for lexicon coverage and correctness and use of the tool for verification of English, Spanish, and Chinese lexicons. An evaluation of Chinese–English translation quality shows comparable performance with a commercial translation system. The generation system can also be extended to other languages and this is demonstrated and evaluated for Spanish.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Traum, David; Larsson, Staffan
The Information State Approach to Dialogue Management Book Section
In: Current and New Directions in Discourse and Dialogue, pp. 325–353, 2003.
@incollection{traum_information_2003,
title = {The Information State Approach to Dialogue Management},
author = {David Traum and Staffan Larsson},
url = {http://ict.usc.edu/pubs/The%20Information%20State%20Approach%20to%20Dialogue%20Management.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Current and New Directions in Discourse and Dialogue},
pages = {325--353},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Martinovski, Bilyana; Traum, David
The Error Is the Clue: Breakdown In Human-Machine Interaction Proceedings Article
In: Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association, Switzerland, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{martinovski_error_2003,
title = {The Error Is the Clue: Breakdown In Human-Machine Interaction},
author = {Bilyana Martinovski and David Traum},
url = {http://ict.usc.edu/pubs/The%20Error%20Is%20the%20Clue-%20Breakdown%20In%20Human-Machine%20Interaction.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association},
address = {Switzerland},
abstract = {This paper focuses not on the detection and correction of specific errors in the interaction between machines and humans, but rather cases of massive deviation from the user's conversational expectations and desires. This can be the result of too many or too unusual errors, but also from dialogue strategies designed to minimize error, which make the interaction unnatural in other ways. We study causes of irritation such as over-fragmentation, over-clarity, over-coordination, over-directedness, and repetitiveness of verbal action, syntax, and intonation. Human reactions to these irritating features typically appear in the following order: tiredness, tolerance, anger, confusion, irony, humor, exhaustion, uncertainty, lack of desire to communicate. The studied features of human expressions of irritation in non-face-to-face interaction are: intonation, emphatic speech, elliptic speech, speed of speech, extra-linguistic signs, speed of verbal action, and overlap.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Semantics and Pragmatics of Questions and Answers for Dialogue Agents Proceedings Article
In: International Workshop on Computational Semantics, 2003.
@inproceedings{traum_semantics_2003,
  title     = {Semantics and Pragmatics of Questions and Answers for Dialogue Agents},
  author    = {Traum, David},
  url       = {http://ict.usc.edu/pubs/Semantics%20and%20Pragmatics%20of%20Questions%20and%20Answers%20for%20Dialogue%20Agents.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Workshop on Computational Semantics},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff; Gratch, Jonathan; Marsella, Stacy C.
Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 441–448, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{traum_negotiation_2003,
title = {Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training},
author = {David Traum and Jeff Rickel and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Negotiation%20over%20Tasks%20in%20Hybrid%20Human-Agent%20Teams%20for%20Simulation-Based%20Training.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
pages = {441--448},
address = {Melbourne, Australia},
abstract = {The effectiveness of simulation-based training for individual tasks – such as piloting skills – is well established, but its use for team training raises challenging technical issues. Ideally, human users could gain valuable leadership experience by interacting with synthetic teammates in realistic and potentially stressful scenarios. However, creating human-like teammates that can support flexible, natural interactions with humans and other synthetic agents requires integrating a wide variety of capabilities, including models of teamwork, models of human negotiation, and the ability to participate in face-to-face spoken conversations in virtual worlds. We have developed such virtual humans by integrating and extending prior work in these areas, and we have applied our virtual humans to an example peacekeeping training scenario to guide and evaluate our research. Our models allow agents to reason about authority and responsibility for individual actions in a team task and, as appropriate, to carry out actions, give and accept orders, monitor task execution, and negotiate options. Negotiation is guided by the agents' dynamic assessment of alternative actions given the current scenario conditions, with the aim of guiding the human user towards an ability to make similar assessments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2002
Traum, David; Rickel, Jeff
Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
@inproceedings{traum_embodied_2002,
  title     = {Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds},
  author    = {Traum, David and Rickel, Jeff},
  url       = {http://ict.usc.edu/pubs/Embodied%20Agents%20for%20Multi-party%20Dialogue%20in%20Immersive%20%20Virtual%20Worlds.pdf},
  year      = {2002},
  date      = {2002-07-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Bologna, Italy},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David
Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication Proceedings Article
In: Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting, 2002.
@inproceedings{traum_ideas_2002,
title = {Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Ideas%20on%20Multi-layer%20Dialogue%20Management%20for%20Multi-party,%20Multi-conversation,%20Multi-modal%20Communication.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickel, Jeff; Marsella, Stacy C.; Gratch, Jonathan; Hill, Randall W.; Traum, David; Swartout, William
Toward a New Generation of Virtual Humans for Interactive Experiences Journal Article
In: IEEE Intelligent Systems, 2002.
@article{rickel_toward_2002,
  title     = {Toward a New Generation of Virtual Humans for Interactive Experiences},
  author    = {Rickel, Jeff and Marsella, Stacy C. and Gratch, Jonathan and Hill, Randall W. and Traum, David and Swartout, William},
  url       = {http://ict.usc.edu/pubs/Toward%20a%20New%20Generation%20of%20Virtual%20Humans%20for%20Interactive%20Experiences.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Intelligent Systems},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2001
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated on this environment, the conflict elaboration, and the problem solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Olsen, Mari; Traum, David; Ess-Dykema, Carol Van; Weinberg, Amy
Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System Proceedings Article
In: Machine Translation Summit VIII, Santiago de Compostela, Spain, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{olsen_implicit_2001,
title = {Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System},
author = {Mari Olsen and David Traum and Carol Van Ess-Dykema and Amy Weinberg},
url = {http://ict.usc.edu/pubs/Implicit%20Cues%20for%20Explicit%20Generation-%20Using%20Telicity%20as%20a%20Cue%20for%20Tense%20Structure%20in%20Chinese%20to%20English%20MT%20System.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Machine Translation Summit VIII},
address = {Santiago de Compostela, Spain},
abstract = {In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Tense information is crucial to providing accurate and fluent translations into English. Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is optional in Chinese and is not present in the majority of sentences. We report on a project that assesses the relative contribution of the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text, versus more overt aspectual and adverbial markers of tense, to suggest tense structure in the English translation of a Chinese newspaper corpus. Incorporating this information allows a 20% to 35% boost in the accuracy of tense realization with the best accuracy rate of 92% on a corpus of Chinese articles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Damiano, Rossana; Traum, David
Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems Proceedings Article
In: NAACL 2001 Workshop on Adaptation in Dialogue Systems, 2001.
@inproceedings{damiano_anticipatory_2001,
  title     = {Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems},
  author    = {Damiano, Rossana and Traum, David},
  url       = {http://ict.usc.edu/pubs/Anticipatory%20planning%20for%20decision-theoretic%20grounding%20and%20task%20advancement%20in%20mixed-initiative%20dialogue%20systems.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {NAACL 2001 Workshop on Adaptation in Dialogue Systems},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
1999
Traum, David; Andersen, Carl F.; Chong, Waiyian; Josyula, Darsana; Okamoto, Yoshi; Purang, Khemdut; O'Donovan-Anderson, Michael; Perlis, Don
Representations of Dialogue State for Domain and Task Independent Meta-Dialogue Journal Article
In: Electronic Transactions on Artificial Intelligence, vol. 3, pp. 125–152, 1999.
Abstract | Links | BibTeX | Tags:
@article{traum_representations_1999,
title = {Representations of Dialogue State for Domain and Task Independent Meta-Dialogue},
author = {David Traum and Carl F. Andersen and Waiyian Chong and Darsana Josyula and Yoshi Okamoto and Khemdut Purang and Michael O'Donovan-Anderson and Don Perlis},
url = {http://ict.usc.edu/pubs/Representations%20of%20Dialogue%20State%20for%20Domain%20and%20Task%20Independent%20Meta-Dialogue.pdf},
year = {1999},
date = {1999-01-01},
journal = {Electronic Transactions on Artificial Intelligence},
volume = {3},
pages = {125--152},
abstract = {We propose a representation of local dialogue context motivated by the need to react appropriately to meta-dialogue, such as various sorts of corrections to the sequence of an instruction and response action. Such contexts include at least the following aspects: the words and linguistic structures uttered, the domain correlates of those linguistic structures, and plans and actions in response. Each of these is needed as part of the context in order to be able to correctly interpret the range of possible corrections. Partitioning knowledge of dialogue structure in this way may lead to an ability to represent generic dialogue structure (e.g., in the form of axioms), which can be particularized to the domain, topic and content of the dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}