@comment{Scraped web-page index, redundant with the structured entries below: Roccabruna G., Cervone A. and Riccardi G. Multifunctional ISO standard Dialogue Act tagging in Italian (Article) 2020. (Links | BibTeX | Tags: Discourse, Natural Language Processing) Tammewar A., Cervone A. and Riccardi G. Emotion Carrier Recognition from Personal Narratives (Article) 2020. (Links | BibTeX | Tags: Affective Computing, Natural Language Processing) Cervone A. and Riccardi G. Is This Dialogue Coherent ? Learning From Dialogue Acts and Entities (Article) 2020. (Links | BibTeX | Tags: Conversational and Interactive Systems , Discourse, Natural Language Processing) Celli F., Ghosh A., Alam F. and Riccardi G. Information Processing and Management, Nov 2015, 2015. (Abstract | Links | BibTeX | Tags: Machine Learning, Natural Language Processing, Signal Annotation and Interpretation) De Mori R., Bechet F., Hakkani-Tur D., McTear M., Riccardi G. and Tur G. Spoken Language Understanding (Article) IEEE Signal Processing Magazine vol. 25, pp.50-58 ,2008, 2008. (BibTeX | Tags: Machine Learning, Natural Language Processing, Speech Processing) Hakkani-Tur D., Riccardi G. and Tur G. An Active Approach to spoken Language Processing (Article) ACM Transactions on Speech and Language Processing, Vol. 3, No. 3, pp 1-31, 2006, 2006. (Abstract | Links | BibTeX | Tags: Machine Learning, Natural Language Processing, Speech Processing) Gupta N., Tur G., Hakkani-Tur D., Bangalore S., Riccardi G. and Rahim M. The AT&T Spoken Language Understanding System (Article) IEEE Trans. on Audio, Speech and Language Processing, volume 14, Issue 1, pp. 213-22, 2006, 2006. (Abstract | Links | BibTeX | Tags: Machine Learning, Natural Language Processing, Speech Processing) -- 2020}
@inproceedings{roccabruna2020multifunctional,
  title     = {Multifunctional {ISO} Standard Dialogue Act Tagging in {Italian}},
  author    = {Roccabruna, G. and Cervone, A. and Riccardi, G.},
  booktitle = {Seventh Italian Conference on Computational Linguistics ({CLiC-it})},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2020/12/Clicit20-ISODAItalian.pdf},
  year      = {2020},
  date      = {2020-11-02},
  keywords  = {Discourse, Natural Language Processing},
}
@misc{tammewar2020emotion,
  title      = {Emotion Carrier Recognition from Personal Narratives},
  author     = {Tammewar, A. and Cervone, A. and Riccardi, G.},
  eprint     = {2008.07481},
  eprinttype = {arXiv},
  url        = {https://sisl.disi.unitn.it/wp-content/uploads/2020/12/2008.07481.pdf},
  year       = {2020},
  date       = {2020-08-17},
  keywords   = {Affective Computing, Natural Language Processing},
}
@inproceedings{cervone2020coherent,
  title     = {Is This Dialogue Coherent? Learning from Dialogue Acts and Entities},
  author    = {Cervone, A. and Riccardi, G.},
  booktitle = {Proceedings of the Annual Meeting of the Special Interest Group on Discourse and Dialogue ({SIGDIAL})},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2020/12/SIGDIAL20-DialogueCoherence.pdf},
  year      = {2020},
  date      = {2020-06-01},
  keywords  = {Conversational and Interactive Systems, Discourse, Natural Language Processing},
}
@comment{2015}
@article{celli2015mood,
  title    = {In the Mood for Sharing Contents: Emotions, Personality and Interaction Styles in the Diffusion of News},
  author   = {Celli, F. and Ghosh, A. and Alam, F. and Riccardi, G.},
  journal  = {Information Processing and Management},
  url      = {https://sisl.disi.unitn.it/wp-content/uploads/2015/11/IPM15-MoodSharing.pdf},
  year     = {2015},
  date     = {2015-11-01},
  abstract = {In this paper, we analyze the influence of Twitter users in sharing news articles that may affect the readers' mood. We collected data of more than 2000 Twitter users who shared news articles from Corriere.it, a daily newspaper that provides mood metadata annotated by readers on a voluntary basis. We automatically annotated personality types and communication styles of Twitter users and analyzed the correlations between personality, communication style, Twitter metadata (such as following and followers) and the type of mood associated to the articles they shared. We also run a feature selection task, to find the best predictors of positive and negative mood sharing, and a classification task. We automatically predicted positive and negative mood sharers with 61.7\% F1-measure.},
  keywords = {Machine Learning, Natural Language Processing, Signal Annotation and Interpretation},
}
@comment{2008}
@article{demori2008spoken,
  title    = {Spoken Language Understanding},
  author   = {De Mori, R. and Bechet, F. and Hakkani-Tur, D. and McTear, M. and Riccardi, G. and Tur, G.},
  journal  = {IEEE Signal Processing Magazine},
  volume   = {25},
  pages    = {50--58},
  year     = {2008},
  date     = {2008-01-01},
  keywords = {Machine Learning, Natural Language Processing, Speech Processing},
}
@comment{2006}
@article{hakkanitur2006active,
  title    = {An Active Approach to Spoken Language Processing},
  author   = {Hakkani-Tur, D. and Riccardi, G. and Tur, G.},
  journal  = {ACM Transactions on Speech and Language Processing},
  volume   = {3},
  number   = {3},
  pages    = {1--31},
  url      = {https://sisl.disi.unitn.it/wp-content/uploads/2014/11/acm-tslp-06.pdf},
  year     = {2006},
  date     = {2006-01-01},
  abstract = {State of the art data-driven speech and language processing systems require a large amount of human intervention ranging from data annotation to system prototyping. In the traditional supervised passive approach, the system is trained on a given number of annotated data samples and evaluated using a separate test set. Then more data is collected arbitrarily, annotated, and the whole cycle is repeated. In this article, we propose the active approach where the system itself selects its own training data, evaluates itself and re-trains when necessary. We first employ active learning which aims to automatically select the examples that are likely to be the most informative for a given task. We use active learning for both selecting the examples to label and the examples to re-label in order to correct labeling errors. Furthermore, the system automatically evaluates itself using active evaluation to keep track of the unexpected events and decides on-demand to label more examples. The active approach enables dynamic adaptation of spoken language processing systems to unseen or unexpected events for nonstationary input while reducing the manual annotation effort significantly. We have evaluated the active approach with the AT\&T spoken dialog system used for customer care applications. In this article, we present our results for both automatic speech recognition and spoken language understanding. Categories and Subject Descriptors: I.2.7 [Artificial Intelligence]: Natural Language Processing---Speech recognition and synthesis; I.5.1 [Pattern Recognition]: Models---Statistical General Terms: Algorithms, Languages, Performance Additional Key Words and Phrases: Passive learning, active learning, adaptive learning, unsupervised learning, active evaluation, spoken language understanding, automatic speech recognition, spoken dialog systems, speech and language processing},
  keywords = {Machine Learning, Natural Language Processing, Speech Processing},
}
@article{gupta2006att,
  title    = {The {AT\&T} Spoken Language Understanding System},
  author   = {Gupta, N. and Tur, G. and Hakkani-Tur, D. and Bangalore, S. and Riccardi, G. and Rahim, M.},
  journal  = {IEEE Transactions on Audio, Speech and Language Processing},
  volume   = {14},
  number   = {1},
  pages    = {213--222},
  url      = {https://sisl.disi.unitn.it/wp-content/uploads/2014/11/IEEE-SAP-2005-SLU.pdf},
  year     = {2006},
  date     = {2006-01-01},
  abstract = {Spoken language understanding (SLU) aims at extracting meaning from natural language speech. Over the past decade, a variety of practical goal-oriented spoken dialog systems have been built for limited domains. SLU in these systems ranges from understanding predetermined phrases through fixed grammars, extracting some predefined named entities, extracting users' intents for call classification, to combinations of users' intents and named entities. In this paper, we present the SLU system of VoiceTone (a service provided by AT\&T where AT\&T develops, deploys and hosts spoken dialog applications for enterprise customers). The SLU system includes extracting both intents and the named entities from the users' utterances. For intent determination, we use statistical classifiers trained from labeled data, and for named entity extraction we use rule-based fixed grammars. The focus of our work is to exploit data and to use machine learning techniques to create scalable SLU systems which can be quickly deployed for new domains with minimal human intervention. These objectives are achieved by 1) using the predicate-argument representation of semantic content of an utterance; 2) extending statistical classifiers to seamlessly integrate hand crafted classification rules with the rules learned from data; and 3) developing an active learning framework to minimize the human labeling effort for quickly building the classifier models and adapting them to changes. We present an evaluation of this system using two deployed applications of VoiceTone},
  keywords = {Machine Learning, Natural Language Processing, Speech Processing},
}