@comment{Scraped publication-index residue (site navigation text), retained verbatim:
Dias RD, Conboy HM, Gabany JM, Clarke LA, Osterweil LJ, Arney D, Goldman JM, Riccardi G, Avrunin GS, Yule SJ, Zenati MA. Intelligent Interruption Management System to Enhance Safety and Performance in Complex Surgical and Robotic Procedures (Proceeding) 2018. (BibTeX | Tags: Interactive Systems, Machine Learning) Dias R., Conboy M. H., Gabany M. J., Clarke A. L. , Osterweil J. L., Avrunin S. G., Arney D., Goldman M. J., Riccardi G., Yule J. S., Zenati A. M. 2018. (Links | BibTeX | Tags: Health Analytics, Interactive Systems, Machine Learning, Signal Annotation and Interpretation) Ghosh A., Stepanov E. A., Danieli M., and Riccardi G. Are You Stressed? Detecting High Stress from User Diaries (Proceeding) 8th IEEE International Conference on Cognitive Infocommunications (CogInfoCom 2017) • September 11-14, 2017 • Debrecen, Hungary, 2017. (Abstract | Links | BibTeX | Tags: Health Analytics, Interactive Systems) Mayor Torres Juan M., Stepanov A. E. WI '17 Proceedings of the International Conference on Web Intelligence Pages 939-946, Leipzig, Germany - August 23 - 26, 2017, 2017. (Abstract | Links | BibTeX | Tags: Affective Computing, Interactive Systems, Signal Annotation and Interpretation) Mogessie A. M., Ronchetti M., Riccardi G. Exploring the Role of Online Peer-Assessment as a Tool of Early Intervention (Article) In Wu, Gennari, Huang, Xie and Cao Y. (eds) Emerging Technologies for Education, Lecture Notes in Computer Science, vol 10108, pp. 635-644, 2017, 2017. (Abstract | Links | BibTeX | Tags: Interactive Systems) Bayer A. O., Stepanov A. E. and Riccardi G. Towards End-to-End Spoken Dialogue Systems (Proceeding) Proc. INTERSPEECH , Stockholm, 2017, 2017. (Abstract | Links | BibTeX | Tags: Interactive Systems, Speech Processing) Cervone A., Tortoreto G., Mezza S., Gambi E. and Riccardi G Roving Mind: a balancing act between open–domain and engaging dialogue systems (Conference) 2017.
(Links | BibTeX | Tags: Conversational and Interactive Systems , Interactive Systems, Machine Learning, Natural Language Processing, Speech Processing) Alam F. , Chowdhury S. , Danieli M. and Riccardi G. How Interlocutors Coordinate with each other within Emotional Segments? (Proceeding) Proc. COLING, Osaka, 2016., 2016. (Abstract | Links | BibTeX | Tags: Affective Computing, Conversational and Interactive Systems , Discourse, Interactive Systems) Alam F. , Danieli M.. and Riccardi G. Can We Detect Speakers' Empathy? A Real-Life Case Study (Proceeding) Proc. IEEE International Conference on Cognitive Infocommunications, Wrocław, 2016, 2016. (Abstract | Links | BibTeX | Tags: Discourse, Interactive Systems) Mogessie M., Ronchetti M. and Riccardi G. Exploring the Role of Online Peer-Assessment as a Tool of Early Intervention (Proceeding) Proc. International Conference on Web-based Learning, Rome, 2016., 2016. (Abstract | Links | BibTeX | Tags: Interactive Systems) Chowdhury S. , Stepanov A. E. and Riccardi G. Predicting User Satisfaction from Turn-Taking in Spoken Conversations (Proceeding) Proc. INTERSPEECH, San Francisco, 2016., 2016. (Abstract | Links | BibTeX | Tags: Affective Computing, Conversational and Interactive Systems , Discourse, Interactive Systems, Signal Annotation and Interpretation, Speech Processing) Mogessie M., Riccardi G. and Ronchetti M. A Web Based Peer Interaction Framework for Improved Assessment and Supervision of Students (Conference) 2014. (Abstract | Links | BibTeX | Tags: Education Analytics, Interactive Systems) -- year section: 2018
}
@inproceedings{dias2018interruption,
  title         = {Intelligent Interruption Management System to Enhance Safety and Performance in Complex Surgical and Robotic Procedures},
  author        = {Dias, R. D. and Conboy, H. M. and Gabany, J. M. and Clarke, L. A. and Osterweil, L. J. and Arney, D. and Goldman, J. M. and Riccardi, G. and Avrunin, G. S. and Yule, S. J. and Zenati, M. A.},
  booktitle     = {Proc. Workshop on OR 2.0 Context-Aware Operating Theaters, Computer Assisted Robotic Endoscopy, Clinical Image-Based Procedures, and Skin Image Analysis, Granada},
  year          = {2018},
  date          = {2018-01-01},
  keywords      = {Interactive Systems, Machine Learning},
  internal-note = {Entry header was missing in scraped source; workshop name was in the editor field and has been moved to booktitle},
}
@inproceedings{dias2018dashboard,
  title         = {Development of an Interactive Dashboard to Analyze Cognitive Workload of Surgical Teams During Complex Procedural Care},
  author        = {Dias, R. D. and Conboy, H. M. and Gabany, J. M. and Clarke, L. A. and Osterweil, L. J. and Avrunin, G. S. and Arney, D. and Goldman, J. M. and Riccardi, G. and Yule, S. J. and Zenati, M. A.},
  booktitle     = {IEEE Conf. on Cognitive and Computational Aspects of Situation Management, Boston},
  url           = {https://sisl.disi.unitn.it/wp-content/uploads/2019/11/COGSIMA18ContextAwareDashboardSurgicalTeam.pdf},
  year          = {2018},
  date          = {2018-01-01},
  keywords      = {Health Analytics, Interactive Systems, Machine Learning, Signal Annotation and Interpretation},
  internal-note = {Scraped source gave initials reversed (e.g. "Conboy M. H."); normalized to the order used in the companion 2018 entry ("Conboy HM") -- verify against the published paper},
}
@comment{Year section: 2017}
@inproceedings{ghosh2017stress,
  title     = {Are You Stressed? Detecting High Stress from User Diaries},
  author    = {Ghosh, A. and Stepanov, E. A. and Danieli, M. and Riccardi, G.},
  booktitle = {8th IEEE International Conference on Cognitive Infocommunications (CogInfoCom 2017), Debrecen, Hungary},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2017/09/55_PID4964285_55.pdf},
  year      = {2017},
  date      = {2017-09-11},
  abstract  = {Knowledge of the complete clinical history, lifestyle, behaviour, medication adherence data, and underlying symptoms, all affect the treatment outcomes. Collecting, analysing and using all these data, while treating a patient can often be very challenging. A doctor can spend only a limited time with a patient. This time is often not enough to learn about all the lifestyle and underlying conditions of a patient’s life. Often patients are asked to maintain diaries of their daily activities. Diaries can help to improve adherence by increasing the consciousness of the patients, and can also serve as a way for the doctors to validate this adherence. However, diaries can be cumbersome to parse, and hence increase the task burden of the doctor. In this paper we demonstrate that automatic analysis of diaries can be used to predict the stress level of the diary writers with an F-measure of 0.70.},
  keywords  = {Health Analytics, Interactive Systems},
}
@inproceedings{mayortorres2017emotion,
  title         = {Enhanced Face/Audio Emotion Recognition: Video and Instance Level Classification Using {ConvNets} and Restricted {Boltzmann} Machines},
  author        = {{Mayor Torres}, Juan M. and Stepanov, E. A.},
  booktitle     = {Proceedings of the International Conference on Web Intelligence ({WI} '17), Leipzig, Germany},
  pages         = {939--946},
  url           = {https://sisl.disi.unitn.it/wp-content/uploads/2017/09/ACMWI2017MayorStepanov.pdf},
  note          = {Also available at https://dl.acm.org/citation.cfm?id=3109423},
  year          = {2017},
  date          = {2017-08-23},
  abstract      = {Face-based and audio-based emotion recognition modalities have been studied profusely obtaining successful classification rates for arousal/valence levels and multiple emotion categories settings. However, recent studies only focus their attention on classifying discrete emotion categories with a single image representation and/or a single set of audio feature descriptors. Face-based emotion recognition systems use a single image channel representations such as principal-components-analysis whitening, isotropic smoothing, or ZCA whitening. Similarly, audio emotion recognition systems use a standardized set of audio descriptors, including only averaged Mel-Frequency Cepstral coefficients. Both approaches imply the inclusion of decision-fusion modalities to compensate the limited feature separability and achieve high classification rates. In this paper, we propose two new methodologies for enhancing face-based and audio-based emotion recognition based on a single classifier decision and using the EU Emotion Stimulus dataset: (1) A combination of a Convolutional Neural Networks for frame-level feature extraction with a k-Nearest Neighbors classifier for the subsequent frame-level aggregation and video-level classification, and (2) a shallow Restricted Boltzmann Machine network for arousal/valence classification.},
  keywords      = {Affective Computing, Interactive Systems, Signal Annotation and Interpretation},
  internal-note = {Scraped source listed the second author as "Stepanov A. E."; normalized to "Stepanov, E. A." for consistency with the 2017 CogInfoCom entry -- verify. Compound surname "Mayor Torres" braced so BibTeX treats it as one surname.},
}
@incollection{mogessie2017peerassessment,
  title         = {Exploring the Role of Online Peer-Assessment as a Tool of Early Intervention},
  author        = {Mogessie, A. M. and Ronchetti, M. and Riccardi, G.},
  editor        = {Wu, T.-T. and Gennari, R. and Huang, Y.-M. and Xie, H. and Cao, Y.},
  booktitle     = {Emerging Technologies for Education},
  series        = {Lecture Notes in Computer Science},
  volume        = {10108},
  pages         = {635--644},
  publisher     = {Springer},
  url           = {https://sisl.disi.unitn.it/wp-content/uploads/2017/10/PRASE16-PeerAssessmentEarlyIntervention.pdf},
  year          = {2017},
  date          = {2017-01-01},
  abstract      = {Peer-assessment in education has a long history. Although the adoption of technological tools is not a recent phenomenon, many peer-assessment studies are conducted in manual environments. Automating peer-assessment tasks improves the efficiency of the practice and provides opportunities for taking advantage of large amounts of studentgenerated data, which will readily be available in electronic format. Data from three undergraduate-level courses, which utilised an electronic peerassessment tool were explored in this study in order to investigate the relationship between participation in online peer-assessment tasks and successful course completion. It was found that students with little or no participation in optional peer-assessment activities had very low course completion rates as opposed to those with high participation. In light of this finding, it is argued that electronic peer-assessment can serve as a tool of early intervention. Further advantages of automated peerassessment are discussed and foreseen extensions of this work are outlined.},
  keywords      = {Interactive Systems},
  internal-note = {Scraped source only gave editor surnames ("Wu, Gennari, Huang, Xie and Cao Y."); initials filled in from the LNCS 10108 volume front matter -- verify},
}
@inproceedings{bayer2017endtoend,
  title     = {Towards End-to-End Spoken Dialogue Systems},
  author    = {Bayer, A. O. and Stepanov, E. A. and Riccardi, G.},
  booktitle = {Proc. INTERSPEECH, Stockholm},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2017/10/2017_IS_Bayer_etal.pdf},
  year      = {2017},
  date      = {2017-01-01},
  abstract  = {Training task-oriented dialogue systems requires significant amount of manual effort and integration of many independently built components; moreover, the pipeline is prone to errorpropagation. End-to-end training has been proposed to overcome these problems by training the whole system over the utterances of both dialogue parties. In this paper we present an end-to-end spoken dialogue system architecture that is based on turn embeddings. Turn embeddings encode a robust representation of user turns with a local dialogue history and they are trained using sequence-to-sequence models. Turn embeddings are trained by generating the previous and the next turns of the dialogue and additionally perform spoken language understanding. The end-to-end spoken dialogue system is trained using the pre-trained turn embeddings in a stateful architecture that considers the whole dialogue history. We observe that the proposed spoken dialogue system architecture outperforms the models based on local-only dialogue history and it is robust to automatic speech recognition errors.},
  keywords  = {Interactive Systems, Speech Processing},
}
@inproceedings{cervone2017rovingmind,
  title     = {{Roving Mind}: a balancing act between open–domain and engaging dialogue systems},
  author    = {Cervone, A. and Tortoreto, G. and Mezza, S. and Gambi, E. and Riccardi, G.},
  booktitle = {1st Alexa Prize Conference, Las Vegas},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2019/11/AMZ17Conf-RovingMIndPaper.pdf},
  year      = {2017},
  date      = {2017-01-01},
  keywords  = {Conversational and Interactive Systems, Interactive Systems, Machine Learning, Natural Language Processing, Speech Processing},
}
@comment{Year section: 2016}
@inproceedings{alam2016coordination,
  title     = {How Interlocutors Coordinate with each other within Emotional Segments?},
  author    = {Alam, F. and Chowdhury, S. and Danieli, M. and Riccardi, G.},
  booktitle = {Proc. COLING, Osaka},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2016/11/Coling16-CoordinationEmotionalSegments.pdf},
  year      = {2016},
  date      = {2016-11-01},
  abstract  = {In this paper, we aim to investigate the coordination of interlocutors behavior in different emotional segments. Conversational coordination between the interlocutors is the tendency of speakers to predict and adjust each other accordingly on an ongoing conversation. In order to find such a coordination, we investigated 1) lexical similarities between the speakers in each emotional segments, 2) correlation between the interlocutors using psycholinguistic features, such as linguistic styles, psychological process, personal concerns among others, and 3) relation of interlocutors turn-taking behaviors such as competitiveness. To study the degree of coordination in different emotional segments, we conducted our experiments using real dyadic conversations collected from call centers in which agent’s emotional state include empathy and customer’s emotional states include anger and frustration. Our findings suggest that the most coordination occurs between the interlocutors inside anger segments, where as, a little coordination was observed when the agent was empathic, even though an increase in the amount of non-competitive overlaps was observed. We found no significant difference between anger and frustration segment in terms of turn-taking behaviors. However, the length of pause significantly decreases in the preceding segment of anger where as it increases in the preceding segment of frustration.},
  keywords  = {Affective Computing, Conversational and Interactive Systems, Discourse, Interactive Systems},
}
@inproceedings{alam2016empathy,
  title         = {Can We Detect Speakers' Empathy? A Real-Life Case Study},
  author        = {Alam, F. and Danieli, M. and Riccardi, G.},
  booktitle     = {Proc. IEEE International Conference on Cognitive Infocommunications, Wrocław},
  url           = {https://sisl.disi.unitn.it/wp-content/uploads/2016/11/CogInfo16-Detect-speakers-empathy.pdf},
  year          = {2016},
  date          = {2016-11-01},
  abstract      = {In the context of automatic behavioral analysis, we aim to classify empathy in human-human spoken conversations. Empathy underlies to the human ability to recognize, understand and to react to emotions, attitudes, and beliefs of others. While empathy and its different manifestations (e.g., sympathy, compassion) have been widely studied in psychology, very little has been done in the computational research literature. In this paper, we present a case study where we investigate the occurrences of empathy in call-centers human-human conversations. In order to propose an operational definition of empathy, we adopt the modal model of emotions, where the appraisal processes of the unfolding of emotional states are modeled sequentially. We have designed a binary classification system to detect the presence of empathic manifestations in spoken conversations. The automatic classification system has been evaluated using spoken conversations by exploiting and comparing perform},
  keywords      = {Discourse, Interactive Systems},
  internal-note = {Abstract appears truncated in the scraped source ("...comparing perform"); recover the full text from the published paper},
}
@inproceedings{mogessie2016peerassessment,
  title     = {Exploring the Role of Online Peer-Assessment as a Tool of Early Intervention},
  author    = {Mogessie, M. and Ronchetti, M. and Riccardi, G.},
  booktitle = {Proc. International Conference on Web-based Learning, Rome},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2016/11/PRASE16-PeerAssessmentEarlyIntervention.pdf},
  year      = {2016},
  date      = {2016-11-01},
  abstract  = {Peer-assessment in education has a long history. Although the adoption of technological tools is not a recent phenomenon, many peer-assessment studies are conducted in manual environments. Automating peer-assessment tasks improves the efficiency of the practice and provides opportunities for taking advantage of large amounts of studentgenerated data, which will readily be available in electronic format. Data from three undergraduate-level courses, which utilised an electronic peerassessment tool were explored in this study in order to investigate the relationship between participation in online peer-assessment tasks and successful course completion. It was found that students with little or no participation in optional peer-assessment activities had very low course completion rates as opposed to those with high participation. In light of this finding, it is argued that electronic peer-assessment can serve as a tool of early intervention. Further advantages of automated peerassessment are discussed and foreseen extensions of this work are outlined.},
  keywords  = {Interactive Systems},
}
@inproceedings{chowdhury2016turntaking,
  title     = {Predicting User Satisfaction from Turn-Taking in Spoken Conversations},
  author    = {Chowdhury, S. and Stepanov, E. A. and Riccardi, G.},
  booktitle = {Proc. INTERSPEECH, San Francisco},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2016/11/IS16-PredictingUserSatisfactionTurnTaking.pdf},
  year      = {2016},
  date      = {2016-11-01},
  abstract  = {User satisfaction is an important aspect of the user experience while interacting with objects, systems or people. Traditionally user satisfaction is evaluated a-posteriori via spoken or written questionnaires or interviews. In automatic behavioral analysis we aim at measuring the user emotional states and its descriptions as they unfold during the interaction. In our approach, user satisfaction is modeled as the final state of a sequence of emotional states and given ternary values positive, negative, neutral. In this paper, we investigate the discriminating power of turn-taking in predicting user satisfaction in spoken conversations. Turn-taking is used for discourse organization of a conversation by means of explicit phrasing, intonation, and pausing. In this paper, we train different characterization of turn-taking, such as competitiveness of the speech overlaps. To extract turn-taking features we design a turn segmentation and labeling system that incorporates lexical and acoustic information. Given a human-human spoken dialog, our system automatically infers any of the three values of the state of the user satisfaction. We evaluate the classification system on real-life call-center human-human dialogs. The comparative performance analysis shows that the contribution of the turn-taking features outperforms both prosodic and lexical features.},
  keywords  = {Affective Computing, Conversational and Interactive Systems, Discourse, Interactive Systems, Signal Annotation and Interpretation, Speech Processing},
}
@comment{Year section: 2014}
@inproceedings{mogessie2014peerinteraction,
  title     = {A Web Based Peer Interaction Framework for Improved Assessment and Supervision of Students},
  author    = {Mogessie, M. and Riccardi, G. and Ronchetti, M.},
  booktitle = {Proc. World Conference on Educational Multimedia, Hypermedia and Telecommunications, Tampere},
  url       = {https://sisl.disi.unitn.it/wp-content/uploads/2014/11/EDMEDIA14-Peer-based-Assessment.pdf},
  year      = {2014},
  date      = {2014-01-01},
  abstract  = {One of the challenges of both traditional and contemporary instructional media in higher education is creating a sustainable teaching-learning environment that ensures continuous engagement of students and provides efficient means of assessing their performance. We present a peer-based framework designed to increase active participation of students in courses administered in both traditional and blended learning settings. Students are continuously engaged in attention-eliciting tasks and are assessed by their peers. The framework allows semi-automated assignment of tasks to students. In completing these tasks, students ask questions, answer questions from other students, evaluate the quality of question-answer pairs and rate answers provided by their peers. We have implemented this framework in several courses and run extensive experiments to assess the effectiveness of our approach. We discuss the results of students’ surveys of this approach, which, in general, has been perceived as useful in achieving better learning outcomes.},
  keywords  = {Education Analytics, Interactive Systems},
}