|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:17:18.198339Z" |
|
}, |
|
"title": "Agent Assist through Conversation Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Kshitij", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Fadnis", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nathaniel", |
|
"middle": [], |
|
"last": "Mills", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jatin", |
|
"middle": [], |
|
"last": "Ganhotra", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Haggai", |
|
"middle": [], |
|
"last": "Roitman", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Pandey", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Doron", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yosi", |
|
"middle": [], |
|
"last": "Mass", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Shai", |
|
"middle": [], |
|
"last": "Erera", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Chulaka", |
|
"middle": [], |
|
"last": "Gunasekara", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Danish", |
|
"middle": [], |
|
"last": "Contractor", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Sankalp", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [ |
|
"Vera" |
|
], |
|
"last": "Liao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sachindra", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Lastras", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Konopnicki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Customer support agents play a crucial role as an interface between an organization and its end-users. We propose CAIRAA: Conversational Approach to Information Retrieval for Agent Assistance, to reduce the cognitive workload of support agents who engage with users through conversation systems. CAIRAA monitors an evolving conversation and recommends both responses and URLs of documents the agent can use in replies to their client. We combine traditional information retrieval (IR) approaches with more recent Deep Learning (DL) models to ensure high accuracy and efficient run-time performance in the deployed system. Here, we describe the CAIRAA system and demonstrate its effectiveness in a pilot study via a short video 1 .", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Customer support agents play a crucial role as an interface between an organization and its end-users. We propose CAIRAA: Conversational Approach to Information Retrieval for Agent Assistance, to reduce the cognitive workload of support agents who engage with users through conversation systems. CAIRAA monitors an evolving conversation and recommends both responses and URLs of documents the agent can use in replies to their client. We combine traditional information retrieval (IR) approaches with more recent Deep Learning (DL) models to ensure high accuracy and efficient run-time performance in the deployed system. Here, we describe the CAIRAA system and demonstrate its effectiveness in a pilot study via a short video 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Customer care conversation systems have been used in a variety of domains, including technical support, reservation systems, and banking applications (Acomb et al., 2007) . The majority of such systems provide a dashboard to customer support agents, so that they can interact with multiple end-users in parallel. The support agents use such dashboards to perform diverse tasks to address user requests, such as question-answering, conversational search, document passage extraction and transactions. When identifying the responses to user queries, the support agents either, (1) rely on their own domain knowledge and articulate such knowledge to be sent to the users or, (2) manually extract the keywords from the conversation and use search functions provided in their dashboards to identify the relevant knowledge contained in documents, and send URLs for these documents. The Help@IBM. How may I help you? U: Hi yes. I cannot connect to ibm connections cloud in ios U: yesterday my phone asked for the pw out of the blue and I clicked cancel bec on the road and now I have no connection to server A: No worries.", |
|
"cite_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 170, |
|
"text": "(Acomb et al., 2007)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "First you will need to create a 16 digit password for for ibm connections cloud https://w3.ibm.com/help/#/ article/ios_create_16char_pass then you will need to open on the iphone settingsaccounts and passwords-ibm connections cloud click on your email address and in the password field enter this 16 digit password. U: I have that pw. Can I use my old one or better to create a new one? A: Please always try the existing password first. If it doesn't work, then create a new password. U: Worked. ;) Thanks Table 1 : Sample dialog from Help@IBM where Agent (A) utterance includes a URL to the User (U) query. continuous process of identifying knowledge and responding becomes an immense cognitive workload for the customer support agents.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 506, |
|
"end": 513, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Although automated conversation systems have improved immensely in the last decade with advances in natural language processing, machine learning and dialog management (Wen et al. 2016; Li et al. (2016) ; Li et al. (2017) ), these systems still fail to satisfy the sophisticated customer's needs in real-life scenarios. This leads to frustration (Weisz et al., 2019) and less engagement (Vtyurina et al., 2017) . Therefore, having an empathetic human agent in-the-loop supported by efficient and accurate content retrieval, allows better coverage of customer needs and reduces customer frustration.", |
|
"cite_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 202, |
|
"text": "Li et al. (2016)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 221, |
|
"text": "Li et al. (2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 366, |
|
"text": "(Weisz et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 387, |
|
"end": 410, |
|
"text": "(Vtyurina et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With CAIRAA, we propose and showcase a system that provides real-time assistance to the support agents and alleviates their cognitive workload. Our system provides two forms of real-time rec- ommendations to the support agents: (1) URLs of the documents that contain information to resolve user issues, and (2) natural language responses that the agent can use to respond to their customer's queries. The operation of CAIRAA is illustrated in Table 1 with a sample conversation between a user and a support agent. In the example, the utterances made by the support agents are prefixed by A and the utterances made by the user are prefixed by U. The natural language utterances that CAIRAA predicted for the support agent are shown in different font (monospace) while the URLs of the related documents predicted by CAIRAA are shown in blue. In the following sections we describe the different components of our system and their implementation details.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 443, |
|
"end": 450, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The architecture of CAIRAA is illustrated in Figure 1 . It consists of an Agent-dashboard that is used by customer-support agents to interact with customers (users). The agent is assisted by recommendations from two engines -Document Recommendation Engine and the Response Recommendation Engine. As their names suggest, these engines provide realtime recommendations for documents that could be relevant during the chat, as well as, responses to the agent. Figure 1 also depicts components for data storage as well as model training.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 54, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 466, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given a collection of human-to-human conversation logs, we extract the mentions of URLs (doc-uments) from these conversations. We use Selenium 2 to render static as well as dynamic webcontent. With our primary focus on text content, additional cleaning processes that filters selected HTML content (e.g., menus, search bars, side bars, headers and footers) and only preserves text content, along with embedded procedural and multimedia content references is implemented. The processed content is exported in two formats. a) markdown and b) formatted text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Web Content Extraction", |
|
"sec_num": "2.1" |
|
}, |
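{
"text": "To make this step concrete, the following minimal Python sketch renders a page with Selenium and keeps only its body text. It is an illustration of the approach described above, not our production pipeline; the headless-Chrome setup and the list of stripped tags are assumptions.\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\nopts = Options()\nopts.add_argument('--headless')\ndriver = webdriver.Chrome(options=opts)\n\ndef extract_text(url):\n    # Render the page (static as well as dynamic content)\n    driver.get(url)\n    # Strip navigation chrome such as menus, headers, footers and side bars\n    driver.execute_script(\"document.querySelectorAll('nav, header, footer, aside').forEach(e => e.remove())\")\n    # Keep only the remaining textual content\n    return driver.find_element(By.TAG_NAME, 'body').text",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Web Content Extraction",
"sec_num": "2.1"
},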
|
{ |
|
"text": "Given a set of conversational logs along with documents mentioned in those conversations, we train the Document Recommendation Engine using a pipeline consisting of an information-retrieval model followed by a deep-learning model to recommend URLs relevant to an evolving conversation. This Document Recommendation Engine is trained with the objective of predicting the most relevant web content for the conversation at hand. Once trained, this is provided as a real-time service to the agent dashboard to recommend the appropriate URLs to support agents while they converse.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Document Recommendation Engine", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The Information Retrieval (IR) model is implemented using an Apache Lucene index, employed with English language analyzer and default BM25 similarity. Documents in the index are represented using two fields, (1) web page content, (2) document's representation augmented with the text of all historic conversations that link to it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Information Retrieval model", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "For a given (dialog) query d, matching documents are retrieved using four different ranking steps, which are combined using a cascade approach (Wang et al., 2011) . Following (Van Gysel et al., 2016), we obtain an initial pool of candidate documents using a lexical query aggregation approach. To this end, each utterance t i \u2208 d is represented as a separate weighted query-clause, having its weight assigned relatively to its sequence position in the dialog (Van Gysel et al., 2016) . Various sub-queries are then combined using a single disjunctive query. The second ranker evaluates each document y obtained by the first ranker against an expanded query (applying relevance model (Lavrenko and Croft, 2001) ). The third ranker applies a manifold-ranking approach (Xu et al., 2011) , aiming to score content-similar doc-uments (measured by Bhattacharyya languagemodel based similarity) with similar scores.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 162, |
|
"text": "(Wang et al., 2011)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 483, |
|
"text": "(Van Gysel et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 683, |
|
"end": 709, |
|
"text": "(Lavrenko and Croft, 2001)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 766, |
|
"end": 783, |
|
"text": "(Xu et al., 2011)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Information Retrieval model", |
|
"sec_num": "2.2.1" |
|
}, |
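{
"text": "As a toy illustration of the first cascade step, the sketch below combines per-utterance scores into a single disjunctive, position-weighted dialog-query score. Here, bm25_score is a stand-in for the index's BM25 scorer, and the linear position weighting is an assumption rather than the exact weighting scheme of (Van Gysel et al., 2016).\ndef aggregate_score(dialog_utterances, doc, bm25_score):\n    # bm25_score(utterance, doc) is assumed to come from the underlying index.\n    # Later utterances receive larger weights, reflecting their dialog position.\n    n = len(dialog_utterances)\n    weights = [(i + 1) / n for i in range(n)]\n    # Disjunctive combination: every matching utterance clause contributes.\n    return sum(w * bm25_score(t, doc) for w, t in zip(weights, dialog_utterances))\n\n# Candidate pool = documents ranked by the aggregated dialog-query score:\n# pool = sorted(docs, key=lambda d: aggregate_score(dialog, d, bm25_score), reverse=True)[:k]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Information Retrieval model",
"sec_num": "2.2.1"
},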
|
{ |
|
"text": "The last ranker in the cascade treats the dialog query d as a verbose query and applies the Fixed-Point (FP) method (Paik and Oard, 2014) for weighting its words. Yet, compared to \"traditional\" verbose queries, dialogs are further segmented into distinct utterances. Using this approach, we implement an utterance-biased extension for enhanced word-weighting. To this end, we first score the various utterances based on the initial FP weights of words they contain and their relative position. We then propagate utterance scores back to their associated words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Information Retrieval model", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "We use the Enhanced Sequential Inference Model (ESIM) proposed by Chen et al. (2017) with the same goal as the IR model but it uses dense vectors to represent conversation-contexts and documents. The objective is to predict the relevant URL given the dialog history (context). The multi-turn dialog history is concatenated together to form the context of length m, represented as", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 84, |
|
"text": "Chen et al. (2017)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Learning Model", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "C = (c 1 , c 2 , ..., c i , ..., c m )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Learning Model", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": ", where c i is the ith word in context. Given a web page content U as U = (r 1 , r 2 , ..., r j , ..., r n ), where r j is the jth word in web page content, the web page is selected using the conditional probability P (y = 1|C, U ), which shows the confidence of selecting the web page U given context C.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Learning Model", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "We observe that the IR model is much faster than the neural ESIM model, but the ESIM model provides improved performance in comparison. We combine the ESIM model with the IR model using a re-ranking of latter's candidate pool, which provides a combination of both ranker models. For example, the IR model returns the top-k relevant web pages (k = 20) and then the ESIM model is used to re-rank them and show a subset to the agent based on their confidence scores. We refer to this two-stage pipeline as a hybrid approach, which combines the best of both worlds and deliver near real-time experience with better performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Learning Model", |
|
"sec_num": "2.2.2" |
|
}, |
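{
"text": "A minimal sketch of this hybrid retrieve-then-re-rank pipeline follows; ir_retrieve and esim_score stand in for the Lucene cascade and the trained ESIM scorer, and the confidence threshold is an assumption.\ndef hybrid_recommend(context, ir_retrieve, esim_score, k=20, min_conf=0.5):\n    # Stage 1: fast lexical retrieval of a candidate pool (IR model)\n    candidates = ir_retrieve(context, k)\n    # Stage 2: neural re-ranking of the candidates (ESIM model)\n    scored = [(url, esim_score(context, url)) for url in candidates]\n    scored.sort(key=lambda pair: pair[1], reverse=True)\n    # Only surface candidates whose confidence clears the threshold\n    return [(url, conf) for url, conf in scored if conf >= min_conf]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Deep Learning Model",
"sec_num": "2.2.2"
},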
|
{ |
|
"text": "In addition to providing a ranked list of webpages, the Document Recommendation Engine provides a rating and confidence estimate for each recommended web content, allowing to better guide the agent to the best solutions. The rating and confidence per each single recommended content URL is estimated using a novel query performance prediction (QPP) model , trained over a multitude of features obtained from the conversation context and recommended content analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rating and Confidence Estimation", |
|
"sec_num": "2.2.3" |
|
}, |
|
{ |
|
"text": "Besides, at every step of the conversation, a crucial decision that the Document Recommendation Engine has to make is to decide whether or not to present the recommendations to the agent. In case of a low confidence, the human agent may ask for further clarifications from the end-user. Such a decision is taken by training another confidence estimation model that considers the confidence of each individual recommended URL. Here, the system further exploits the interaction between the recommendations made by the IR and ESIM models, with the observation that higher agreement (measured by ranking-similarity) usually translates to higher overall confidence (Roitman and Kurland, 2019) . We assessed the quality of our confidence estimation model by measuring its accuracy and log-loss per each task. For a single recommended URL, the model is trained to classify it as relevant or not. For a top-k recommended URLs list, the model is trained to determine whether it contains at least one relevant URL.", |
|
"cite_spans": [ |
|
{ |
|
"start": 660, |
|
"end": 687, |
|
"text": "(Roitman and Kurland, 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rating and Confidence Estimation", |
|
"sec_num": "2.2.3" |
|
}, |
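{
"text": "To illustrate the ranking-agreement signal, the sketch below computes a simple average top-k overlap between the IR and ESIM rankings; this particular measure, and its use as one feature of the confidence classifier, are illustrative assumptions rather than the exact feature set of the QPP model.\ndef rank_agreement(ir_ranking, esim_ranking, k=10):\n    # Average overlap of the two top-d prefixes for d = 1..k (higher = more agreement)\n    ir_top, esim_top = ir_ranking[:k], esim_ranking[:k]\n    overlaps = []\n    for depth in range(1, k + 1):\n        shared = set(ir_top[:depth]) & set(esim_top[:depth])\n        overlaps.append(len(shared) / depth)\n    return sum(overlaps) / len(overlaps)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Rating and Confidence Estimation",
"sec_num": "2.2.3"
},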
|
{ |
|
"text": "As mentioned above, an agent is also shown recommended responses based on how other agents have responded to similar conversation contexts in the pasts. Thus, given the current dialog input context C = (c 1 , c 2 , ..c i ..c m ), CAIRAA generates recommendations using a combination of generative as well as retrieval based methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Response Recommendation Engine", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We use a hierarchical encoder (Serban et al., 2016) to encode conversation contexts. Specifically, the encoder first encodes each conversation turn and generates turn-level representations for the dialog. A secondary encoder then generates the overall context representation using the turn-level encodings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 51, |
|
"text": "(Serban et al., 2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task training", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "We utilize this context encoding to generate response recommendations in three ways: (1) Using a vanilla decoder (Serban et al., 2016) (2) Using a decoder that additionally validates whether a subsequence at each time-step is likely to be relevant.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 134, |
|
"text": "(Serban et al., 2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task training", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "(3) Using the encoded representations in a Siamese dual-encoder (Lowe et al., 2017) that also encodes the responses. Vanilla Decoder: The decoder is initialized using the context encoding. The decoder generates the response autoregressively, that is, the token at each time-step is generated conditioned on the previous tokens of the response. The decoder is trained to minimize the log-perplexity of each word in the gold response. Decoder with sub-sequence validation: When trained on actual conversation logs, vanilla decoders often resort to generic responses or responses that are irrelevant to the context. Hence, to enforce relevance, we enhance the decoder with a classifier for each time-step of decoding. At each time-step, the classifier predicts the relevance of the response so-far for the given conversational context. The classifier is trained to predict a relevance of 1 for a prefix of the gold response and 0 for a prefix of any other randomly sampled response at each time-step of decoding. Simultaneously, the decoder is also trained to minimize the word loss, that is, log-perplexity of each word in the gold response. For any response r, the relevance loss can be written as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 83, |
|
"text": "(Lowe et al., 2017)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task training", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "loss r (r) = \u2212 T t=1 log p(y t |w 1 , . . . , w t ) ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Multi-task training", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "where y t = 1 for the gold response and 0 for the randomly sampled response. During inference, the token at each time-step is generated so as to maximize the sum of logprobability of the token and the log-relevance of the resultant partial response. Siamese Dual-Encoder: Finally, the context encoding is also fed to a Siamese network. To train the Siamese network, we randomly sample k \u2212 1 negative responses for each conversation context. The negative responses as well as the gold response are fed to a recurrent encoder (bidirectional LSTM) to generate the corresponding response embeddings. The context embedding as well as the corresponding response embeddings are fed to a 1-in-k classifier, where the k labels correspond to the k responses. The classifier is trained to predict the class-label that corresponds to the gold response. If the gold response r has label , the Siamese loss can be computed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task training", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "loss s (r) = \u2212 log p( |r 1 , . . . , r k ) .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task training", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "(2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task training", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Multi-task training Objective: The final loss is the sum of the loss for each of the above models. The model is trained until the loss on an independent validation set stops decreasing. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task training", |
|
"sec_num": "2.3.1" |
|
}, |
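{
"text": "The toy Python sketch below makes the three loss terms and their sum concrete, following Eq. (1) for the relevance loss and Eq. (2) for the Siamese loss; the probability inputs are assumed to come from the corresponding model heads, and this is an illustration rather than the training code.\nimport math\n\ndef word_loss(gold_token_probs):\n    # Log-perplexity of the gold response under the decoder\n    return -sum(math.log(p) for p in gold_token_probs)\n\ndef relevance_loss(prefix_probs, y):\n    # Eq. (1): prefix_probs[t] = p(y_t = 1 | w_1..w_t);\n    # y = 1 for prefixes of the gold response, 0 for a sampled response\n    return -sum(math.log(p if y == 1 else 1.0 - p) for p in prefix_probs)\n\ndef siamese_loss(class_probs, gold_label):\n    # Eq. (2): negative log-probability of the gold response among the k candidates\n    return -math.log(class_probs[gold_label])\n\ndef multitask_loss(gold_token_probs, gold_prefix_probs, neg_prefix_probs, class_probs, gold_label):\n    # Final objective: sum of the word, relevance and Siamese losses\n    return (word_loss(gold_token_probs) + relevance_loss(gold_prefix_probs, 1) + relevance_loss(neg_prefix_probs, 0) + siamese_loss(class_probs, gold_label))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-task training",
"sec_num": "2.3.1"
},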
|
{ |
|
"text": "In order to build a retrieval based model, we encode the dialog contexts using the pre-trained Universal Sentence Encoder (USE) (Cer et al., 2018) as well as the context encoder trained using multi-task objective discussed above. For each encoder, we create an annoy index 3 which stores the context embeddings and the corresponding responses from the training data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 146, |
|
"text": "(Cer et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrieval-based model", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "In order to return a response recommendation, a given dialog context is encoded either using USE or the context encoder. We fetch the responses of the k-nearest neighbours in the annoy index.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrieval-based model", |
|
"sec_num": "2.3.2" |
|
}, |
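{
"text": "A minimal sketch of this nearest-neighbour retrieval with annoy follows; the embedding dimension, the angular metric, the number of trees and the encode function (USE or the trained context encoder) are illustrative assumptions.\nfrom annoy import AnnoyIndex\n\ndef build_response_index(training_pairs, encode, dim=512, n_trees=10):\n    # training_pairs: iterable of (context, response) pairs from the conversation logs;\n    # encode: returns a dim-dimensional embedding of a context\n    index = AnnoyIndex(dim, 'angular')\n    responses = []\n    for item_id, (context, response) in enumerate(training_pairs):\n        index.add_item(item_id, encode(context))\n        responses.append(response)\n    index.build(n_trees)\n    return index, responses\n\ndef recommend_responses(index, responses, encode, context, k=5):\n    # Fetch the stored responses of the k nearest training contexts\n    neighbour_ids = index.get_nns_by_vector(encode(context), k)\n    return [responses[i] for i in neighbour_ids]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Retrieval-based model",
"sec_num": "2.3.2"
},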
|
{ |
|
"text": "Before the retrieved and generated responses are presented to the user, they are scored from 0 to 1. We use a voting-based scoring mechanism, where each response votes for all the other responses (generated as well as retrieved). To achieve this, we encode each response using the pre-trained USE. The score of a response is the mean of the inner-product between the corresponding embedding and all the other response embeddings. Since USE embeddings are normalized, these inner products range between 0 and 1. Finally, the responses are sorted based on their scores and presented to the user.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scoring the responses", |
|
"sec_num": "2.3.3" |
|
}, |
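{
"text": "A small numpy sketch of the voting-based scorer follows; use_encode stands in for the pre-trained USE encoder and is assumed to return unit-normalized embeddings for a list of responses.\nimport numpy as np\n\ndef vote_scores(responses, use_encode):\n    # Embed every candidate response (retrieved as well as generated)\n    emb = np.asarray(use_encode(responses))   # shape: (n, d)\n    sims = emb @ emb.T                        # pairwise inner products\n    n = len(responses)\n    # Score = mean inner product with all *other* responses (n >= 2 assumed)\n    scores = (sims.sum(axis=1) - sims.diagonal()) / (n - 1)\n    order = np.argsort(-scores)\n    return [(responses[i], float(scores[i])) for i in order]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Scoring the responses",
"sec_num": "2.3.3"
},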
|
{ |
|
"text": "3 https://github.com/spotify/annoy", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scoring the responses", |
|
"sec_num": "2.3.3" |
|
}, |
|
{ |
|
"text": "Once the recommendation engines are trained, CAIRAA is tasked with the live operation of the agent dashboard. CAIRAA adopts optimized user interface design to deliver precise information with minimal agent interactions. Prioritizing scalability, each operational component is intentionally designed to be modular and stateless.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deployment Details", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The Agent dashboard ( Figure 2) is an interactive web application that is integrated with customer support applications (e.g., LivePerson). Implemented using Javascript/ HTML5, it comprises of a chat interceptor; document and response recommendations panels; a full-text search; and a feedback facility runnable in an ES6 compliant browser. Chat interceptor monitors customer support applications (e.g., LivePerson) for conversation updates. Both document and response recommendations panels in their minimalist design limits information overload, where the former shows the title and a short description of the web content and the latter displays the recommended utterance. The agent interactivity with recommendations is limited to copy, view and reject. A full-text search allows the expert agents to perform simple keyword searches based on their domain knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 31, |
|
"text": "Figure 2)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Agent Dashboard", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "While we have automated the agent assistance for recommending documents and responses, we have retained the support agent in the loop to provide final edits and control over what is presented to the end-user. This allows a support agent to adjust the tone of responses, and to include documents outside recommended ones in their responses. Our framework captures these responses and their embedded recommendations for future retraining / retesting so that the system can self learn and automatically adjust to support agent activities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Agent Dashboard", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "By providing real-time assistance to agents to support their clients, we leverage the speed and accuracy of automated recommendation engines while retaining the agents' expertise. Learning from conversation logs, CAIRAA promotes more uniform support by keeping all agents aware of the latest information to address current end-user needs. Combining traditional information retrieval approaches with modern deep learning models ensures high accuracy and efficient run-time performance in our deployed system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "http://www.seleniumhq.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Technical support dialog systems: issues, problems, and solutions", |
|
"authors": [ |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Acomb", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Bloom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krishna", |
|
"middle": [], |
|
"last": "Dayanidhi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phillip", |
|
"middle": [], |
|
"last": "Hunter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Krogh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Esther", |
|
"middle": [], |
|
"last": "Levin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Pieraccini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the Workshop on Bridging the Gap: Academic and Industrial Research in Dialog Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--31", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kate Acomb, Jonathan Bloom, Krishna Dayanidhi, Phillip Hunter, Peter Krogh, Esther Levin, and Roberto Pieraccini. 2007. Technical support di- alog systems: issues, problems, and solutions. In Proceedings of the Workshop on Bridging the Gap: Academic and Industrial Research in Dialog Technologies, pages 25-31. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Chris Tar, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinfei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng-Yi", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Hua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Limtiaco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rhomni", |
|
"middle": [], |
|
"last": "St", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mario", |
|
"middle": [], |
|
"last": "Constant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Guajardo-Cespedes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Cer, Yinfei Yang, Sheng-yi Kong, Nan Hua, Nicole Limtiaco, Rhomni St. John, Noah Constant, Mario Guajardo-Cespedes, Steve Yuan, Chris Tar, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil. 2018. Universal sentence encoder. CoRR, abs/1803.11175.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Enhanced lstm for natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Qian", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen-Hua", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Si", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Inkpen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1657--1668", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. 2017. Enhanced lstm for natural language inference. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 1657-1668.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Association for Computing Machinery", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Lavrenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W. Bruce", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 24th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '01", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "120--127", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/383952.383972" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Lavrenko and W. Bruce Croft. 2001. Relevance based language models. In Proceedings of the 24th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '01, page 120-127, New York, NY, USA. As- sociation for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Deep reinforcement learning for dialogue generation", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.01541" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Will Monroe, Alan Ritter, Michel Galley, Jianfeng Gao, and Dan Jurafsky. 2016. Deep rein- forcement learning for dialogue generation. arXiv preprint arXiv:1606.01541.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Adversarial learning for neural dialogue generation", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlin", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S\u00e9bastien", |
|
"middle": [], |
|
"last": "Jean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1701.06547" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Will Monroe, Tianlin Shi, S\u00e9bastien Jean, Alan Ritter, and Dan Jurafsky. 2017. Adversar- ial learning for neural dialogue generation. arXiv preprint arXiv:1701.06547.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Training end-to-end dialogue systems with the ubuntu dialogue corpus", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Thomas Lowe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nissan", |
|
"middle": [], |
|
"last": "Pow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iulian", |
|
"middle": [], |
|
"last": "Vlad Serban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Charlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chia-Wei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joelle", |
|
"middle": [], |
|
"last": "Pineau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Dialogue & Discourse", |
|
"volume": "8", |
|
"issue": "1", |
|
"pages": "31--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Thomas Lowe, Nissan Pow, Iulian Vlad Serban, Laurent Charlin, Chia-Wei Liu, and Joelle Pineau. 2017. Training end-to-end dialogue systems with the ubuntu dialogue corpus. Dialogue & Discourse, 8(1):31-65.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A fixedpoint method for weighting terms in verbose informational queries", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Jiaul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Paik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Oard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 23rd ACM International Conference on Conference on Information and Knowledge Management, CIKM '14", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "131--140", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2661829.2661957" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiaul H. Paik and Douglas W. Oard. 2014. A fixed- point method for weighting terms in verbose in- formational queries. In Proceedings of the 23rd ACM International Conference on Conference on Information and Knowledge Management, CIKM '14, page 131-140, New York, NY, USA. Associa- tion for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A study of query performance prediction for answer quality determination", |
|
"authors": [ |
|
{ |
|
"first": "Shai", |
|
"middle": [], |
|
"last": "Haggai Roitman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "Erera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Feigenblat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACM SIGIR International Conference on Theory of Information Retrieval, ICTIR '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--46", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3341981.3344219" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haggai Roitman, Shai Erera, and Guy Feigenblat. 2019. A study of query performance prediction for answer quality determination. In Proceedings of the 2019 ACM SIGIR International Conference on Theory of Information Retrieval, ICTIR '19, page 43-46, New York, NY, USA. Association for Computing Machin- ery.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Query performance prediction for pseudo-feedbackbased retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Haggai", |
|
"middle": [], |
|
"last": "Roitman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Kurland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 42nd", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3331184.3331369" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haggai Roitman and Oren Kurland. 2019. Query performance prediction for pseudo-feedback- based retrieval. In Proceedings of the 42nd", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "International ACM SIGIR Conference on Research and Development in Information Retrieval, SI-GIR'19", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1261--1264", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "International ACM SIGIR Conference on Research and Development in Information Retrieval, SI- GIR'19, page 1261-1264, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Building end-to-end dialogue systems using generative hierarchical neural network models", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Iulian V Serban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Sordoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joelle", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pineau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Thirtieth AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iulian V Serban, Alessandro Sordoni, Yoshua Bengio, Aaron Courville, and Joelle Pineau. 2016. Building end-to-end dialogue systems using generative hier- archical neural network models. In Thirtieth AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Lexical query modeling in session search", |
|
"authors": [ |
|
{ |
|
"first": "Christophe", |
|
"middle": [], |
|
"last": "Van Gysel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evangelos", |
|
"middle": [], |
|
"last": "Kanoulas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "De Rijke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 ACM International Conference on the Theory of Information Retrieval, ICTIR '16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "69--72", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2970398.2970422" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christophe Van Gysel, Evangelos Kanoulas, and Maarten de Rijke. 2016. Lexical query model- ing in session search. In Proceedings of the 2016 ACM International Conference on the Theory of Information Retrieval, ICTIR '16, page 69-72, New York, NY, USA. Association for Computing Machin- ery.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Exploring conversational search with humans, assistants, and wizards", |
|
"authors": [ |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Vtyurina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denis", |
|
"middle": [], |
|
"last": "Savenkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Agichtein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [ |
|
"L A" |
|
], |
|
"last": "Clarke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems, CHI EA '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2187--2193", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3027063.3053175" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandra Vtyurina, Denis Savenkov, Eugene Agichtein, and Charles L. A. Clarke. 2017. Exploring conversational search with humans, assistants, and wizards. In Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems, CHI EA '17, page 2187-2193, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A cascade ranking model for efficient ranked retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Lidan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donald", |
|
"middle": [], |
|
"last": "Metzler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 34th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '11", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "105--114", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2009916.2009934" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lidan Wang, Jimmy Lin, and Donald Metzler. 2011. A cascade ranking model for efficient ranked retrieval. In Proceedings of the 34th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '11, page 105-114, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Bigbluebot: teaching strategies for successful human-agent interactions", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Justin D Weisz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Narendra", |
|
"middle": [ |
|
"Nath" |
|
], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingrid", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lange", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 24th International Conference on Intelligent User Interfaces", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "448--459", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin D Weisz, Mohit Jain, Narendra Nath Joshi, James Johnson, and Ingrid Lange. 2019. Bigbluebot: teaching strategies for successful human-agent inter- actions. In Proceedings of the 24th International Conference on Intelligent User Interfaces, pages 448-459.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Stefan Ultes, and Steve Young. 2016. A networkbased end-to-end trainable task-oriented dialogue system", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Tsung-Hsien Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Vandyke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Mrksic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lina", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Gasic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pei-Hao", |
|
"middle": [], |
|
"last": "Rojas-Barahona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1604.04562" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Hsien Wen, David Vandyke, Nikola Mrksic, Milica Gasic, Lina M Rojas-Barahona, Pei-Hao Su, Stefan Ultes, and Steve Young. 2016. A network- based end-to-end trainable task-oriented dialogue system. arXiv preprint arXiv:1604.04562.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Efficient manifold ranking for image retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Bu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deng", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaofei", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiebo", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 34th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '11", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "525--534", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2009916.2009988" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bin Xu, Jiajun Bu, Chun Chen, Deng Cai, Xiaofei He, Wei Liu, and Jiebo Luo. 2011. Efficient mani- fold ranking for image retrieval. In Proceedings of the 34th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '11, page 525-534, New York, NY, USA. As- sociation for Computing Machinery.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Hello. Thank you for contacting", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"text": "CAIRAA System Architecture: (a) Agent Dashboard -a web application (b) Orchestrator APIsserver side communications, controls data and process flow (c) Document Recommendation Engine -retrieves and recommends documents relevant to conversational context (d) Response Recommendation Engine -recommends agent responses based on conversational context (f) Storage -retains document content, conversation and system logs, agents feedback and activity (g) Training infrastructure -a dedicated cluster for deep learning models training (In development)", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"text": "CAIRAA In Action; (A & B) Document and Response recommendations panel respectively; (C) Top Recommendation; (D) Confidence score of the recommendation and a button to copy URLs/responses into agent's chat with the end-user; (E) Agent's response; (F) Full-text search bar for agents to perform manual search.", |
|
"type_str": "figure" |
|
} |
|
} |
|
} |
|
} |