|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:51:30.166275Z" |
|
}, |
|
"title": "Contemporary NLP Modeling in Six Comprehensive Programming Assignments", |
|
"authors": [ |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Texas at Austin", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jifan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Texas at Austin", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Shrey", |
|
"middle": [], |
|
"last": "Desai", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Texas at Austin", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tanya", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Texas at Austin", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lucas", |
|
"middle": [], |
|
"last": "Kabela", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Texas at Austin", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yasumasa", |
|
"middle": [], |
|
"last": "Onoe", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Texas at Austin", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jiacheng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Texas at Austin", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present a series of programming assignments, adaptable to a range of experience levels from advanced undergraduate to PhD, to teach students design and implementation of modern NLP systems. These assignments build from the ground up and emphasize fullstack understanding of machine learning models: initially, students implement inference and gradient computation by hand, then use Py-Torch to build nearly state-of-the-art neural networks using current best practices. Topics are chosen to cover a wide range of modeling and inference techniques that one might encounter, ranging from linear models suitable for industry applications to state-of-theart deep learning models used in NLP research. The assignments are customizable, with constrained options to guide less experienced students or open-ended options giving advanced students freedom to explore. All of them can be deployed in a fully autogradable fashion, and have collectively been tested on over 300 students across several semesters. 1 * Corresponding author. Subsequent authors listed alphabetically. 1 See https://cs.utexas.edu/~gdurrett for past offerings and static versions of these assignments; contact Greg Durrett for access to the repository with instructor solutions. A2. Sentiment analysis with feedforward \"deep averaging\" networks (Iyyer et al., 2015) using GloVe embeddings (Pennington et al., 2014). A3. Hidden Markov Models and linear-chain conditional random fields (CRFs) for named entity recognition (NER) (Tjong Kim Sang and De Meulder, 2003), using features similar to those from Zhang and Johnson (2003). A4. Character-level RNN language modeling (Mikolov et al., 2010).", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present a series of programming assignments, adaptable to a range of experience levels from advanced undergraduate to PhD, to teach students design and implementation of modern NLP systems. These assignments build from the ground up and emphasize fullstack understanding of machine learning models: initially, students implement inference and gradient computation by hand, then use Py-Torch to build nearly state-of-the-art neural networks using current best practices. Topics are chosen to cover a wide range of modeling and inference techniques that one might encounter, ranging from linear models suitable for industry applications to state-of-theart deep learning models used in NLP research. The assignments are customizable, with constrained options to guide less experienced students or open-ended options giving advanced students freedom to explore. All of them can be deployed in a fully autogradable fashion, and have collectively been tested on over 300 students across several semesters. 1 * Corresponding author. Subsequent authors listed alphabetically. 1 See https://cs.utexas.edu/~gdurrett for past offerings and static versions of these assignments; contact Greg Durrett for access to the repository with instructor solutions. A2. Sentiment analysis with feedforward \"deep averaging\" networks (Iyyer et al., 2015) using GloVe embeddings (Pennington et al., 2014). A3. Hidden Markov Models and linear-chain conditional random fields (CRFs) for named entity recognition (NER) (Tjong Kim Sang and De Meulder, 2003), using features similar to those from Zhang and Johnson (2003). A4. Character-level RNN language modeling (Mikolov et al., 2010).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "This paper presents a series of assignments designed to give a survey of modern NLP through the lens of system-building. These assignments provide hands-on experience with concepts and implementation practices that we consider critical for students to master, ranging from linear feature-based models to cutting-edge deep learning approaches. The assignments are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A1. Sentiment analysis with linear models (Pang et al., 2002) on the Stanford Sentiment Treebank (Socher et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 61, |
|
"text": "(Pang et al., 2002)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 118, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |

{

"text": "A2. Sentiment analysis with feedforward \"deep averaging\" networks (Iyyer et al., 2015) using GloVe embeddings (Pennington et al., 2014).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},

{

"text": "A3. Hidden Markov Models and linear-chain conditional random fields (CRFs) for named entity recognition (NER) (Tjong Kim Sang and De Meulder, 2003), using features similar to those from Zhang and Johnson (2003).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},

{

"text": "A4. Character-level RNN language modeling (Mikolov et al., 2010).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},
|
{ |
|
"text": "A5. Semantic parsing with seq2seq models (Jia and Liang, 2016) on the GeoQuery dataset (Zelle and Mooney, 1996) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 62, |
|
"text": "(Jia and Liang, 2016)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 87, |
|
"end": 111, |
|
"text": "(Zelle and Mooney, 1996)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A6. Reading comprehension on SQuAD (Rajpurkar et al., 2016) using a simplified version of the DrQA model (Chen et al., 2017) , similar to BiDAF (Seo et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 124, |
|
"text": "(Chen et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 162, |
|
"text": "(Seo et al., 2016)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A1-A5 come with autograders. These train each student's model from scratch and evaluate performance on the development set of each task, verifying whether their code behaves as intended. The autograders are bundled to be deployable on Gradescope using their Docker framework. 2 These coding assignments can also be supplemented with conceptual questions for hybrid assignments, though we do not distribute those as part of this release.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
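
The sketch below illustrates the "train from scratch, then check dev performance" grading flow described above. The script names, accuracy threshold, and output format are hypothetical, not the actual Gradescope configuration.

```python
# Minimal autograder harness sketch (hypothetical names and thresholds; not
# the actual Gradescope setup). It trains the student's model from scratch,
# reads the dev-set accuracy the run writes out, and awards points.
import json
import subprocess

def grade_submission(train_cmd, dev_accuracy_file, threshold=0.75, points=10):
    subprocess.run(train_cmd, check=True, timeout=3600)  # train from scratch
    with open(dev_accuracy_file) as f:
        accuracy = float(f.read().strip())
    score = points if accuracy >= threshold else 0
    return {"score": score, "output": f"dev accuracy {accuracy:.3f}"}

if __name__ == "__main__":
    result = grade_submission(["python", "classifier.py", "--model", "LR"], "dev_acc.txt")
    print(json.dumps(result))
```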
|
{ |
|
"text": "Other Courses and Materials Several other widely-publicized courses like Stanford CS224N and CMU CS 11-747 are much more \"neural-first\" views of NLP: their assignments delve more heavily into word embeddings and low-level neural implementation like backpropagation. By contrast, this course is designed to be a survey that also covers topics like linear classification, generative modeling (HMMs), and structured inference. Other hands-on courses discussed in prior Teaching NLP papers (Klein, 2005; Madnani and Dorr, 2008; Baldridge and Erk, 2008) make some similar choices about how to blend linguistics and CS concepts, but our desire to integrate deep learning as a primary (but not the sole) focus area guides us towards a different set of assignment topics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 486, |
|
"end": 499, |
|
"text": "(Klein, 2005;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 523, |
|
"text": "Madnani and Dorr, 2008;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 524, |
|
"end": 548, |
|
"text": "Baldridge and Erk, 2008)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This set of assignments was designed after we asked ourselves, what should a student taking NLP know how to build? NLP draws on principles from machine learning, statistics, linguistics, algorithms, and more, and we set out to expose students to a range of ideas from these disciplines through the lens of implementation. This choice follows the \"text processing first\" (Bird, 2008) or \"core tools\" (Klein, 2005) views of the field, with the idea that students can study undertake additional study of particular topic areas and quickly get up to speed on modeling approaches given the building blocks presented here.", |
|
"cite_spans": [ |
|
{ |
|
"start": 370, |
|
"end": 382, |
|
"text": "(Bird, 2008)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 412, |
|
"text": "(Klein, 2005)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Design Principles", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "There are far too many NLP tasks and models to cover in a single course. Rather than focus on exposing students to the most important applications, we instead designed these assignments to feature a range of models along the following typological dimensions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Covering Model Types", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Output space The prediction spaces of models considered here include binary/multiclass (A1, A2), structured (sequence in A3, span in A6), and natural language (sequence of words in A4, executable query in A5). While structured models have fallen out of favor with the advent of neural networks, we view tagging and parsing as fundamental ped-agogical tools for getting students to think about linguistic structure and ambiguity, and these are emphasized in our courses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Covering Model Types", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Modeling framework We cover generative models with categorical distributions (A3), linear feature-based models including logistic regression (A1) and CRFs (A3), and neural networks (A2, A4, A5, A6). These particularly highlight differences in training, optimization, and inference required for these different techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Covering Model Types", |
|
"sec_num": "2.1" |
|
}, |
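
To make the inference differences concrete, the following is a minimal sketch of the dynamic program students implement by hand for the HMM in A3: Viterbi decoding in log space. The function signature and array layout are our own illustration, not the released assignment code.

```python
# Viterbi decoding for an HMM tagger, in log space for numerical stability
# (illustrative sketch; names and layout are ours, not the assignment's).
import numpy as np

def viterbi(log_init, log_trans, log_emit, obs):
    """log_init: (T,) initial tag log-probs; log_trans: (T, T) log P(t_j | t_i);
    log_emit: (T, V) log P(word | tag); obs: list of word indices."""
    n, T = len(obs), log_init.shape[0]
    score = np.full((n, T), -np.inf)    # best log-prob of a path ending in tag t at step i
    back = np.zeros((n, T), dtype=int)  # backpointers for recovering that path
    score[0] = log_init + log_emit[:, obs[0]]
    for i in range(1, n):
        for t in range(T):
            cand = score[i - 1] + log_trans[:, t] + log_emit[t, obs[i]]
            back[i, t] = np.argmax(cand)
            score[i, t] = cand[back[i, t]]
    tags = [int(np.argmax(score[-1]))]  # best final tag, then walk backpointers
    for i in range(n - 1, 0, -1):
        tags.append(int(back[i, tags[-1]]))
    return tags[::-1]
```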
|
{ |
|
"text": "We cover feedforward networks (A2), recurrent neural encoders (A4, A5, A6), seq2seq models (A5), and attention (A5, A6). From these, Transformers (Vaswani et al., 2017) naturally emerge even though they are not explicitly implemented in an assignment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 168, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architectures", |
|
"sec_num": null |
|
}, |
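
As a concrete instance of these building blocks, the deep averaging network students build in A2 fits in a few lines of PyTorch. This is a minimal sketch with assumed names and sizes, not the distributed starter code: it averages pre-trained (e.g., GloVe) embeddings with padding masked out, then applies a small feedforward classifier.

```python
# Deep averaging network sketch (assumed names/sizes; not the released A2 code).
import torch
import torch.nn as nn

class DeepAveragingNetwork(nn.Module):
    def __init__(self, embeddings: torch.Tensor, hidden: int = 100, classes: int = 2):
        super().__init__()
        # Initialize from pre-trained vectors; index 0 is reserved for padding.
        self.emb = nn.Embedding.from_pretrained(embeddings, freeze=False, padding_idx=0)
        self.ff = nn.Sequential(
            nn.Linear(embeddings.shape[1], hidden),
            nn.ReLU(),
            nn.Linear(hidden, classes),
        )

    def forward(self, word_ids: torch.Tensor) -> torch.Tensor:
        vecs = self.emb(word_ids)                     # (batch, seq_len, dim)
        mask = (word_ids != 0).unsqueeze(-1).float()  # exclude padding from the average
        avg = (vecs * mask).sum(1) / mask.sum(1).clamp(min=1.0)
        return self.ff(avg)                           # unnormalized class scores
```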
|
{ |
|
"text": "A major consideration in designing these assignments was to enable understanding without large-scale computational resources. Maintaining simplicity and tractability is the major reason we do not feature more exploration of pre-trained models (Devlin et al., 2019) . These factors are also why we choose character-level language modeling (rather than word-level) and seq2seq semantic parsing (rather than translation): training large autoregressive models to perform well when output vocabularies are in the tens of thousands requires significant engineering expertise. While we teach students skills like debugging and testing models on simplified settings, we still found it less painful to build our projects around these more tractable tasks where students can iterate quickly. Another core goal was to allow students to build systems from the ground-up using simple, understandable code. We build on PyTorch primitives (Paszke et al., 2019) , but otherwise avoid using frameworks like Keras, Huggingface, or Al-lenNLP. The code is also somewhat \"underengineered:\" we avoid an overly heavy reliance on Pythonic constructs like list comprehensions or generators as not all students come in with a high level of familiarity with Python.", |
|
"cite_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 264, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 924, |
|
"end": 945, |
|
"text": "(Paszke et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other Desiderata", |
|
"sec_num": "2.2" |
|
}, |
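
To illustrate the deliberately plain, framework-free style this implies, a training loop in that spirit might look like the sketch below; the model, optimizer, and batching specifics are our assumptions rather than the released code.

```python
# Plain-style training loop on PyTorch primitives only (illustrative; the
# optimizer and batching choices are ours). Explicit loops are preferred
# over comprehensions, matching the "underengineered" style described.
import torch

def train(model, examples, epochs=10, lr=1e-3, batch_size=32):
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss()
    for epoch in range(epochs):
        total_loss = 0.0
        for start in range(0, len(examples), batch_size):
            xs_list = []
            ys_list = []
            for word_ids, label in examples[start:start + batch_size]:
                xs_list.append(word_ids)
                ys_list.append(label)
            xs = torch.stack(xs_list)   # (batch, seq_len) word indices
            ys = torch.tensor(ys_list)  # (batch,) gold labels
            optimizer.zero_grad()
            loss = loss_fn(model(xs), ys)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(f"epoch {epoch}: total loss {total_loss:.4f}")  # quick health signal
```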
|
{ |
|
"text": "What's missing Parsing is notably absent from these assignments; we judged that both chart parsers and transition-based parsers involved too many engineering details specific to these settings. All of our classes do cover parsing and in some cases have other hands-on components that engage with parsing, but students do not actually build a parser. Instead, sequence models are taken as an example of structured inference, and other classification tasks are used instead of transition systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other Desiderata", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "From a system-building perspective, the biggest omissions are pre-training and Transformers. These can be explored in the context of final projects, as we describe in the next section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other Desiderata", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Finally, our courses integrate additional discussion around ethics, with specific discussions surrounding bias in word embeddings (Bolukbasi et al., 2016; Gonen and Goldberg, 2019) and ethical considerations of pre-trained models (Bender et al., 2021) , as well as an open-ended discussion surrounding social impact and ethical considerations of NLP, deep learning, and machine learning. These are not formally assessed at present, but we are considering this for future iterations of the course given these topics' importance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 154, |
|
"text": "(Bolukbasi et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 180, |
|
"text": "Gonen and Goldberg, 2019)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 251, |
|
"text": "(Bender et al., 2021)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other Desiderata", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "These assignments have been used in four different versions of an NLP survey course: an upperlevel undergraduate course, a masters level course (delivered online), and two PhD-level courses. In the online MS course, these constitute the only assessment. For courses delivered in a traditional classroom format, we recommend choosing a subset of the assignments and supplementing with additional written assignments testing conceptual understanding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deployment", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our undergrad courses use A1, A2, A4, and a final project based on A6. We use additional written assignments covering word embedding techniques, syntactic parsing, machine translation, and pre-trained models. Our PhD-level courses use A1, A2, A3, A5, and an independent final project. The assignments also support further \"extension\" options: for example, in A3, beam search is presented as optional and students can also explore Assignment Eisenstein Jurafsky + Martin A1 2, 4 4, 5 A2 3 7 A3 7 8 A4 6 7, 9 A5 12, 18 11, 15 A6 17.5 23.2 parallel decoding for the CRF or features for NER to work better on German. For the seq2seq model, they could experiment with Transformers or implement constrained decoding to always produce valid logical forms. We believe that A1 and A2 could be adapted to use in a wide range of courses, but A3-A6 are most appropriate for advanced undergraduates or graduate students.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 441, |
|
"end": 523, |
|
"text": "Eisenstein Jurafsky + Martin A1 2, 4 4, 5 A2 3 7 A3 7 8 A4 6 7, 9 A5", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Deployment", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Syllabus Table 2 pairs these assignments with readings in texts by Jurafsky and Martin (2021) and Eisenstein (2019) . See Greg Durrett's course pages for complete sets of readings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 93, |
|
"text": "Jurafsky and Martin (2021)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 98, |
|
"end": 115, |
|
"text": "Eisenstein (2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 16, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Deployment", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Logistics We typically provide students around 2 weeks per assignment. Their submission either consists of just the code or a code with a brief report, depending on the course format. Students collaborate on assignments through a discussion board on Piazza as well as in person. We have relatively low incidence of students copying code, assessed using Moss over several semesters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deployment", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Pain Points Especially on A3, A4, and A5, we come across students who find debugging to be a major challenge. In the assignments, we suggest strategies to verify parts of inference code independently of training, as well as simplified tasks to test models on, but some students find it challenging or are unwilling to pursue these avenues. On a similar note, students often do not have a prior on what the system should do. It might not raise a red flag that their code takes an hour per epoch, or gets 3% accuracy on the development set, and they end up getting stuck as a result. Understanding what these failures mean is something we emphasize. Finally, students sometimes have (real or perceived) lack of background on either coding or the mathematical fundamentals of the course; however, many such students end up doing well in these courses as their first ML/NLP courses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deployment", |
|
"sec_num": "3" |
|
}, |
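
Two examples of the kind of checks we have in mind (our own illustrations, not assignment text): at random initialization, a k-class classifier's cross-entropy should sit near ln k, and a dynamic-programming decoder should agree with brute-force enumeration on tiny inputs.

```python
# Sanity checks of the sort suggested to students (illustrative examples).
import itertools
import math
import torch

def check_initial_loss(model, xs, ys, num_classes):
    # A freshly initialized k-class classifier should score near ln(k);
    # a much larger value usually signals a forward-pass or data bug.
    loss = torch.nn.functional.cross_entropy(model(xs), ys).item()
    print(f"initial loss {loss:.3f}; expected about {math.log(num_classes):.3f}")

def brute_force_decode(log_init, log_trans, log_emit, obs):
    # Exhaustively score every tag sequence on a tiny input; a correct
    # Viterbi implementation must return a sequence with the same score.
    T = log_init.shape[0]
    def score(tags):
        s = log_init[tags[0]] + log_emit[tags[0], obs[0]]
        for i in range(1, len(obs)):
            s += log_trans[tags[i - 1], tags[i]] + log_emit[tags[i], obs[i]]
        return s
    return max(itertools.product(range(T), repeat=len(obs)), key=score)
```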
|
{ |
|
"text": "For the CRF and seq2seq modeling assignments, a custom framework must be used, as Gradescope autograders cannot handle these. We grade these in a batch fashion on a single instructional machine, which poses some logistical challenges.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to acknowledge the additional graduate and undergraduate TAs for various offerings of these courses: Christopher Crabtree, Uday Kusupati, Shivangi Mahto, Abhilash Potluri, Shivang Singh, Xi Ye, and Didi Zhou. Our thanks also go out to all of the students who have taken these courses, whose comments and experiences have helped make them stronger. Thanks as well to the anonymous reviewers for their helpful comments.In the development of these materials, we consulted courses and teaching materials by Emily Bender, Sam Bowman, Chris Dyer, Mohit Iyyer, Vivek Srikumar, and many others. We would also like to thank Jacob Eisenstein, Dan Jurafsky, James H. Martin, and Yoav Goldberg for their helpful textbooks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Teaching computational linguistics to a large, diverse student body: Courses, tools, and interdepartmental interaction", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Erk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Third Workshop on Issues in Teaching Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Baldridge and Katrin Erk. 2008. Teaching com- putational linguistics to a large, diverse student body: Courses, tools, and interdepartmental interaction. In Proceedings of the Third Workshop on Issues in Teaching Computational Linguistics, pages 1- 9, Columbus, Ohio. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timnit", |
|
"middle": [], |
|
"last": "Gebru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angelina", |
|
"middle": [], |
|
"last": "Mcmillan-Major", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shmargaret", |
|
"middle": [], |
|
"last": "Shmitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "610--623", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3442188.3445922" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily M. Bender, Timnit Gebru, Angelina McMillan- Major, and Shmargaret Shmitchell. 2021. On the Dangers of Stochastic Parrots: Can Language Mod- els Be Too Big? In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Trans- parency, FAccT '21, page 610-623, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Defining a core body of knowledge for the introductory computational linguistics curriculum", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Third Workshop on Issues in Teaching Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "27--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven Bird. 2008. Defining a core body of knowledge for the introductory computational linguistics cur- riculum. In Proceedings of the Third Workshop on Issues in Teaching Computational Linguistics, pages 27-35, Columbus, Ohio. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tolga", |
|
"middle": [], |
|
"last": "Bolukbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [], |
|
"last": "Saligrama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 30th International Conference on Neural Information Processing Systems, NIPS'16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4356--4364", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tolga Bolukbasi, Kai-Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai. 2016. Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings. In Proceedings of the 30th International Conference on Neural Information Processing Systems, NIPS'16, page 4356-4364, Red Hook, NY, USA. Curran Associates Inc.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Reading Wikipedia to Answer Open-Domain Questions", |
|
"authors": [ |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Fisch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1870--1879", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1171" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading Wikipedia to Answer Open- Domain Questions. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1870- 1879, Vancouver, Canada. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Introduction to Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Eisenstein. 2019. Introduction to Natural Lan- guage Processing. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them", |
|
"authors": [ |
|
{ |
|
"first": "Hila", |
|
"middle": [], |
|
"last": "Gonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "609--614", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1061" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hila Gonen and Yoav Goldberg. 2019. Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 609-614, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Deep Unordered Composition Rivals Syntactic Methods for Text Classification", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varun", |
|
"middle": [], |
|
"last": "Manjunatha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Boyd-Graber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1681--1691", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Iyyer, Varun Manjunatha, Jordan Boyd-Graber, and Hal Daum\u00e9 III. 2015. Deep Unordered Com- position Rivals Syntactic Methods for Text Classi- fication. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 1681-1691, Beijing, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Data Recombination for Neural Semantic Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "12--22", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Jia and Percy Liang. 2016. Data Recombination for Neural Semantic Parsing. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 12-22, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Speech and Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Jurafsky and James H. Martin. 2021. Speech and Language Processing, 3rd Ed. Online.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A core-tools statistical NLP course", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the Second ACL Workshop on Effective Tools and Methodologies for Teaching NLP and CL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Klein. 2005. A core-tools statistical NLP course. In Proceedings of the Second ACL Workshop on Ef- fective Tools and Methodologies for Teaching NLP and CL, pages 23-27, Ann Arbor, Michigan. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Combining open-source with research to re-engineer a handson introductory NLP course", |
|
"authors": [ |
|
{ |
|
"first": "Nitin", |
|
"middle": [], |
|
"last": "Madnani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Dorr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Third Workshop on Issues in Teaching Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "71--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitin Madnani and Bonnie J. Dorr. 2008. Combining open-source with research to re-engineer a hands- on introductory NLP course. In Proceedings of the Third Workshop on Issues in Teaching Compu- tational Linguistics, pages 71-79, Columbus, Ohio. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Recurrent neural network based language model", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Karafi\u00e1t", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Burget", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "\u010cernock\u00fd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, M. Karafi\u00e1t, L. Burget, J.\u010cernock\u00fd, and S. Khudanpur. 2010. Recurrent neural network based language model. In Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Thumbs up? Sentiment Classification using Machine Learning Techniques", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shivakumar", |
|
"middle": [], |
|
"last": "Vaithyanathan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 2002 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--86", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1118693.1118704" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Pang, Lillian Lee, and Shivakumar Vaithyanathan. 2002. Thumbs up? Sentiment Classification using Machine Learning Techniques. In Proceedings of the 2002 Conference on Empirical Methods in Natu- ral Language Processing (EMNLP 2002), pages 79- 86. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Paszke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Massa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lerer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Chanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Killeen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeming", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Gimelshein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Antiga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alban", |
|
"middle": [], |
|
"last": "Desmaison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "K\u00f6pf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zach", |
|
"middle": [], |
|
"last": "Devito", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas K\u00f6pf, Edward Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Jun- jie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. arXiv cs.CL 1912.01703.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "GloVe: Global Vectors for Word Representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global Vectors for Word Representation. In Proceedings of the 2014 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "SQuAD: 100,000+ Questions for Machine Comprehension of Text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2383--2392", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1264" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ Questions for Machine Comprehension of Text. In Proceed- ings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2383-2392, Austin, Texas. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Bidirectional attention flow for machine comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Minjoon", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Kembhavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016. Bidirectional atten- tion flow for machine comprehension. arXiv cs.CL 1611.01603.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Perelygin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1631--1642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. 2013. Recursive Deep Models for Semantic Compositionality Over a Sentiment Tree- bank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1631-1642, Seattle, Washington, USA. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Tjong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition. In Proceedings of the Seventh Conference on Natu- ral Language Learning at HLT-NAACL 2003, pages 142-147.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Attention is All you Need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is All you Need. In Advances in Neural Information Pro- cessing Systems (NeurIPS).", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Learning to Parse Database Queries Using Inductive Logic Programming", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Zelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proceedings of the Thirteenth National Conference on Artificial Intelligence", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John M. Zelle and Raymond J. Mooney. 1996. Learn- ing to Parse Database Queries Using Inductive Logic Programming. In Proceedings of the Thirteenth Na- tional Conference on Artificial Intelligence -Volume 2 (AAAI).", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A Robust Risk Minimization based Named Entity Recognition System", |
|
"authors": [ |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "204--207", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tong Zhang and David Johnson. 2003. A Robust Risk Minimization based Named Entity Recognition Sys- tem. In Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003, pages 204-207.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"content": "<table><tr><td>: Breakdown of assignments. The concepts and model components in each are designed to build on one</td></tr><tr><td>another. A gray square indicates partial engagement with a concept, typically when students are already given the</td></tr><tr><td>needed component or it isn't a focus of the assignment.</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td>: Book chapters associated with each assign-</td></tr><tr><td>ment; gray indicates an imperfect match. Our courses</td></tr><tr><td>use a combination of Eisenstein, ad hoc lecture notes</td></tr><tr><td>on certain topics, and academic papers.</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |