|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T04:34:13.121457Z" |
|
}, |
|
"title": "Itih\u0101sa: A large-scale corpus for Sanskrit to English translation", |
|
"authors": [ |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Aralikatte", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Copenhagen", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Miryam", |
|
"middle": [], |
|
"last": "De Lhoneux", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Copenhagen", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Kunchukuttan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft AI and Research", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Copenhagen", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This work introduces Itih\u0101sa, a large-scale translation dataset containing 93,000 pairs of Sanskrit shlokas and their English translations. The shlokas are extracted from two Indian epics viz., The R\u0101m\u0101yana and The Mah\u0101bh\u0101rata. We first describe the motivation behind the curation of such a dataset and follow up with empirical analysis to bring out its nuances. We then benchmark the performance of standard translation models on this corpus and show that even state-of-the-art transformer architectures perform poorly, emphasizing the complexity of the dataset. 1", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This work introduces Itih\u0101sa, a large-scale translation dataset containing 93,000 pairs of Sanskrit shlokas and their English translations. The shlokas are extracted from two Indian epics viz., The R\u0101m\u0101yana and The Mah\u0101bh\u0101rata. We first describe the motivation behind the curation of such a dataset and follow up with empirical analysis to bring out its nuances. We then benchmark the performance of standard translation models on this corpus and show that even state-of-the-art transformer architectures perform poorly, emphasizing the complexity of the dataset. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Sanskrit is one of the oldest languages in the world and most Indo-European languages are influenced by it (Beekes, 1995) . There are about 30 million pieces of Sanskrit literature available to us today (Goyal et al., 2012) , most of which have not been digitized. Among those that have been, few have been translated. The main reason for this is the lack of expertise and funding. An automatic translation system would not only aid and accelerate this process, but it would also help in democratizing the knowledge, history, and culture present in this literature. In this work, we present Itih\u0101sa, a large-scale Sanskrit-English translation corpus consisting of more than 93,000 shlokas and their translations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 121, |
|
"text": "(Beekes, 1995)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 223, |
|
"text": "(Goyal et al., 2012)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Itih\u0101sa, literally meaning 'it happened this way' is a collection of historical records of important events in Indian history. These bodies of work are mostly composed in the form of verses or shlokas, a poetic form which usually consists of four parts containing eight syllables each (Fig. 1) . The most important among these works are The R\u0101m\u0101yana and The Mah\u0101bh\u0101rata. The R\u0101m\u0101yana, which describes the events in the life of Lord R\u0101ma, consists of 24,000 verses. The Mah\u0101bh\u0101rata details the war between cousins of the Kuru dynasty, in 100,000 verses. The Mah\u0101bh\u0101rata is the longest poem ever written with about 1.8 million words in total and is roughly ten times the length of the Iliad and the Odyssey combined.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 293, |
|
"text": "(Fig. 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Only two authors have attempted to translate the unabridged versions of both The R\u0101m\u0101yana and The Mah\u0101bh\u0101rata to English: Manmatha N\u0101th Dutt in the 1890s and Bibek Debroy in the 2010s. M. N. Dutt was a prolific translator whose works are now in the public domain. These works are published in a shloka-wise format as shown in Fig. 1 which makes it easy to automatically align shlokas with their translations. Though many of M. N. Dutt's works are freely available, we choose to extract data from The R\u0101m\u0101yana (V\u0101lmiki and Dutt, 1891) , and The Mah\u0101bh\u0101rata (Dwaip\u0101yana and Dutt, 1895), mainly due to its size and popularity. As per our knowledge, this is the biggest Sanskrit-English translation dataset to be released in the public domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 509, |
|
"end": 533, |
|
"text": "(V\u0101lmiki and Dutt, 1891)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 332, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We also train and evaluate standard translation systems on this dataset. In both translation directions, we use Moses as an SMT baseline, and Transformer-based seq2seq models as NMT baselines (see \u00a74). We find that models which are generally on-par with human performance on other translation tasks, perform poorly on Itih\u0101sa, with the best models scoring between 7-8 BLEU points. This indicates the complex nature of the dataset (see \u00a73 for a detailed analysis of the dataset and its vocabulary).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Motivation The main motivation behind this work is to provide an impetus for the Indic NLP community to build better translation systems for Sanskrit. Additionally, since The R\u0101m\u0101yana and The Mah\u0101bh\u0101rata are so pervasive in Indian culture, and have been translated to all major Indian languages, there is a possibility of creating an n-way parallel corpus with Sanskrit as the pivot language, similar to Europarl (Koehn, 2005) and PMIndia (Haddow and Kirefu, 2020) datasets. The existence of Sanskrit-English parallel data has other advantages as well. Due to Sanskrit being a morphologically rich, agglutinative, and highly inflexive, complex concepts can be expressed in compact forms by combining individual words through Sandhi and Samasa. 2 This also enables a speaker to potentially create an infinite number of unique words in Sanskrit. Having a parallel corpus can help us induce word translations through bilingual dictionary induction (S\u00f8gaard et al., 2018) . It also allows us to use English as a surrogate language for tasks like knowledge base population. Constituency or dependency parsing, NER, and word sense disambiguation can be improved using indirect supervision (T\u00e4ckstr\u00f6m, 2013) . Essentially, a parallel corpus allows us to apply a plethora of transfer learning techniques to improve 2 Sandhi refers to the concatenation of words, where the edge characters combine to form a new one. Samasa can be thought of as being similar to elliptic constructions in English where certain phrases are elided since their meaning is obvious from the context. NLP tools for Sanskrit.", |
|
"cite_spans": [ |
|
{ |
|
"start": 413, |
|
"end": 426, |
|
"text": "(Koehn, 2005)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 464, |
|
"text": "(Haddow and Kirefu, 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 945, |
|
"end": 967, |
|
"text": "(S\u00f8gaard et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1183, |
|
"end": 1200, |
|
"text": "(T\u00e4ckstr\u00f6m, 2013)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The translated works of The R\u0101m\u0101yana and The Mah\u0101bh\u0101rata were published in four and nine volumes respectively. 3 All volumes have a standard two-column format as shown in Fig. 2 . Each page has a header with the chapter name and page number separated from the main text by a horizontal line. The two columns of text are separated by a vertical line. The process of data preparation can be divided into (i) automatic OCR extraction, and (ii) manual inspection for alignment errors.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 177, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Preparation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The OCR systems we experimented with performed poorly on digitized documents due to their two-column format. They often fail to recognize line breaks which result in the concatenation of text present in different columns. To mitigate this issue, we use an edge detector 4 to find the largest horizontal and vertical lines, and using the indices of the detected lines, split the original page horizontally and vertically to remove the header and separate the columns (see Fig. 2 ). We then input the single-column documents to Google Cloud's OCR API 5 to extract text from them. To verify the accuracy of the extracted text, one chapter from each volume (13 chapters in total) is manually checked for mistakes. We find that the extracted text is more than 99% and 97% accurate in Sanskrit and English respectively. The surprising accuracy of Devanagari OCR can be attributed to the distinctness of its alphabet. For English, this number decreases as the OCR system often misclassifies similar-looking characters (viz., e and c, i and l, etc.).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 471, |
|
"end": 477, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic Extraction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Manual Inspection An important limitation of the OCR system is its misclassification of alignment spaces and line breaks. It sometimes wrongly treats large gaps between words as line breaks and the rest of the text on the line is moved to the end of the paragraph which results in translations being misaligned with its shlokas. Therefore, the output of all 13 volumes was manually inspected and such misalignments were corrected. 6 Upon manual inspection, other kinds of errors were discovered and corrected where possible. 7 These errors can be categorized as follows: (i) print errors: this type of error is caused by occluded or faded text, smudged ink, etc. An example can be seen in Fig. 3a, ( ii) input errors: these are human errors during typesetting the volumes which include typos (Fig, 3b) , exclusion of words, inclusion of spurious words, etc., (iii) subjective errors: these are contextual errors in the translation itself. For example, in Fig. 3c , the word dharma is incorrectly translated as 'religion' instead of 'righteousness', and (iv) OCR errors: these errors arise from the underlying OCR system. An example of such errors is the improper handling of split words across lines in the Devanagari script. If the OCR system encounters a hyphen as the last character of a line, the entire line is ignored. In general, print errors are corrected as much as possible, subjective errors are retained for originality, and other types of errors are corrected when encountered. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 431, |
|
"end": 432, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 525, |
|
"end": 526, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 689, |
|
"end": 699, |
|
"text": "Fig. 3a, (", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 792, |
|
"end": 801, |
|
"text": "(Fig, 3b)", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 955, |
|
"end": 962, |
|
"text": "Fig. 3c", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic Extraction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In total, we extract 19,371 translation pairs from 642 chapters of The R\u0101m\u0101yana and 73,659 translation pairs from 2,110 chapters of The Mah\u0101bh\u0101rata. It should be noted that these numbers do not correspond to the number of shlokas because, in the original volumes, shlokas are sometimes split and often combined to make the English translations flow better. We reserve 80% of the data from each text for training MT systems and use the rest for evaluation. From the evaluation set, 33% is used for development and 67% for testing. The absolute sizes of the split data are shown in Tab. 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Due to Sanskrit's agglutinative nature, the dataset is asymmetric in the sense that, the number of words required to convey the same information, is less in Sanskrit when compared with English. The R\u0101m\u0101yana's English translations, on average, have 2.54 words for every word in its shloka. This value is even larger in The Mah\u0101bh\u0101rata with 2.82 translated words per shloka word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "This effect is clearly seen when we consider the vocabulary sizes and the percentage of common tokens between the texts. For this, we tokenize the data with two different tokenization schemes: wordlevel and byte-pair encoding (Sennrich et al., 2016, BPE) . For word-level tokenization, the translations of The R\u0101m\u0101yana (The Mah\u0101bh\u0101rata) have 16,820 (31,055) unique word tokens, and the shlokas have 66,072 (184,407) tokens. The English vocabularies have 11,579 common tokens which is 68.8% of The R\u0101m\u0101yana's and 37.3% of The Mah\u0101bh\u0101rata's. But the overlap percentages drop significantly for the Sanskrit vocabularies. In this case, we find 21,635 common tokens which amount to an overlap of 32.7% and 11.7% respectively. As shown in Fig. 4 , this trend holds for BPE tokenization as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 226, |
|
"end": 254, |
|
"text": "(Sennrich et al., 2016, BPE)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 733, |
|
"end": 739, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We train one SMT and five NMT systems in both directions and report the (i) character n-gram F-score, (ii) token accuracy, (iii) BLEU (Papineni et al., 2002) , and (iv) Translation Edit Ratio (Snover et al., 2006, TER) scores in Tab. 2. For SMT, we use Moses (Koehn et al., 2007) and for NMT, we use sequence-to-sequence (seq2seq) Transformers (Vaswani et al., 2017) . We train the seq2seq models from scratch by initializing the encoders and decoders with standard BERT (B2B) architectures. These Tiny, Mini, Small, Medium, and Base models have 2/128, 4/256, 4/512, 8/512, and 12/768 layers/dimensions respectively. See Turc et al. (2019) for more details. In our early experiments, we also tried initializing the encoders and decoders with weights from pre-trained Indic language models like MuRIL (Khanuja et al., 2021) , but they showed poor performance and thus are not reported here.", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 157, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 192, |
|
"end": 218, |
|
"text": "(Snover et al., 2006, TER)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 279, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 366, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 639, |
|
"text": "Turc et al. (2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 800, |
|
"end": 822, |
|
"text": "(Khanuja et al., 2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Implementation Details All models are trained using HuggingFace Transformers (Wolf et al., 2020) . Both source and target sequences are truncated at 128 tokens. We train WordPiece tokenizers on our dataset and use them for all models. Adam optimizer (Kingma and Ba, 2014) with weight decay of 0.01, and learning rate of 5 \u00d7 10^-5 is used. All models are trained for 100 epochs. The learning rate is warmed up over 8,000 steps and decayed later with a linear scheduler. We use a batch size of 128, and use standard cross-entropy loss with no label smoothing. We run into memory errors on bigger models (medium and base), but maintain the effective batch-size and optimization steps by introducing gradient accumulation and increasing the number of epochs, respectively. Also, to reduce the total training time of bigger models, we stop training if the BLEU score does not improve over 10 epochs. During generation, we use a beam size of 5 and compute all metrics against truncated references.",
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 96, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We see that all models perform poorly, with low token accuracy and high TER.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "While the English to Sanskrit (E2S) models get better with size, this pattern is not clearly seen in Sanskrit to English (S2E) models. Surprisingly for S2E models, the token accuracy progressively decreases as their size increases. Also, Moses has the best TER among S2E models which suggests that the seq2seq models have not been able to learn even simple co-occurrences between source and target tokens. This leads us to hypothesize that the Sanskrit encoders produce sub-optimal representations. One way to improve them would be to add a Sandhi-splitting step to the tokenization pipeline, thereby decreasing the Sanskrit vocabulary size. Another natural extension to improve the quality of representations would be to initialize the encoders with a pre-trained language model. \u0935\u0936\u094d\u0935\u093e \u092e\u0924\u094d\u0930\u0935\u091a\u0903 \u0936\u094d\u0930\u0941 \u0924\u094d\u0935\u093e \u0932\u0915\u094d\u0937\u094d\u092e\u0923\u0903 \u0938\u0939\u0932\u0915\u094d\u0937\u094d\u092e\u0923\u0903\u0964 \u0935\u0936\u094d\u0935\u093e \u092e\u0924\u094d\u0930\u0935\u091a\u0903 \u0936\u094d\u0930\u0941 \u0924\u094d\u0935\u093e \u0935\u0936\u094d\u0935\u093e \u092e\u0924\u094d\u0930\u094b\u093d\u092c\u094d\u0930\u0935\u0940 \u0926\u0926\u092e \u094d\u0965 Though it is clear that there is a large scope for improvement, the models are able to learn some interesting features of the dataset. Fig. 5 shows a random gold translation pair and the small model's prediction. Though we see repetitions of phrases and semantic errors, the prediction follows the meter in which the original shlokas are written, i.e. it also consists of 4 parts containing 8 syllables each.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1010, |
|
"end": 1016, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Early translation efforts from Sanskrit to English were limited to the construction of dictionaries by Western Indologists (M\u00fcller, 1866; Monier-Williams, 1899) . Over the years, though notable translation works like Ganguli (1883) have been published, the lack of digitization has been a bottleneck hindering any meaningful progress towards automatic translation systems. This has changed recently, at least for monolingual data, with the curation of digital libraries like GRETIL 8 and DCS 9 . Currently, the largest freely available repository of translations are for The Bhagavadgita (Prabhakar et al., 2000) and The R\u0101m\u0101yana (Geervani et al., 1989) . However, labeled datasets for other tasks, like the ones proposed in (Kulkarni, 2013; Bhardwaj et al., 2018; have resulted in parsers (Krishna et al., 2020 (Krishna et al., , 2021 and sandhi splitters (Aralikatte et al., 2018; which are pre-cursors to modular translation systems. Though there have been attempts at building Sanskrit translation tools (Bharati and Kulkarni, 2009) , they are mostly rule-based and rely on manual intervention. We hope that the availability of the Itih\u0101sa corpus pushes the domain towards endto-end systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 137, |
|
"text": "(M\u00fcller, 1866;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 160, |
|
"text": "Monier-Williams, 1899)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 588, |
|
"end": 612, |
|
"text": "(Prabhakar et al., 2000)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 653, |
|
"text": "(Geervani et al., 1989)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 725, |
|
"end": 741, |
|
"text": "(Kulkarni, 2013;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 742, |
|
"end": 764, |
|
"text": "Bhardwaj et al., 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 790, |
|
"end": 811, |
|
"text": "(Krishna et al., 2020", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 812, |
|
"end": 835, |
|
"text": "(Krishna et al., , 2021", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 857, |
|
"end": 882, |
|
"text": "(Aralikatte et al., 2018;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1008, |
|
"end": 1036, |
|
"text": "(Bharati and Kulkarni, 2009)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this work, we introduce Itih\u0101sa, a large-scale dataset containing more than 93,000 pairs of Sanskrit shlokas and their English translations from The R\u0101m\u0101yana and The Mah\u0101bh\u0101rata. First, we detail the extraction process which includes an automated OCR phase and a manual alignment phase. Next, we analyze the dataset to give an intuition of its asymmetric nature and to showcase its complexities. Lastly, we train state-of-the-art translation models which perform poorly, proving the necessity for more work in this area.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The processed and split dataset can be found at https://github.com/rahular/itihasa and a human-readable version can be found at http://rahular.com/itihasa.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The digitized (scanned) PDF versions of these books are available at https://hinduscriptures.in 4 We invert the color scheme and apply a small dilation for better edge detection using OpenCV (Bradski, 2000). 5 More information can be found at https://cloud.google.com/vision/docs/pdf",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This was a time-consuming process and the first author inspected the output manually over the course of one year. 7 It was not feasible for the authors to correct every error, especially the lexical ones. The most common error that exists in the corpus is the swapping of e and c. For example, 'thcir' instead of 'their'. Though these errors can easily be corrected using automated tools like the one proposed in (Boyd, 2018), it is out-of-scope of this paper and is left for future work.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://gretil.sub.uni-goettingen.de/gretil.html 9 http://www.sanskrit-linguistics.org/dcs/index.php",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the reviewers for their valuable feedback. Rahul Aralikatte and Anders S\u00f8gaard are funded by a Google Focused Research Award.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Sanskrit sandhi splitting using seq2(seq)2", |
|
"authors": [ |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Aralikatte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neelamadhav", |
|
"middle": [], |
|
"last": "Gantayat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naveen", |
|
"middle": [], |
|
"last": "Panwar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anush", |
|
"middle": [], |
|
"last": "Sankaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Senthil", |
|
"middle": [], |
|
"last": "Mani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4909--4914", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1530" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rahul Aralikatte, Neelamadhav Gantayat, Naveen Pan- war, Anush Sankaran, and Senthil Mani. 2018. San- skrit sandhi splitting using seq2(seq)2. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4909-4914, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Comparative Indo-European Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Stephen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Beekes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Stephen Paul Beekes. 1995. Comparative Indo- European Linguistics. Benjamins Amsterdam.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Anusaaraka: an accessor cum machine translator", |
|
"authors": [ |
|
{ |
|
"first": "Akshar", |
|
"middle": [], |
|
"last": "Bharati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amba", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akshar Bharati and Amba Kulkarni. 2009. Anusaaraka: an accessor cum machine transla- tor. Department of Sanskrit Studies, University of Hyderabad, Hyderabad, pages 1-75.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "SandhiKosh: A benchmark corpus for evaluating Sanskrit sandhi tools", |
|
"authors": [ |
|
{ |
|
"first": "Shubham", |
|
"middle": [], |
|
"last": "Bhardwaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neelamadhav", |
|
"middle": [], |
|
"last": "Gantayat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Chaturvedi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Garg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumeet", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shubham Bhardwaj, Neelamadhav Gantayat, Nikhil Chaturvedi, Rahul Garg, and Sumeet Agarwal. 2018. SandhiKosh: A benchmark corpus for evaluat- ing Sanskrit sandhi tools. In Proceedings of the Eleventh International Conference on Language Re- sources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Using Wikipedia edits in low resource grammatical error correction", |
|
"authors": [ |
|
{ |
|
"first": "Adriane", |
|
"middle": [], |
|
"last": "Boyd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop W-NUT: The 4th Workshop on Noisy User-generated Text", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--84", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6111" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adriane Boyd. 2018. Using Wikipedia edits in low resource grammatical error correction. In Proceed- ings of the 2018 EMNLP Workshop W-NUT: The 4th Workshop on Noisy User-generated Text, pages 79-84, Brussels, Belgium. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The OpenCV Library. Dr. Dobb's Journal of Software Tools", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Bradski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Bradski. 2000. The OpenCV Library. Dr. Dobb's Journal of Software Tools.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The Mahabharata", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kisari Mohan Ganguli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kisari Mohan Ganguli. 1883. The Mahabharata.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Valmiki ramayana", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Geervani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Kamala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Subba Rao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P Geervani, K Kamala, and V. V. Subba Rao. 1989. Valmiki ramayana.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A distributed platform for Sanskrit processing", |
|
"authors": [ |
|
{ |
|
"first": "Pawan", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G\u00e9rard", |
|
"middle": [], |
|
"last": "Huet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amba", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Scharf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Bunker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "The COLING 2012 Organizing Committee", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1011--1028", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pawan Goyal, G\u00e9rard Huet, Amba Kulkarni, Peter Scharf, and Ralph Bunker. 2012. A distributed plat- form for Sanskrit processing. In Proceedings of COLING 2012, pages 1011-1028, Mumbai, India. The COLING 2012 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Pmindia -a collection of parallel corpora of languages of india", |
|
"authors": [ |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Faheem", |
|
"middle": [], |
|
"last": "Kirefu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barry Haddow and Faheem Kirefu. 2020. Pmindia -a collection of parallel corpora of languages of india.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Subhash Chandra Bose Gali, Vish Subramanian, and Partha Talukdar. 2021. Muril: Multilingual representations for indian languages", |
|
"authors": [ |
|
{ |
|
"first": "Simran", |
|
"middle": [], |
|
"last": "Khanuja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diksha", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarvesh", |
|
"middle": [], |
|
"last": "Mehtani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Savya", |
|
"middle": [], |
|
"last": "Khosla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atreyee", |
|
"middle": [], |
|
"last": "Dey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Balaji", |
|
"middle": [], |
|
"last": "Gopalan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilip", |
|
"middle": [], |
|
"last": "Kumar Margam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pooja", |
|
"middle": [], |
|
"last": "Aggarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajiv", |
|
"middle": [], |
|
"last": "Teja Nagipogu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shachi", |
|
"middle": [], |
|
"last": "Dave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shruti", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simran Khanuja, Diksha Bansal, Sarvesh Mehtani, Savya Khosla, Atreyee Dey, Balaji Gopalan, Dilip Kumar Margam, Pooja Aggarwal, Rajiv Teja Nagipogu, Shachi Dave, Shruti Gupta, Subhash Chandra Bose Gali, Vish Subramanian, and Partha Talukdar. 2021. Muril: Multilingual representations for indian languages.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Europarl: A Parallel Corpus for Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Conference Proceedings: the tenth Machine Translation Summit", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2005. Europarl: A Parallel Corpus for Statistical Machine Translation. In Conference Proceedings: the tenth Machine Translation Summit, pages 79-86, Phuket, Thailand. AAMT, AAMT.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Constantin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Herbst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meeting of the As- sociation for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Ses- sions, pages 177-180, Prague, Czech Republic. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Keep it surprisingly simple: A simple first order graph based parsing model for joint morphosyntactic parsing in Sanskrit", |
|
"authors": [ |
|
{ |
|
"first": "Amrith", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashim", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Garasangi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavankumar", |
|
"middle": [], |
|
"last": "Satuluri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawan", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4791--4797", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.388" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amrith Krishna, Ashim Gupta, Deepak Garasangi, Pa- vankumar Satuluri, and Pawan Goyal. 2020. Keep it surprisingly simple: A simple first order graph based parsing model for joint morphosyntactic pars- ing in Sanskrit. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 4791-4797, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Pavankumar Satuluri, and Pawan Goyal. 2021. A Graph-Based Framework for Structured Prediction Tasks in Sanskrit", |
|
"authors": [ |
|
{ |
|
"first": "Amrith", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bishal", |
|
"middle": [], |
|
"last": "Santra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashim", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Computational Linguistics", |
|
"volume": "46", |
|
"issue": "4", |
|
"pages": "785--845", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/coli_a_00390" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amrith Krishna, Bishal Santra, Ashim Gupta, Pavanku- mar Satuluri, and Pawan Goyal. 2021. A Graph- Based Framework for Structured Prediction Tasks in Sanskrit. Computational Linguistics, 46(4):785- 845.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Sanskrit segmentation revisited", |
|
"authors": [ |
|
{ |
|
"first": "Sriram", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amba", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sriram Krishnan and Amba Kulkarni. 2020. Sanskrit segmentation revisited.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Validation and normalization of dcs corpus using sanskrit heritage tools to build a tagged gold corpus", |
|
"authors": [ |
|
{ |
|
"first": "Sriram", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amba", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G\u00e9rard", |
|
"middle": [], |
|
"last": "Huet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sriram Krishnan, Amba Kulkarni, and G\u00e9rard Huet. 2020. Validation and normalization of dcs corpus using sanskrit heritage tools to build a tagged gold corpus.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A deterministic dependency parser with dynamic programming for Sanskrit", |
|
"authors": [ |
|
{ |
|
"first": "Amba", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Second International Conference on Dependency Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "157--166", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amba Kulkarni. 2013. A deterministic dependency parser with dynamic programming for Sanskrit. In Proceedings of the Second International Conference on Dependency Linguistics (DepLing 2013), pages 157-166, Prague, Czech Republic. Charles Univer- sity in Prague, Matfyzpress, Prague, Czech Repub- lic.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A Sanskrit-English dictionary : etymologically and philologically arranged with special reference to cognate Indo-European languages", |
|
"authors": [ |
|
{ |
|
"first": "Monier", |
|
"middle": [], |
|
"last": "Monier-Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Monier Monier-Williams. 1899. A Sanskrit-English dictionary : etymologically and philologically ar- ranged with special reference to cognate Indo- European languages. Clarendon Press.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A Sanskrit Grammar for Beginners: In Devan\u00e2gar\u00ee and Roman Letters Throughout. Handbooks for the study of Sanskrit. Longmans, Green, and Company", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F.M. M\u00fcller. 1866. A Sanskrit Grammar for Begin- ners: In Devan\u00e2gar\u00ee and Roman Letters Through- out. Handbooks for the study of Sanskrit. Long- mans, Green, and Company.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A study of translation error rate with targeted human annotation", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linnea", |
|
"middle": [], |
|
"last": "Micciulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Association for Machine Transaltion in the Americas", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and Ralph Weischedel. 2006. A study of translation error rate with targeted human annota- tion. In In Proceedings of the Association for Ma- chine Transaltion in the Americas (AMTA 2006.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "On the limitations of unsupervised bilingual dictionary induction", |
|
"authors": [ |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "778--788", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1072" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anders S\u00f8gaard, Sebastian Ruder, and Ivan Vuli\u0107. 2018. On the limitations of unsupervised bilingual dictionary induction. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 778- 788, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Predicting Linguistic Structure with Incomplete and Cross-Lingual Supervision", |
|
"authors": [ |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oscar T\u00e4ckstr\u00f6m. 2013. Predicting Linguistic Struc- ture with Incomplete and Cross-Lingual Supervision. Ph.D. thesis, Uppsala University.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Well-read students learn better: On the importance of pre-training compact models", |
|
"authors": [ |
|
{ |
|
"first": "Iulia", |
|
"middle": [], |
|
"last": "Turc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.08962v2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iulia Turc, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Well-read students learn better: On the importance of pre-training compact models. arXiv preprint arXiv:1908.08962v2.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaiser", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS'17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6000--6010", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, undefine- dukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of the 31st Interna- tional Conference on Neural Information Processing Systems, NIPS'17, page 6000-6010, Red Hook, NY, USA. Curran Associates Inc.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "von Platen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Plu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Canwen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teven", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Scao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariama", |
|
"middle": [], |
|
"last": "Drame", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "Lhoest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-demos.6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-art natural language process- ing. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "\u092e\u093e \u0928\u0937\u093e\u0926 \u092a\u094d\u0930 \u0924\u0937\u094d\u0920\u093e\u0902 \u0924\u094d\u0935\u092e\u0917\u092e\u0936\u094d\u0936\u093e\u0936\u094d\u0935\u0924\u0940\u0938\u094d\u0938\u092e\u093e:\u0964 \u092f\u0924\u094d\u0915\u094d\u0930\u094c\u091e\u094d\u091a \u092e\u0925\u0941 \u0928\u093e\u0926\u0947 \u0915\u092e\u0935\u0927\u0940: \u0915\u093e\u092e\u092e\u094b \u0939\u0924\u092e \u094d\u0965 O fowler, since you have slain one of a pair of Krau\u00f1cas, you shall never attain prosperity (respect)! Figure 1: An introductory shloka from The R\u0101m\u0101yana. The four parts with eight syllables each are highlighted with different shades of gray.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Pre-processing Pipeline: The three steps shown here are: (i) invert the colour scheme of the PDF and dilate every detectable edge, (ii) find the indices of the longest vertical and horizontal lines in the page, and (iii) split the original PDF along the found separator lines.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "(a) Print error. (b) Input error. (c) Subjective error.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"text": "Different types of errors found in the original text while performing manual inspection.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"text": "Comparison of vocabulary sizes. Sanskrit's morphological and agglutinative nature accounts for the large number of unique tokens in the vocabularies.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"text": "A gold sentence and shloka from the test set, and its corresponding small model prediction.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"text": "Size of training, development, and test sets.", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>: Character F1, Token accuracy, BLEU, and</td></tr><tr><td>TER scores for Moses and Transformer models. Scores</td></tr><tr><td>marked with (\u2193) are better if they are lower.</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"text": "\u0935\u0936\u094d\u0935\u093e \u092e\u0924\u094d\u0930\u0935\u091a\u0903 \u0936\u094d\u0930\u0941 \u0924\u094d\u0935\u093e \u0930\u093e\u0918\u0935\u0903 \u0938\u0939\u0932\u0915\u094d\u0937\u094d\u092e\u0923\u0903\u0964 \u0935\u0938\u094d\u092e\u092f\u0902 \u092a\u0930\u092e\u0902 \u0917\u0924\u094d\u0935\u093e \u0935\u0936\u094d\u0935\u093e \u092e\u0924\u094d\u0930\u092e\u0925\u093e\u092c\u094d\u0930\u0935\u0940\u0924 \u094d\u0965", |
|
"num": null, |
|
"content": "<table><tr><td>Pred.</td></tr></table>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |