diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..247839551770ff8ae24a7f51c282d1e07fbe8d77
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py
@@ -0,0 +1,1961 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You may not
+# use this file except in compliance with the License. A copy of the License
+# is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+# express or implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+
+# This defines data locations.
+# Right below are test sets.
+# Beneath each test set, we define the location to download the test data.
+# The other keys are each language pair contained in the tarball, and the respective locations of the source and reference data within each.
+# Many of these are *.sgm files, which are processed to produce plain text that can be used by this script.
+# The canonical location of unpacked, processed data is $SACREBLEU_DIR/$TEST/$SOURCE-$TARGET.{$SOURCE,$TARGET}
+from .fake_sgml import FakeSGMLDataset, WMTAdditionDataset
+from .iwslt_xml import IWSLTXMLDataset
+from .plain_text import PlainTextDataset
+from .tsv import TSVDataset
+from .wmt_xml import WMTXMLDataset
+
+# Detailed document metadata annotation in the form DocumentID -> CountryCode - Domain - OptionalFinegrainedCountryCode
+# While the annotation is subjective with many unclear cases, it may provide useful insights
+# when applied to large data (TODO: annotate all documents from recent WMT years, at least for origlang=en; consider renaming "world" to "other").
+_SUBSETS = { + "wmt18": "rt.com.68098=US-crime guardian.181611=US-politics bbc.310963=GB-sport washpost.116881=US-politics scotsman.104228=GB-sport timemagazine.75207=OTHER-world-ID " + "euronews-en.117981=OTHER-crime-AE smh.com.au.242810=US-crime msnbc.53726=US-politics euronews-en.117983=US-politics msnbc.53894=US-crime theglobeandmail.com.62700=US-business " + "bbc.310870=OTHER-world-AF reuters.196698=US-politics latimes.231739=US-sport thelocal.51929=OTHER-world-SE cbsnews.198694=US-politics reuters.196718=OTHER-sport-RU " + "abcnews.255599=EU-sport nytimes.127256=US-entertainment scotsman.104225=GB-politics dailymail.co.uk.233026=GB-scitech independent.181088=GB-entertainment " + "brisbanetimes.com.au.181614=OTHER-business-AU washpost.116837=US-politics dailymail.co.uk.232928=GB-world thelocal.51916=OTHER-politics-IT bbc.310871=US-crime " + "nytimes.127392=EU-business-DE euronews-en.118001=EU-scitech-FR washpost.116866=OTHER-crime-MX dailymail.co.uk.233025=OTHER-scitech-CA latimes.231829=US-crime " + "guardian.181662=US-entertainment msnbc.53731=US-crime rt.com.68127=OTHER-sport-RU latimes.231782=US-business latimes.231840=US-sport reuters.196711=OTHER-scitech " + "guardian.181666=GB-entertainment novinite.com.24019=US-politics smh.com.au.242750=OTHER-scitech guardian.181610=US-politics telegraph.364393=OTHER-crime-ZA " + "novinite.com.23995=EU-world dailymail.co.uk.233028=GB-scitech independent.181071=GB-sport telegraph.364538=GB-scitech timemagazine.75193=US-politics " + "independent.181096=US-entertainment upi.140602=OTHER-world-AF bbc.310946=GB-business independent.181052=EU-sport ", + "wmt19": "bbc.381790=GB-politics rt.com.91337=OTHER-politics-MK nytimes.184853=US-world upi.176266=US-crime guardian.221754=GB-business dailymail.co.uk.298595=GB-business " + "cnbc.com.6790=US-politics nytimes.184837=OTHER-world-ID upi.176249=GB-sport euronews-en.153835=OTHER-world-ID dailymail.co.uk.298732=GB-crime telegraph.405401=GB-politics " + "newsweek.51331=OTHER-crime-CN abcnews.306815=US-world cbsnews.248384=US-politics reuters.218882=GB-politics cbsnews.248387=US-crime abcnews.306764=OTHER-world-MX " + "reuters.218888=EU-politics bbc.381780=GB-crime bbc.381746=GB-sport euronews-en.153800=EU-politics bbc.381679=GB-crime bbc.381735=GB-crime newsweek.51338=US-world " + "bbc.381765=GB-crime cnn.304489=US-politics reuters.218863=OTHER-world-ID nytimes.184860=OTHER-world-ID cnn.304404=US-crime bbc.381647=US-entertainment " + "abcnews.306758=OTHER-politics-MX cnbc.com.6772=US-business reuters.218932=OTHER-politics-MK upi.176251=GB-sport reuters.218921=US-sport cnn.304447=US-politics " + "guardian.221679=GB-politics scotsman.133765=GB-sport scotsman.133804=GB-entertainment guardian.221762=OTHER-politics-BO cnbc.com.6769=US-politics " + "dailymail.co.uk.298692=EU-entertainment scotsman.133744=GB-world reuters.218911=US-sport newsweek.51310=US-politics independent.226301=US-sport reuters.218923=EU-sport " + "reuters.218861=US-politics dailymail.co.uk.298759=US-world scotsman.133791=GB-sport cbsnews.248484=EU-scitech dailymail.co.uk.298630=US-scitech " + "newsweek.51329=US-entertainment bbc.381701=GB-crime dailymail.co.uk.298738=GB-entertainment bbc.381669=OTHER-world-CN foxnews.94512=US-politics " + "guardian.221718=GB-entertainment dailymail.co.uk.298686=GB-politics cbsnews.248471=US-politics newsweek.51318=US-entertainment rt.com.91335=US-politics " + "newsweek.51300=US-politics cnn.304478=US-politics upi.176275=US-politics telegraph.405422=OTHER-world-ID reuters.218933=US-politics 
newsweek.51328=US-politics " + "newsweek.51307=US-business bbc.381692=GB-world independent.226346=GB-entertainment bbc.381646=GB-sport reuters.218914=US-sport scotsman.133758=EU-sport " + "rt.com.91350=EU-world scotsman.133773=GB-scitech rt.com.91334=EU-crime bbc.381680=GB-politics guardian.221756=US-politics scotsman.133783=GB-politics cnn.304521=US-sport " + "dailymail.co.uk.298622=GB-politics bbc.381789=GB-sport dailymail.co.uk.298644=GB-business dailymail.co.uk.298602=GB-world scotsman.133753=GB-sport " + "independent.226317=GB-entertainment nytimes.184862=US-politics thelocal.65969=OTHER-world-SY nytimes.184825=US-politics cnbc.com.6784=US-politics nytimes.184804=US-politics " + "nytimes.184830=US-politics scotsman.133801=GB-sport cnbc.com.6770=US-business bbc.381760=GB-crime reuters.218865=OTHER-world-ID newsweek.51339=US-crime " + "euronews-en.153797=OTHER-world-ID abcnews.306774=US-crime dailymail.co.uk.298696=GB-politics abcnews.306755=US-politics reuters.218909=US-crime " + "independent.226349=OTHER-sport-RU newsweek.51330=US-politics bbc.381705=GB-sport newsweek.51340=OTHER-world-ID cbsnews.248411=OTHER-world-FM abcnews.306776=US-crime " + "bbc.381694=GB-entertainment rt.com.91356=US-world telegraph.405430=GB-entertainment telegraph.405404=EU-world bbc.381749=GB-world telegraph.405413=US-politics " + "bbc.381736=OTHER-politics-KP cbsnews.248394=US-politics nytimes.184822=US-world telegraph.405408=US-politics euronews-en.153799=OTHER-politics-SY " + "euronews-en.153826=EU-sport cnn.304400=US-world", +} + +SUBSETS = { + k: {d.split("=")[0]: d.split("=")[1] for d in v.split()} + for (k, v) in _SUBSETS.items() +} +COUNTRIES = sorted(list({v.split("-")[0] for v in SUBSETS["wmt19"].values()})) +DOMAINS = sorted(list({v.split("-")[1] for v in SUBSETS["wmt19"].values()})) + +DATASETS = { + # wmt + "wmt23": WMTXMLDataset( + "wmt23", + data=["https://github.com/wmt-conference/wmt23-news-systems/archive/refs/tags/v.0.1.tar.gz"], + description="Official evaluation and system data for WMT23.", + md5=["63576405e4ce07130a19ad76ba7eb75b"], + langpairs={ + "cs-uk": ["wmt23-news-systems-v.0.1/xml/wmttest2023.cs-uk.all.xml"], + "de-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.de-en.all.xml"], + "en-cs": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-cs.all.xml"], + "en-de": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-de.all.xml"], + "en-he": { + "path": "wmt23-news-systems-v.0.1/xml/wmttest2023.en-he.all.xml", + "refs": ["refB"], + }, + "en-ja": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-ja.all.xml"], + "en-ru": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-ru.all.xml"], + "en-uk": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-uk.all.xml"], + "en-zh": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-zh.all.xml"], + "he-en": { + "path": "wmt23-news-systems-v.0.1/xml/wmttest2023.he-en.all.xml", + "refs": ["refB"], + }, + "ja-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.ja-en.all.xml"], + "ru-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.ru-en.all.xml"], + "uk-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.uk-en.all.xml"], + "zh-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.zh-en.all.xml"], + }, + refs=["refA"], + ), + "wmt22": WMTXMLDataset( + "wmt22", + data=["https://github.com/wmt-conference/wmt22-news-systems/archive/refs/tags/v1.1.tar.gz"], + description="Official evaluation and system data for WMT22.", + md5=["0840978b9b50b9ac3b2b081e37d620b9"], + langpairs={ + "cs-en": { + "path": "wmt22-news-systems-1.1/xml/wmttest2022.cs-en.all.xml", + "refs": ["B"], + }, + "cs-uk": 
["wmt22-news-systems-1.1/xml/wmttest2022.cs-uk.all.xml"], + "de-en": ["wmt22-news-systems-1.1/xml/wmttest2022.de-en.all.xml"], + "de-fr": ["wmt22-news-systems-1.1/xml/wmttest2022.de-fr.all.xml"], + "en-cs": { + "path": "wmt22-news-systems-1.1/xml/wmttest2022.en-cs.all.xml", + "refs": ["B"], + }, + "en-de": ["wmt22-news-systems-1.1/xml/wmttest2022.en-de.all.xml"], + "en-hr": ["wmt22-news-systems-1.1/xml/wmttest2022.en-hr.all.xml"], + "en-ja": ["wmt22-news-systems-1.1/xml/wmttest2022.en-ja.all.xml"], + "en-liv": ["wmt22-news-systems-1.1/xml/wmttest2022.en-liv.all.xml"], + "en-ru": ["wmt22-news-systems-1.1/xml/wmttest2022.en-ru.all.xml"], + "en-uk": ["wmt22-news-systems-1.1/xml/wmttest2022.en-uk.all.xml"], + "en-zh": ["wmt22-news-systems-1.1/xml/wmttest2022.en-zh.all.xml"], + "fr-de": ["wmt22-news-systems-1.1/xml/wmttest2022.fr-de.all.xml"], + "ja-en": ["wmt22-news-systems-1.1/xml/wmttest2022.ja-en.all.xml"], + "liv-en": { + "path": "wmt22-news-systems-1.1/xml/wmttest2022.liv-en.all.xml", + # no translator because data is English-original + "refs": [""], + }, + "ru-en": ["wmt22-news-systems-1.1/xml/wmttest2022.ru-en.all.xml"], + "ru-sah": { + "path": "wmt22-news-systems-1.1/xml/wmttest2022.ru-sah.all.xml", + # no translator because data is Yakut-original + "refs": [""], + }, + "sah-ru": ["wmt22-news-systems-1.1/xml/wmttest2022.sah-ru.all.xml"], + "uk-cs": ["wmt22-news-systems-1.1/xml/wmttest2022.uk-cs.all.xml"], + "uk-en": ["wmt22-news-systems-1.1/xml/wmttest2022.uk-en.all.xml"], + "zh-en": ["wmt22-news-systems-1.1/xml/wmttest2022.zh-en.all.xml"], + }, + # the default reference to use with this dataset + refs=["A"], + ), + "wmt21/systems": WMTXMLDataset( + "wmt21/systems", + data=["https://github.com/wmt-conference/wmt21-news-systems/archive/refs/tags/v1.3.tar.gz"], + description="WMT21 system output.", + md5=["a6aee4099da58f98f71eb3fac1694237"], + langpairs={ + "de-fr": ["wmt21-news-systems-1.3/xml/newstest2021.de-fr.all.xml"], + "en-de": ["wmt21-news-systems-1.3/xml/newstest2021.en-de.all.xml"], + "en-ha": ["wmt21-news-systems-1.3/xml/newstest2021.en-ha.all.xml"], + "en-is": ["wmt21-news-systems-1.3/xml/newstest2021.en-is.all.xml"], + "en-ja": ["wmt21-news-systems-1.3/xml/newstest2021.en-ja.all.xml"], + "fr-de": ["wmt21-news-systems-1.3/xml/newstest2021.fr-de.all.xml"], + "ha-en": ["wmt21-news-systems-1.3/xml/newstest2021.ha-en.all.xml"], + "is-en": ["wmt21-news-systems-1.3/xml/newstest2021.is-en.all.xml"], + "ja-en": ["wmt21-news-systems-1.3/xml/newstest2021.ja-en.all.xml"], + "zh-en": ["wmt21-news-systems-1.3/xml/newstest2021.zh-en.all.xml"], + "en-zh": ["wmt21-news-systems-1.3/xml/newstest2021.en-zh.all.xml"], + "cs-en": ["wmt21-news-systems-1.3/xml/newstest2021.cs-en.all.xml"], + "de-en": ["wmt21-news-systems-1.3/xml/newstest2021.de-en.all.xml"], + "en-cs": ["wmt21-news-systems-1.3/xml/newstest2021.en-cs.all.xml"], + "en-ru": ["wmt21-news-systems-1.3/xml/newstest2021.en-ru.all.xml"], + "ru-en": ["wmt21-news-systems-1.3/xml/newstest2021.ru-en.all.xml"], + "bn-hi": ["wmt21-news-systems-1.3/xml/florestest2021.bn-hi.all.xml"], + "hi-bn": ["wmt21-news-systems-1.3/xml/florestest2021.hi-bn.all.xml"], + "xh-zu": ["wmt21-news-systems-1.3/xml/florestest2021.xh-zu.all.xml"], + "zu-xh": ["wmt21-news-systems-1.3/xml/florestest2021.zu-xh.all.xml"], + }, + # the reference to use with this dataset + refs=["A"], + ), + "wmt21": WMTXMLDataset( + "wmt21", + data=["https://data.statmt.org/wmt21/translation-task/test.tgz"], + description="Official evaluation data for WMT21.", + 
md5=["32e7ab995bc318414375d60f0269af92"], + langpairs={ + "de-fr": ["test/newstest2021.de-fr.xml"], + "en-de": ["test/newstest2021.en-de.xml"], + "en-ha": ["test/newstest2021.en-ha.xml"], + "en-is": ["test/newstest2021.en-is.xml"], + "en-ja": ["test/newstest2021.en-ja.xml"], + "fr-de": ["test/newstest2021.fr-de.xml"], + "ha-en": ["test/newstest2021.ha-en.xml"], + "is-en": ["test/newstest2021.is-en.xml"], + "ja-en": ["test/newstest2021.ja-en.xml"], + "zh-en": ["test/newstest2021.zh-en.xml"], + "en-zh": ["test/newstest2021.en-zh.xml"], + "cs-en": ["test/newstest2021.cs-en.xml"], + "de-en": ["test/newstest2021.de-en.xml"], + "en-cs": ["test/newstest2021.en-cs.xml"], + "en-ru": ["test/newstest2021.en-ru.xml"], + "ru-en": ["test/newstest2021.ru-en.xml"], + "bn-hi": ["test/florestest2021.bn-hi.xml"], + "hi-bn": ["test/florestest2021.hi-bn.xml"], + "xh-zu": ["test/florestest2021.xh-zu.xml"], + "zu-xh": ["test/florestest2021.zu-xh.xml"], + }, + # the reference to use with this dataset + refs=["A"], + ), + "wmt21/B": WMTXMLDataset( + "wmt21/B", + data=["https://data.statmt.org/wmt21/translation-task/test.tgz"], + description="Official evaluation data for WMT21 with reference B.", + md5=["32e7ab995bc318414375d60f0269af92"], + langpairs={ + "cs-en": ["test/newstest2021.cs-en.xml"], + "de-en": ["test/newstest2021.de-en.xml"], + "en-cs": ["test/newstest2021.en-cs.xml"], + "en-ru": ["test/newstest2021.en-ru.xml"], + "en-zh": ["test/newstest2021.en-zh.xml"], + "ru-en": ["test/newstest2021.ru-en.xml"], + }, + # the reference to use with this dataset + refs=["B"], + ), + "wmt21/AB": WMTXMLDataset( + "wmt21/AB", + data=["https://data.statmt.org/wmt21/translation-task/test.tgz"], + description="Official evaluation data for WMT21 with references A and B.", + md5=["32e7ab995bc318414375d60f0269af92"], + langpairs={ + "cs-en": ["test/newstest2021.cs-en.xml"], + "de-en": ["test/newstest2021.de-en.xml"], + "en-de": ["test/newstest2021.en-de.xml"], + "en-cs": ["test/newstest2021.en-cs.xml"], + "en-ru": ["test/newstest2021.en-ru.xml"], + "en-zh": ["test/newstest2021.en-zh.xml"], + "ru-en": ["test/newstest2021.ru-en.xml"], + }, + # the reference to use with this dataset + refs=["A", "B"], + ), + "wmt21/C": WMTXMLDataset( + "wmt21/C", + data=["https://data.statmt.org/wmt21/translation-task/test.tgz"], + description="Official evaluation data for WMT21 with reference C", + md5=["32e7ab995bc318414375d60f0269af92"], + langpairs={ + "en-de": ["test/newstest2021.en-de.xml"], + }, + # the reference to use with this dataset + refs=["C"], + ), + "wmt21/AC": WMTXMLDataset( + "wmt21/AC", + data=["https://data.statmt.org/wmt21/translation-task/test.tgz"], + description="Official evaluation data for WMT21 with references A and C", + md5=["32e7ab995bc318414375d60f0269af92"], + langpairs={ + "en-de": ["test/newstest2021.en-de.xml"], + }, + # the reference to use with this dataset + refs=["A", "C"], + ), + "wmt21/D": WMTXMLDataset( + "wmt21/D", + data=["https://data.statmt.org/wmt21/translation-task/test.tgz"], + description="Official evaluation data for WMT21 with reference D", + md5=["32e7ab995bc318414375d60f0269af92"], + langpairs={ + "en-de": ["test/newstest2021.en-de.xml"], + }, + # the reference to use with this dataset + refs=["D"], + ), + "wmt21/dev": WMTXMLDataset( + "wmt21/dev", + data=["https://data.statmt.org/wmt21/translation-task/dev.tgz"], + description="Development data for WMT21,if multiple references are available, the first one is used.", + md5=["165da59ac8dfb5b7cafd7e90b1cac672"], + langpairs={ + "en-ha": 
["dev/xml/newsdev2021.en-ha.xml"], + "ha-en": ["dev/xml/newsdev2021.ha-en.xml"], + "en-is": ["dev/xml/newsdev2021.en-is.xml"], + "is-en": ["dev/xml/newsdev2021.is-en.xml"], + }, + # datasets are bidirectional in origin, so use both refs + refs=["A", ""], + ), + "wmt20/tworefs": FakeSGMLDataset( + "wmt20/tworefs", + data=["https://data.statmt.org/wmt20/translation-task/test.tgz"], + description="WMT20 news test sets with two references", + md5=["3b1f777cfd2fb15ccf66e9bfdb2b1699"], + langpairs={ + "de-en": [ + "sgm/newstest2020-deen-src.de.sgm", + "sgm/newstest2020-deen-ref.en.sgm", + "sgm/newstestB2020-deen-ref.en.sgm", + ], + "en-de": [ + "sgm/newstest2020-ende-src.en.sgm", + "sgm/newstest2020-ende-ref.de.sgm", + "sgm/newstestB2020-ende-ref.de.sgm", + ], + "en-zh": [ + "sgm/newstest2020-enzh-src.en.sgm", + "sgm/newstest2020-enzh-ref.zh.sgm", + "sgm/newstestB2020-enzh-ref.zh.sgm", + ], + "ru-en": [ + "sgm/newstest2020-ruen-src.ru.sgm", + "sgm/newstest2020-ruen-ref.en.sgm", + "sgm/newstestB2020-ruen-ref.en.sgm", + ], + "zh-en": [ + "sgm/newstest2020-zhen-src.zh.sgm", + "sgm/newstest2020-zhen-ref.en.sgm", + "sgm/newstestB2020-zhen-ref.en.sgm", + ], + }, + ), + "wmt20": FakeSGMLDataset( + "wmt20", + data=["https://data.statmt.org/wmt20/translation-task/test.tgz"], + description="Official evaluation data for WMT20", + md5=["3b1f777cfd2fb15ccf66e9bfdb2b1699"], + langpairs={ + "cs-en": [ + "sgm/newstest2020-csen-src.cs.sgm", + "sgm/newstest2020-csen-ref.en.sgm", + ], + "de-en": [ + "sgm/newstest2020-deen-src.de.sgm", + "sgm/newstest2020-deen-ref.en.sgm", + ], + "de-fr": [ + "sgm/newstest2020-defr-src.de.sgm", + "sgm/newstest2020-defr-ref.fr.sgm", + ], + "en-cs": [ + "sgm/newstest2020-encs-src.en.sgm", + "sgm/newstest2020-encs-ref.cs.sgm", + ], + "en-de": [ + "sgm/newstest2020-ende-src.en.sgm", + "sgm/newstest2020-ende-ref.de.sgm", + ], + "en-iu": [ + "sgm/newstest2020-eniu-src.en.sgm", + "sgm/newstest2020-eniu-ref.iu.sgm", + ], + "en-ja": [ + "sgm/newstest2020-enja-src.en.sgm", + "sgm/newstest2020-enja-ref.ja.sgm", + ], + "en-km": [ + "sgm/newstest2020-enkm-src.en.sgm", + "sgm/newstest2020-enkm-ref.km.sgm", + ], + "en-pl": [ + "sgm/newstest2020-enpl-src.en.sgm", + "sgm/newstest2020-enpl-ref.pl.sgm", + ], + "en-ps": [ + "sgm/newstest2020-enps-src.en.sgm", + "sgm/newstest2020-enps-ref.ps.sgm", + ], + "en-ru": [ + "sgm/newstest2020-enru-src.en.sgm", + "sgm/newstest2020-enru-ref.ru.sgm", + ], + "en-ta": [ + "sgm/newstest2020-enta-src.en.sgm", + "sgm/newstest2020-enta-ref.ta.sgm", + ], + "en-zh": [ + "sgm/newstest2020-enzh-src.en.sgm", + "sgm/newstest2020-enzh-ref.zh.sgm", + ], + "fr-de": [ + "sgm/newstest2020-frde-src.fr.sgm", + "sgm/newstest2020-frde-ref.de.sgm", + ], + "iu-en": [ + "sgm/newstest2020-iuen-src.iu.sgm", + "sgm/newstest2020-iuen-ref.en.sgm", + ], + "ja-en": [ + "sgm/newstest2020-jaen-src.ja.sgm", + "sgm/newstest2020-jaen-ref.en.sgm", + ], + "km-en": [ + "sgm/newstest2020-kmen-src.km.sgm", + "sgm/newstest2020-kmen-ref.en.sgm", + ], + "pl-en": [ + "sgm/newstest2020-plen-src.pl.sgm", + "sgm/newstest2020-plen-ref.en.sgm", + ], + "ps-en": [ + "sgm/newstest2020-psen-src.ps.sgm", + "sgm/newstest2020-psen-ref.en.sgm", + ], + "ru-en": [ + "sgm/newstest2020-ruen-src.ru.sgm", + "sgm/newstest2020-ruen-ref.en.sgm", + ], + "ta-en": [ + "sgm/newstest2020-taen-src.ta.sgm", + "sgm/newstest2020-taen-ref.en.sgm", + ], + "zh-en": [ + "sgm/newstest2020-zhen-src.zh.sgm", + "sgm/newstest2020-zhen-ref.en.sgm", + ], + }, + ), + "wmt20/dev": FakeSGMLDataset( + "wmt20/dev", + 
data=["https://data.statmt.org/wmt20/translation-task/dev.tgz"], + description="Development data for tasks new to 2020.", + md5=["037f2b37aab74febbb1b2307dc2afb54"], + langpairs={ + "iu-en": [ + "dev/newsdev2020-iuen-src.iu.sgm", + "dev/newsdev2020-iuen-ref.en.sgm", + ], + "en-iu": [ + "dev/newsdev2020-eniu-src.en.sgm", + "dev/newsdev2020-eniu-ref.iu.sgm", + ], + "ja-en": [ + "dev/newsdev2020-jaen-src.ja.sgm", + "dev/newsdev2020-jaen-ref.en.sgm", + ], + "en-ja": [ + "dev/newsdev2020-enja-src.en.sgm", + "dev/newsdev2020-enja-ref.ja.sgm", + ], + "pl-en": [ + "dev/newsdev2020-plen-src.pl.sgm", + "dev/newsdev2020-plen-ref.en.sgm", + ], + "en-pl": [ + "dev/newsdev2020-enpl-src.en.sgm", + "dev/newsdev2020-enpl-ref.pl.sgm", + ], + "ta-en": [ + "dev/newsdev2020-taen-src.ta.sgm", + "dev/newsdev2020-taen-ref.en.sgm", + ], + "en-ta": [ + "dev/newsdev2020-enta-src.en.sgm", + "dev/newsdev2020-enta-ref.ta.sgm", + ], + }, + ), + "wmt20/robust/set1": PlainTextDataset( + "wmt20/robust/set1", + data=["https://data.statmt.org/wmt20/robustness-task/robustness20-3-sets.zip"], + md5=["a12ac9ebe89b72195041518dffc4a9d5"], + description="WMT20 robustness task, set 1", + langpairs={ + "en-ja": [ + "robustness20-3-sets/robustness20-set1-enja.en", + "robustness20-3-sets/robustness20-set1-enja.ja", + ], + "en-de": [ + "robustness20-3-sets/robustness20-set1-ende.en", + "robustness20-3-sets/robustness20-set1-ende.de", + ], + }, + ), + "wmt20/robust/set2": PlainTextDataset( + "wmt20/robust/set2", + data=["https://data.statmt.org/wmt20/robustness-task/robustness20-3-sets.zip"], + md5=["a12ac9ebe89b72195041518dffc4a9d5"], + description="WMT20 robustness task, set 2", + langpairs={ + "en-ja": [ + "robustness20-3-sets/robustness20-set2-enja.en", + "robustness20-3-sets/robustness20-set2-enja.ja", + ], + "ja-en": [ + "robustness20-3-sets/robustness20-set2-jaen.ja", + "robustness20-3-sets/robustness20-set2-jaen.en", + ], + }, + ), + "wmt20/robust/set3": PlainTextDataset( + "wmt20/robust/set3", + data=["https://data.statmt.org/wmt20/robustness-task/robustness20-3-sets.zip"], + md5=["a12ac9ebe89b72195041518dffc4a9d5"], + description="WMT20 robustness task, set 3", + langpairs={ + "de-en": [ + "robustness20-3-sets/robustness20-set3-deen.de", + "robustness20-3-sets/robustness20-set3-deen.en", + ], + }, + ), + "wmt19": FakeSGMLDataset( + "wmt19", + data=["https://data.statmt.org/wmt19/translation-task/test.tgz"], + description="Official evaluation data.", + md5=["84de7162d158e28403103b01aeefc39a"], + citation=r"""@proceedings{ws-2019-machine, + title = "Proceedings of the Fourth Conference on Machine Translation (Volume 1: Research Papers)", + editor = "Bojar, Ond{\v{r}}ej and + Chatterjee, Rajen and + Federmann, Christian and + Fishel, Mark and + Graham, Yvette and + Haddow, Barry and + Huck, Matthias and + Yepes, Antonio Jimeno and + Koehn, Philipp and + Martins, Andr{\'e} and + Monz, Christof and + Negri, Matteo and + N{\'e}v{\'e}ol, Aur{\'e}lie and + Neves, Mariana and + Post, Matt and + Turchi, Marco and + Verspoor, Karin", + month = aug, + year = "2019", + address = "Florence, Italy", + publisher = "Association for Computational Linguistics", + url = "https://www.aclweb.org/anthology/W19-5200", +}""", + langpairs={ + "cs-de": [ + "sgm/newstest2019-csde-src.cs.sgm", + "sgm/newstest2019-csde-ref.de.sgm", + ], + "de-cs": [ + "sgm/newstest2019-decs-src.de.sgm", + "sgm/newstest2019-decs-ref.cs.sgm", + ], + "de-en": [ + "sgm/newstest2019-deen-src.de.sgm", + "sgm/newstest2019-deen-ref.en.sgm", + ], + "de-fr": [ + 
"sgm/newstest2019-defr-src.de.sgm", + "sgm/newstest2019-defr-ref.fr.sgm", + ], + "en-cs": [ + "sgm/newstest2019-encs-src.en.sgm", + "sgm/newstest2019-encs-ref.cs.sgm", + ], + "en-de": [ + "sgm/newstest2019-ende-src.en.sgm", + "sgm/newstest2019-ende-ref.de.sgm", + ], + "en-fi": [ + "sgm/newstest2019-enfi-src.en.sgm", + "sgm/newstest2019-enfi-ref.fi.sgm", + ], + "en-gu": [ + "sgm/newstest2019-engu-src.en.sgm", + "sgm/newstest2019-engu-ref.gu.sgm", + ], + "en-kk": [ + "sgm/newstest2019-enkk-src.en.sgm", + "sgm/newstest2019-enkk-ref.kk.sgm", + ], + "en-lt": [ + "sgm/newstest2019-enlt-src.en.sgm", + "sgm/newstest2019-enlt-ref.lt.sgm", + ], + "en-ru": [ + "sgm/newstest2019-enru-src.en.sgm", + "sgm/newstest2019-enru-ref.ru.sgm", + ], + "en-zh": [ + "sgm/newstest2019-enzh-src.en.sgm", + "sgm/newstest2019-enzh-ref.zh.sgm", + ], + "fi-en": [ + "sgm/newstest2019-fien-src.fi.sgm", + "sgm/newstest2019-fien-ref.en.sgm", + ], + "fr-de": [ + "sgm/newstest2019-frde-src.fr.sgm", + "sgm/newstest2019-frde-ref.de.sgm", + ], + "gu-en": [ + "sgm/newstest2019-guen-src.gu.sgm", + "sgm/newstest2019-guen-ref.en.sgm", + ], + "kk-en": [ + "sgm/newstest2019-kken-src.kk.sgm", + "sgm/newstest2019-kken-ref.en.sgm", + ], + "lt-en": [ + "sgm/newstest2019-lten-src.lt.sgm", + "sgm/newstest2019-lten-ref.en.sgm", + ], + "ru-en": [ + "sgm/newstest2019-ruen-src.ru.sgm", + "sgm/newstest2019-ruen-ref.en.sgm", + ], + "zh-en": [ + "sgm/newstest2019-zhen-src.zh.sgm", + "sgm/newstest2019-zhen-ref.en.sgm", + ], + }, + ), + "wmt19/dev": FakeSGMLDataset( + "wmt19/dev", + data=["https://data.statmt.org/wmt19/translation-task/dev.tgz"], + description="Development data for tasks new to 2019.", + md5=["f2ec7af5947c19e0cacb3882eb208002"], + langpairs={ + "lt-en": [ + "dev/newsdev2019-lten-src.lt.sgm", + "dev/newsdev2019-lten-ref.en.sgm", + ], + "en-lt": [ + "dev/newsdev2019-enlt-src.en.sgm", + "dev/newsdev2019-enlt-ref.lt.sgm", + ], + "gu-en": [ + "dev/newsdev2019-guen-src.gu.sgm", + "dev/newsdev2019-guen-ref.en.sgm", + ], + "en-gu": [ + "dev/newsdev2019-engu-src.en.sgm", + "dev/newsdev2019-engu-ref.gu.sgm", + ], + "kk-en": [ + "dev/newsdev2019-kken-src.kk.sgm", + "dev/newsdev2019-kken-ref.en.sgm", + ], + "en-kk": [ + "dev/newsdev2019-enkk-src.en.sgm", + "dev/newsdev2019-enkk-ref.kk.sgm", + ], + }, + ), + "wmt19/google/ar": WMTAdditionDataset( + "wmt19/google/ar", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-ar.ref", + ], + description="Additional high-quality reference for WMT19/en-de.", + md5=["84de7162d158e28403103b01aeefc39a", "d66d9e91548ced0ac476f2390e32e2de"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_ar.wmt19-ende-ar.ref"], + }, + ), + "wmt19/google/arp": WMTAdditionDataset( + "wmt19/google/arp", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-arp.ref", + ], + description="Additional paraphrase of wmt19/google/ar.", + md5=["84de7162d158e28403103b01aeefc39a", "c70ea808cf2bff621ad7a8fddd4deca9"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n 
author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_arp.wmt19-ende-arp.ref"], + }, + ), + "wmt19/google/wmtp": WMTAdditionDataset( + "wmt19/google/wmtp", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-wmtp.ref", + ], + description="Additional paraphrase of the official WMT19 reference.", + md5=["84de7162d158e28403103b01aeefc39a", "587c660ee5fd44727f0db025b71c6a82"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_wmtp.wmt19-ende-wmtp.ref"], + }, + ), + "wmt19/google/hqr": WMTAdditionDataset( + "wmt19/google/hqr", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-hqr.ref", + ], + description="Best human-selected reference between wmt19 and wmt19/google/ar.", + md5=["84de7162d158e28403103b01aeefc39a", "d9221135f62d7152de041f5bfc8efaea"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_hqr.wmt19-ende-hqr.ref"], + }, + ), + "wmt19/google/hqp": WMTAdditionDataset( + "wmt19/google/hqp", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-hqp.ref", + ], + description="Best human-selected reference between wmt19/google/arp and wmt19/google/wmtp.", + md5=["84de7162d158e28403103b01aeefc39a", "b7c3a07a59c8eccea5367e9ec5417a8a"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_hqp.wmt19-ende-hqp.ref"], + }, + ), + "wmt19/google/hqall": WMTAdditionDataset( + "wmt19/google/hqall", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-hqall.ref", + ], + description="Best human-selected reference among the original official reference and the Google reference and paraphrases.", + md5=["84de7162d158e28403103b01aeefc39a", "edecf10ced59e10b703a6fbcf1fa9dfa"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_hqall.wmt19-ende-hqall.ref"], + }, + ), + "wmt18": FakeSGMLDataset( + "wmt18", + data=["https://data.statmt.org/wmt18/translation-task/test.tgz"], + 
md5=["f996c245ecffea23d0006fa4c34e9064"], + description="Official evaluation data.", + citation='@inproceedings{bojar-etal-2018-findings,\n title = "Findings of the 2018 Conference on Machine Translation ({WMT}18)",\n author = "Bojar, Ond{\v{r}}ej and\n Federmann, Christian and\n Fishel, Mark and\n Graham, Yvette and\n Haddow, Barry and\n Koehn, Philipp and\n Monz, Christof",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Shared Task Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6401",\n pages = "272--303",\n}', + langpairs={ + "cs-en": [ + "test/newstest2018-csen-src.cs.sgm", + "test/newstest2018-csen-ref.en.sgm", + ], + "de-en": [ + "test/newstest2018-deen-src.de.sgm", + "test/newstest2018-deen-ref.en.sgm", + ], + "en-cs": [ + "test/newstest2018-encs-src.en.sgm", + "test/newstest2018-encs-ref.cs.sgm", + ], + "en-de": [ + "test/newstest2018-ende-src.en.sgm", + "test/newstest2018-ende-ref.de.sgm", + ], + "en-et": [ + "test/newstest2018-enet-src.en.sgm", + "test/newstest2018-enet-ref.et.sgm", + ], + "en-fi": [ + "test/newstest2018-enfi-src.en.sgm", + "test/newstest2018-enfi-ref.fi.sgm", + ], + "en-ru": [ + "test/newstest2018-enru-src.en.sgm", + "test/newstest2018-enru-ref.ru.sgm", + ], + "et-en": [ + "test/newstest2018-eten-src.et.sgm", + "test/newstest2018-eten-ref.en.sgm", + ], + "fi-en": [ + "test/newstest2018-fien-src.fi.sgm", + "test/newstest2018-fien-ref.en.sgm", + ], + "ru-en": [ + "test/newstest2018-ruen-src.ru.sgm", + "test/newstest2018-ruen-ref.en.sgm", + ], + "en-tr": [ + "test/newstest2018-entr-src.en.sgm", + "test/newstest2018-entr-ref.tr.sgm", + ], + "tr-en": [ + "test/newstest2018-tren-src.tr.sgm", + "test/newstest2018-tren-ref.en.sgm", + ], + "en-zh": [ + "test/newstest2018-enzh-src.en.sgm", + "test/newstest2018-enzh-ref.zh.sgm", + ], + "zh-en": [ + "test/newstest2018-zhen-src.zh.sgm", + "test/newstest2018-zhen-ref.en.sgm", + ], + }, + ), + "wmt18/test-ts": FakeSGMLDataset( + "wmt18/test-ts", + data=["https://data.statmt.org/wmt18/translation-task/test-ts.tgz"], + md5=["5c621a34d512cc2dd74162ae7d00b320"], + description="Official evaluation sources with extra test sets interleaved.", + langpairs={ + "cs-en": ["test-ts/newstest2018-csen-src-ts.cs.sgm", "test-ts/newstest2018-csen-ref-ts.en.sgm"], + "de-en": ["test-ts/newstest2018-deen-src-ts.de.sgm", "test-ts/newstest2018-deen-ref-ts.en.sgm"], + "en-cs": ["test-ts/newstest2018-encs-src-ts.en.sgm", "test-ts/newstest2018-encs-ref-ts.cs.sgm"], + "en-de": ["test-ts/newstest2018-ende-src-ts.en.sgm", "test-ts/newstest2018-ende-ref-ts.de.sgm"], + "en-et": ["test-ts/newstest2018-enet-src-ts.en.sgm", "test-ts/newstest2018-enet-ref-ts.et.sgm"], + "en-fi": ["test-ts/newstest2018-enfi-src-ts.en.sgm", "test-ts/newstest2018-enfi-ref-ts.fi.sgm"], + "en-ru": ["test-ts/newstest2018-enru-src-ts.en.sgm", "test-ts/newstest2018-enru-ref-ts.ru.sgm"], + "et-en": ["test-ts/newstest2018-eten-src-ts.et.sgm", "test-ts/newstest2018-eten-ref-ts.en.sgm"], + "fi-en": ["test-ts/newstest2018-fien-src-ts.fi.sgm", "test-ts/newstest2018-fien-ref-ts.en.sgm"], + "ru-en": ["test-ts/newstest2018-ruen-src-ts.ru.sgm", "test-ts/newstest2018-ruen-ref-ts.en.sgm"], + "en-tr": ["test-ts/newstest2018-entr-src-ts.en.sgm", "test-ts/newstest2018-entr-ref-ts.tr.sgm"], + "tr-en": ["test-ts/newstest2018-tren-src-ts.tr.sgm", "test-ts/newstest2018-tren-ref-ts.en.sgm"], + "en-zh": ["test-ts/newstest2018-enzh-src-ts.en.sgm", 
"test-ts/newstest2018-enzh-ref-ts.zh.sgm"], + "zh-en": ["test-ts/newstest2018-zhen-src-ts.zh.sgm", "test-ts/newstest2018-zhen-ref-ts.en.sgm"], + }, + ), + "wmt18/dev": FakeSGMLDataset( + "wmt18/dev", + data=["https://data.statmt.org/wmt18/translation-task/dev.tgz"], + md5=["486f391da54a7a3247f02ebd25996f24"], + description="Development data (Estonian<>English).", + langpairs={ + "et-en": [ + "dev/newsdev2018-eten-src.et.sgm", + "dev/newsdev2018-eten-ref.en.sgm", + ], + "en-et": [ + "dev/newsdev2018-enet-src.en.sgm", + "dev/newsdev2018-enet-ref.et.sgm", + ], + }, + ), + "wmt17": FakeSGMLDataset( + "wmt17", + data=["https://data.statmt.org/wmt17/translation-task/test.tgz"], + md5=["86a1724c276004aa25455ae2a04cef26"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}", + langpairs={ + "cs-en": [ + "test/newstest2017-csen-src.cs.sgm", + "test/newstest2017-csen-ref.en.sgm", + ], + "de-en": [ + "test/newstest2017-deen-src.de.sgm", + "test/newstest2017-deen-ref.en.sgm", + ], + "en-cs": [ + "test/newstest2017-encs-src.en.sgm", + "test/newstest2017-encs-ref.cs.sgm", + ], + "en-de": [ + "test/newstest2017-ende-src.en.sgm", + "test/newstest2017-ende-ref.de.sgm", + ], + "en-fi": [ + "test/newstest2017-enfi-src.en.sgm", + "test/newstest2017-enfi-ref.fi.sgm", + ], + "en-lv": [ + "test/newstest2017-enlv-src.en.sgm", + "test/newstest2017-enlv-ref.lv.sgm", + ], + "en-ru": [ + "test/newstest2017-enru-src.en.sgm", + "test/newstest2017-enru-ref.ru.sgm", + ], + "en-tr": [ + "test/newstest2017-entr-src.en.sgm", + "test/newstest2017-entr-ref.tr.sgm", + ], + "en-zh": [ + "test/newstest2017-enzh-src.en.sgm", + "test/newstest2017-enzh-ref.zh.sgm", + ], + "fi-en": [ + "test/newstest2017-fien-src.fi.sgm", + "test/newstest2017-fien-ref.en.sgm", + ], + "lv-en": [ + "test/newstest2017-lven-src.lv.sgm", + "test/newstest2017-lven-ref.en.sgm", + ], + "ru-en": [ + "test/newstest2017-ruen-src.ru.sgm", + "test/newstest2017-ruen-ref.en.sgm", + ], + "tr-en": [ + "test/newstest2017-tren-src.tr.sgm", + "test/newstest2017-tren-ref.en.sgm", + ], + "zh-en": [ + "test/newstest2017-zhen-src.zh.sgm", + "test/newstest2017-zhen-ref.en.sgm", + ], + }, + ), + "wmt17/B": FakeSGMLDataset( + "wmt17/B", + data=["https://data.statmt.org/wmt17/translation-task/test.tgz"], + md5=["86a1724c276004aa25455ae2a04cef26"], + description="Additional reference for EN-FI and FI-EN.", + langpairs={ + "en-fi": [ + "test/newstestB2017-enfi-src.en.sgm", + "test/newstestB2017-enfi-ref.fi.sgm", + ], + }, + ), + "wmt17/tworefs": FakeSGMLDataset( + "wmt17/tworefs", + data=["https://data.statmt.org/wmt17/translation-task/test.tgz"], + md5=["86a1724c276004aa25455ae2a04cef26"], + description="Systems with two references.", + langpairs={ + "en-fi": [ + "test/newstest2017-enfi-src.en.sgm", + "test/newstest2017-enfi-ref.fi.sgm", + 
"test/newstestB2017-enfi-ref.fi.sgm", + ], + }, + ), + "wmt17/improved": FakeSGMLDataset( + "wmt17/improved", + data=["https://data.statmt.org/wmt17/translation-task/test-update-1.tgz"], + md5=["91dbfd5af99bc6891a637a68e04dfd41"], + description="Improved zh-en and en-zh translations.", + langpairs={ + "en-zh": ["newstest2017-enzh-src.en.sgm", "newstest2017-enzh-ref.zh.sgm"], + "zh-en": ["newstest2017-zhen-src.zh.sgm", "newstest2017-zhen-ref.en.sgm"], + }, + ), + "wmt17/dev": FakeSGMLDataset( + "wmt17/dev", + data=["https://data.statmt.org/wmt17/translation-task/dev.tgz"], + md5=["9b1aa63c1cf49dccdd20b962fe313989"], + description="Development sets released for new languages in 2017.", + langpairs={ + "en-lv": [ + "dev/newsdev2017-enlv-src.en.sgm", + "dev/newsdev2017-enlv-ref.lv.sgm", + ], + "en-zh": [ + "dev/newsdev2017-enzh-src.en.sgm", + "dev/newsdev2017-enzh-ref.zh.sgm", + ], + "lv-en": [ + "dev/newsdev2017-lven-src.lv.sgm", + "dev/newsdev2017-lven-ref.en.sgm", + ], + "zh-en": [ + "dev/newsdev2017-zhen-src.zh.sgm", + "dev/newsdev2017-zhen-ref.en.sgm", + ], + }, + ), + "wmt17/ms": WMTAdditionDataset( + "wmt17/ms", + data=[ + "https://github.com/MicrosoftTranslator/Translator-HumanParityData/archive/master.zip", + "https://data.statmt.org/wmt17/translation-task/test-update-1.tgz", + ], + md5=["18fdaa7a3c84cf6ef688da1f6a5fa96f", "91dbfd5af99bc6891a637a68e04dfd41"], + description="Additional Chinese-English references from Microsoft Research.", + citation="@inproceedings{achieving-human-parity-on-automatic-chinese-to-english-news-translation,\n author = {Hassan Awadalla, Hany and Aue, Anthony and Chen, Chang and Chowdhary, Vishal and Clark, Jonathan and Federmann, Christian and Huang, Xuedong and Junczys-Dowmunt, Marcin and Lewis, Will and Li, Mu and Liu, Shujie and Liu, Tie-Yan and Luo, Renqian and Menezes, Arul and Qin, Tao and Seide, Frank and Tan, Xu and Tian, Fei and Wu, Lijun and Wu, Shuangzhi and Xia, Yingce and Zhang, Dongdong and Zhang, Zhirui and Zhou, Ming},\n title = {Achieving Human Parity on Automatic Chinese to English News Translation},\n booktitle = {},\n year = {2018},\n month = {March},\n abstract = {Machine translation has made rapid advances in recent years. Millions of people are using it today in online translation systems and mobile applications in order to communicate across language barriers. The question naturally arises whether such systems can approach or achieve parity with human translations. In this paper, we first address the problem of how to define and accurately measure human parity in translation. We then describe Microsoft’s machine translation system and measure the quality of its translations on the widely used WMT 2017 news translation task from Chinese to English. We find that our latest neural machine translation system has reached a new state-of-the-art, and that the translation quality is at human parity when compared to professional human translations. 
We also find that it significantly exceeds the quality of crowd-sourced non-professional translations.},\n publisher = {},\n url = {https://www.microsoft.com/en-us/research/publication/achieving-human-parity-on-automatic-chinese-to-english-news-translation/},\n address = {},\n pages = {},\n journal = {},\n volume = {},\n chapter = {},\n isbn = {},\n}", + langpairs={ + "zh-en": [ + "newstest2017-zhen-src.zh.sgm", + "newstest2017-zhen-ref.en.sgm", + "Translator-HumanParityData-master/Translator-HumanParityData/References/Translator-HumanParityData-Reference-HT.txt", + "Translator-HumanParityData-master/Translator-HumanParityData/References/Translator-HumanParityData-Reference-PE.txt", + ], + }, + ), + "wmt16": FakeSGMLDataset( + "wmt16", + data=["https://data.statmt.org/wmt16/translation-task/test.tgz"], + md5=["3d809cd0c2c86adb2c67034d15c4e446"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2016:WMT1,\n author = {Bojar, Ond\\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huck, Matthias and Jimeno Yepes, Antonio and Koehn, Philipp and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Neveol, Aurelie and Neves, Mariana and Popel, Martin and Post, Matt and Rubino, Raphael and Scarton, Carolina and Specia, Lucia and Turchi, Marco and Verspoor, Karin and Zampieri, Marcos},\n title = {Findings of the 2016 Conference on Machine Translation},\n booktitle = {Proceedings of the First Conference on Machine Translation},\n month = {August},\n year = {2016},\n address = {Berlin, Germany},\n publisher = {Association for Computational Linguistics},\n pages = {131--198},\n url = {http://www.aclweb.org/anthology/W/W16/W16-2301}\n}", + langpairs={ + "cs-en": [ + "test/newstest2016-csen-src.cs.sgm", + "test/newstest2016-csen-ref.en.sgm", + ], + "de-en": [ + "test/newstest2016-deen-src.de.sgm", + "test/newstest2016-deen-ref.en.sgm", + ], + "en-cs": [ + "test/newstest2016-encs-src.en.sgm", + "test/newstest2016-encs-ref.cs.sgm", + ], + "en-de": [ + "test/newstest2016-ende-src.en.sgm", + "test/newstest2016-ende-ref.de.sgm", + ], + "en-fi": [ + "test/newstest2016-enfi-src.en.sgm", + "test/newstest2016-enfi-ref.fi.sgm", + ], + "en-ro": [ + "test/newstest2016-enro-src.en.sgm", + "test/newstest2016-enro-ref.ro.sgm", + ], + "en-ru": [ + "test/newstest2016-enru-src.en.sgm", + "test/newstest2016-enru-ref.ru.sgm", + ], + "en-tr": [ + "test/newstest2016-entr-src.en.sgm", + "test/newstest2016-entr-ref.tr.sgm", + ], + "fi-en": [ + "test/newstest2016-fien-src.fi.sgm", + "test/newstest2016-fien-ref.en.sgm", + ], + "ro-en": [ + "test/newstest2016-roen-src.ro.sgm", + "test/newstest2016-roen-ref.en.sgm", + ], + "ru-en": [ + "test/newstest2016-ruen-src.ru.sgm", + "test/newstest2016-ruen-ref.en.sgm", + ], + "tr-en": [ + "test/newstest2016-tren-src.tr.sgm", + "test/newstest2016-tren-ref.en.sgm", + ], + }, + ), + "wmt16/B": FakeSGMLDataset( + "wmt16/B", + data=["https://data.statmt.org/wmt16/translation-task/test.tgz"], + md5=["3d809cd0c2c86adb2c67034d15c4e446"], + description="Additional reference for EN-FI.", + langpairs={ + "en-fi": [ + "test/newstest2016-enfi-src.en.sgm", + "test/newstestB2016-enfi-ref.fi.sgm", + ], + }, + ), + "wmt16/tworefs": FakeSGMLDataset( + "wmt16/tworefs", + data=["https://data.statmt.org/wmt16/translation-task/test.tgz"], + md5=["3d809cd0c2c86adb2c67034d15c4e446"], + description="EN-FI with two references.", + langpairs={ + "en-fi": [ + "test/newstest2016-enfi-src.en.sgm", + "test/newstest2016-enfi-ref.fi.sgm", 
+ "test/newstestB2016-enfi-ref.fi.sgm", + ], + }, + ), + "wmt16/dev": FakeSGMLDataset( + "wmt16/dev", + data=["https://data.statmt.org/wmt16/translation-task/dev.tgz"], + md5=["4a3dc2760bb077f4308cce96b06e6af6"], + description="Development sets released for new languages in 2016.", + langpairs={ + "en-ro": [ + "dev/newsdev2016-enro-src.en.sgm", + "dev/newsdev2016-enro-ref.ro.sgm", + ], + "en-tr": [ + "dev/newsdev2016-entr-src.en.sgm", + "dev/newsdev2016-entr-ref.tr.sgm", + ], + "ro-en": [ + "dev/newsdev2016-roen-src.ro.sgm", + "dev/newsdev2016-roen-ref.en.sgm", + ], + "tr-en": [ + "dev/newsdev2016-tren-src.tr.sgm", + "dev/newsdev2016-tren-ref.en.sgm", + ], + }, + ), + "wmt15": FakeSGMLDataset( + "wmt15", + data=["https://statmt.org/wmt15/test.tgz"], + md5=["67e3beca15e69fe3d36de149da0a96df"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2015:WMT,\n author = {Bojar, Ond\\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Haddow, Barry and Huck, Matthias and Hokamp, Chris and Koehn, Philipp and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Scarton, Carolina and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2015 Workshop on Statistical Machine Translation},\n booktitle = {Proceedings of the Tenth Workshop on Statistical Machine Translation},\n month = {September},\n year = {2015},\n address = {Lisbon, Portugal},\n publisher = {Association for Computational Linguistics},\n pages = {1--46},\n url = {http://aclweb.org/anthology/W15-3001}\n}", + langpairs={ + "en-fr": [ + "test/newsdiscusstest2015-enfr-src.en.sgm", + "test/newsdiscusstest2015-enfr-ref.fr.sgm", + ], + "fr-en": [ + "test/newsdiscusstest2015-fren-src.fr.sgm", + "test/newsdiscusstest2015-fren-ref.en.sgm", + ], + "cs-en": [ + "test/newstest2015-csen-src.cs.sgm", + "test/newstest2015-csen-ref.en.sgm", + ], + "de-en": [ + "test/newstest2015-deen-src.de.sgm", + "test/newstest2015-deen-ref.en.sgm", + ], + "en-cs": [ + "test/newstest2015-encs-src.en.sgm", + "test/newstest2015-encs-ref.cs.sgm", + ], + "en-de": [ + "test/newstest2015-ende-src.en.sgm", + "test/newstest2015-ende-ref.de.sgm", + ], + "en-fi": [ + "test/newstest2015-enfi-src.en.sgm", + "test/newstest2015-enfi-ref.fi.sgm", + ], + "en-ru": [ + "test/newstest2015-enru-src.en.sgm", + "test/newstest2015-enru-ref.ru.sgm", + ], + "fi-en": [ + "test/newstest2015-fien-src.fi.sgm", + "test/newstest2015-fien-ref.en.sgm", + ], + "ru-en": [ + "test/newstest2015-ruen-src.ru.sgm", + "test/newstest2015-ruen-ref.en.sgm", + ], + }, + ), + "wmt14": FakeSGMLDataset( + "wmt14", + data=["https://statmt.org/wmt14/test-filtered.tgz"], + md5=["84c597844c1542e29c2aff23aaee4310"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2014:W14-33,\n author = {Bojar, Ondrej and Buck, Christian and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Leveling, Johannes and Monz, Christof and Pecina, Pavel and Post, Matt and Saint-Amand, Herve and Soricut, Radu and Specia, Lucia and Tamchyna, Ale\\v{s}},\n title = {Findings of the 2014 Workshop on Statistical Machine Translation},\n booktitle = {Proceedings of the Ninth Workshop on Statistical Machine Translation},\n month = {June},\n year = {2014},\n address = {Baltimore, Maryland, USA},\n publisher = {Association for Computational Linguistics},\n pages = {12--58},\n url = {http://www.aclweb.org/anthology/W/W14/W14-3302}\n}", + langpairs={ + "cs-en": [ + "test/newstest2014-csen-src.cs.sgm", + "test/newstest2014-csen-ref.en.sgm", + ], + 
"en-cs": [ + "test/newstest2014-csen-src.en.sgm", + "test/newstest2014-csen-ref.cs.sgm", + ], + "de-en": [ + "test/newstest2014-deen-src.de.sgm", + "test/newstest2014-deen-ref.en.sgm", + ], + "en-de": [ + "test/newstest2014-deen-src.en.sgm", + "test/newstest2014-deen-ref.de.sgm", + ], + "en-fr": [ + "test/newstest2014-fren-src.en.sgm", + "test/newstest2014-fren-ref.fr.sgm", + ], + "fr-en": [ + "test/newstest2014-fren-src.fr.sgm", + "test/newstest2014-fren-ref.en.sgm", + ], + "en-hi": [ + "test/newstest2014-hien-src.en.sgm", + "test/newstest2014-hien-ref.hi.sgm", + ], + "hi-en": [ + "test/newstest2014-hien-src.hi.sgm", + "test/newstest2014-hien-ref.en.sgm", + ], + "en-ru": [ + "test/newstest2014-ruen-src.en.sgm", + "test/newstest2014-ruen-ref.ru.sgm", + ], + "ru-en": [ + "test/newstest2014-ruen-src.ru.sgm", + "test/newstest2014-ruen-ref.en.sgm", + ], + }, + ), + "wmt14/full": FakeSGMLDataset( + "wmt14/full", + data=["https://statmt.org/wmt14/test-full.tgz"], + md5=["a8cd784e006feb32ac6f3d9ec7eb389a"], + description="Evaluation data released after official evaluation for further research.", + langpairs={ + "cs-en": [ + "test-full/newstest2014-csen-src.cs.sgm", + "test-full/newstest2014-csen-ref.en.sgm", + ], + "en-cs": [ + "test-full/newstest2014-csen-src.en.sgm", + "test-full/newstest2014-csen-ref.cs.sgm", + ], + "de-en": [ + "test-full/newstest2014-deen-src.de.sgm", + "test-full/newstest2014-deen-ref.en.sgm", + ], + "en-de": [ + "test-full/newstest2014-deen-src.en.sgm", + "test-full/newstest2014-deen-ref.de.sgm", + ], + "en-fr": [ + "test-full/newstest2014-fren-src.en.sgm", + "test-full/newstest2014-fren-ref.fr.sgm", + ], + "fr-en": [ + "test-full/newstest2014-fren-src.fr.sgm", + "test-full/newstest2014-fren-ref.en.sgm", + ], + "en-hi": [ + "test-full/newstest2014-hien-src.en.sgm", + "test-full/newstest2014-hien-ref.hi.sgm", + ], + "hi-en": [ + "test-full/newstest2014-hien-src.hi.sgm", + "test-full/newstest2014-hien-ref.en.sgm", + ], + "en-ru": [ + "test-full/newstest2014-ruen-src.en.sgm", + "test-full/newstest2014-ruen-ref.ru.sgm", + ], + "ru-en": [ + "test-full/newstest2014-ruen-src.ru.sgm", + "test-full/newstest2014-ruen-ref.en.sgm", + ], + }, + ), + "wmt13": FakeSGMLDataset( + "wmt13", + data=["https://statmt.org/wmt13/test.tgz"], + md5=["48eca5d02f637af44e85186847141f67"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2013:WMT,\n author = {Bojar, Ond\\v{r}ej and Buck, Christian and Callison-Burch, Chris and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Monz, Christof and Post, Matt and Soricut, Radu and Specia, Lucia},\n title = {Findings of the 2013 {Workshop on Statistical Machine Translation}},\n booktitle = {Proceedings of the Eighth Workshop on Statistical Machine Translation},\n month = {August},\n year = {2013},\n address = {Sofia, Bulgaria},\n publisher = {Association for Computational Linguistics},\n pages = {1--44},\n url = {http://www.aclweb.org/anthology/W13-2201}\n}", + langpairs={ + "cs-en": ["test/newstest2013-src.cs.sgm", "test/newstest2013-src.en.sgm"], + "en-cs": ["test/newstest2013-src.en.sgm", "test/newstest2013-src.cs.sgm"], + "de-en": ["test/newstest2013-src.de.sgm", "test/newstest2013-src.en.sgm"], + "en-de": ["test/newstest2013-src.en.sgm", "test/newstest2013-src.de.sgm"], + "es-en": ["test/newstest2013-src.es.sgm", "test/newstest2013-src.en.sgm"], + "en-es": ["test/newstest2013-src.en.sgm", "test/newstest2013-src.es.sgm"], + "fr-en": ["test/newstest2013-src.fr.sgm", "test/newstest2013-src.en.sgm"], + "en-fr": 
["test/newstest2013-src.en.sgm", "test/newstest2013-src.fr.sgm"], + "ru-en": ["test/newstest2013-src.ru.sgm", "test/newstest2013-src.en.sgm"], + "en-ru": ["test/newstest2013-src.en.sgm", "test/newstest2013-src.ru.sgm"], + }, + ), + "wmt12": FakeSGMLDataset( + "wmt12", + data=["https://statmt.org/wmt12/test.tgz"], + md5=["608232d34ebc4ba2ff70fead45674e47"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2012:WMT,\n author = {Callison-Burch, Chris and Koehn, Philipp and Monz, Christof and Post, Matt and Soricut, Radu and Specia, Lucia},\n title = {Findings of the 2012 Workshop on Statistical Machine Translation},\n booktitle = {Proceedings of the Seventh Workshop on Statistical Machine Translation},\n month = {June},\n year = {2012},\n address = {Montr{'e}al, Canada},\n publisher = {Association for Computational Linguistics},\n pages = {10--51},\n url = {http://www.aclweb.org/anthology/W12-3102}\n}", + langpairs={ + "cs-en": ["test/newstest2012-src.cs.sgm", "test/newstest2012-src.en.sgm"], + "en-cs": ["test/newstest2012-src.en.sgm", "test/newstest2012-src.cs.sgm"], + "de-en": ["test/newstest2012-src.de.sgm", "test/newstest2012-src.en.sgm"], + "en-de": ["test/newstest2012-src.en.sgm", "test/newstest2012-src.de.sgm"], + "es-en": ["test/newstest2012-src.es.sgm", "test/newstest2012-src.en.sgm"], + "en-es": ["test/newstest2012-src.en.sgm", "test/newstest2012-src.es.sgm"], + "fr-en": ["test/newstest2012-src.fr.sgm", "test/newstest2012-src.en.sgm"], + "en-fr": ["test/newstest2012-src.en.sgm", "test/newstest2012-src.fr.sgm"], + }, + ), + "wmt11": FakeSGMLDataset( + "wmt11", + data=["https://statmt.org/wmt11/test.tgz"], + md5=["b0c9680adf32d394aefc2b24e3a5937e"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2011:WMT,\n author = {Callison-Burch, Chris and Koehn, Philipp and Monz, Christof and Zaidan, Omar},\n title = {Findings of the 2011 Workshop on Statistical Machine Translation},\n booktitle = {Proceedings of the Sixth Workshop on Statistical Machine Translation},\n month = {July},\n year = {2011},\n address = {Edinburgh, Scotland},\n publisher = {Association for Computational Linguistics},\n pages = {22--64},\n url = {http://www.aclweb.org/anthology/W11-2103}\n}", + langpairs={ + "cs-en": ["newstest2011-src.cs.sgm", "newstest2011-src.en.sgm"], + "en-cs": ["newstest2011-src.en.sgm", "newstest2011-src.cs.sgm"], + "de-en": ["newstest2011-src.de.sgm", "newstest2011-src.en.sgm"], + "en-de": ["newstest2011-src.en.sgm", "newstest2011-src.de.sgm"], + "fr-en": ["newstest2011-src.fr.sgm", "newstest2011-src.en.sgm"], + "en-fr": ["newstest2011-src.en.sgm", "newstest2011-src.fr.sgm"], + "es-en": ["newstest2011-src.es.sgm", "newstest2011-src.en.sgm"], + "en-es": ["newstest2011-src.en.sgm", "newstest2011-src.es.sgm"], + }, + ), + "wmt10": FakeSGMLDataset( + "wmt10", + data=["https://statmt.org/wmt10/test.tgz"], + md5=["491cb885a355da5a23ea66e7b3024d5c"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2010:WMT,\n author = {Callison-Burch, Chris and Koehn, Philipp and Monz, Christof and Peterson, Kay and Przybocki, Mark and Zaidan, Omar},\n title = {Findings of the 2010 Joint Workshop on Statistical Machine Translation and Metrics for Machine Translation},\n booktitle = {Proceedings of the Joint Fifth Workshop on Statistical Machine Translation and MetricsMATR},\n month = {July},\n year = {2010},\n address = {Uppsala, Sweden},\n publisher = {Association for Computational Linguistics},\n 
pages = {17--53},\n note = {Revised August 2010},\n url = {http://www.aclweb.org/anthology/W10-1703}\n}", + langpairs={ + "cs-en": ["test/newstest2010-src.cz.sgm", "test/newstest2010-src.en.sgm"], + "en-cs": ["test/newstest2010-src.en.sgm", "test/newstest2010-src.cz.sgm"], + "de-en": ["test/newstest2010-src.de.sgm", "test/newstest2010-src.en.sgm"], + "en-de": ["test/newstest2010-src.en.sgm", "test/newstest2010-src.de.sgm"], + "es-en": ["test/newstest2010-src.es.sgm", "test/newstest2010-src.en.sgm"], + "en-es": ["test/newstest2010-src.en.sgm", "test/newstest2010-src.es.sgm"], + "fr-en": ["test/newstest2010-src.fr.sgm", "test/newstest2010-src.en.sgm"], + "en-fr": ["test/newstest2010-src.en.sgm", "test/newstest2010-src.fr.sgm"], + }, + ), + "wmt09": FakeSGMLDataset( + "wmt09", + data=["https://statmt.org/wmt09/test.tgz"], + md5=["da227abfbd7b666ec175b742a0d27b37"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2009:WMT-09,\n author = {Callison-Burch, Chris and Koehn, Philipp and Monz, Christof and Schroeder, Josh},\n title = {Findings of the 2009 {W}orkshop on {S}tatistical {M}achine {T}ranslation},\n booktitle = {Proceedings of the Fourth Workshop on Statistical Machine Translation},\n month = {March},\n year = {2009},\n address = {Athens, Greece},\n publisher = {Association for Computational Linguistics},\n pages = {1--28},\n url = {http://www.aclweb.org/anthology/W/W09/W09-0401}\n}", + langpairs={ + "cs-en": ["test/newstest2009-src.cz.sgm", "test/newstest2009-src.en.sgm"], + "en-cs": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.cz.sgm"], + "de-en": ["test/newstest2009-src.de.sgm", "test/newstest2009-src.en.sgm"], + "en-de": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.de.sgm"], + "es-en": ["test/newstest2009-src.es.sgm", "test/newstest2009-src.en.sgm"], + "en-es": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.es.sgm"], + "fr-en": ["test/newstest2009-src.fr.sgm", "test/newstest2009-src.en.sgm"], + "en-fr": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.fr.sgm"], + "hu-en": ["test/newstest2009-src.hu.sgm", "test/newstest2009-src.en.sgm"], + "en-hu": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.hu.sgm"], + "it-en": ["test/newstest2009-src.it.sgm", "test/newstest2009-src.en.sgm"], + "en-it": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.it.sgm"], + }, + ), + "wmt08": FakeSGMLDataset( + "wmt08", + data=["https://statmt.org/wmt08/test.tgz"], + md5=["0582e4e894a3342044059c894e1aea3d"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2008:WMT,\n author = {Callison-Burch, Chris and Fordyce, Cameron and Koehn, Philipp and Monz, Christof and Schroeder, Josh},\n title = {Further Meta-Evaluation of Machine Translation},\n booktitle = {Proceedings of the Third Workshop on Statistical Machine Translation},\n month = {June},\n year = {2008},\n address = {Columbus, Ohio},\n publisher = {Association for Computational Linguistics},\n pages = {70--106},\n url = {http://www.aclweb.org/anthology/W/W08/W08-0309}\n}", + langpairs={ + "cs-en": ["test/newstest2008-src.cz.sgm", "test/newstest2008-src.en.sgm"], + "en-cs": ["test/newstest2008-src.en.sgm", "test/newstest2008-src.cz.sgm"], + "de-en": ["test/newstest2008-src.de.sgm", "test/newstest2008-src.en.sgm"], + "en-de": ["test/newstest2008-src.en.sgm", "test/newstest2008-src.de.sgm"], + "es-en": ["test/newstest2008-src.es.sgm", "test/newstest2008-src.en.sgm"], + "en-es": ["test/newstest2008-src.en.sgm", 
"test/newstest2008-src.es.sgm"], + "fr-en": ["test/newstest2008-src.fr.sgm", "test/newstest2008-src.en.sgm"], + "en-fr": ["test/newstest2008-src.en.sgm", "test/newstest2008-src.fr.sgm"], + "hu-en": ["test/newstest2008-src.hu.sgm", "test/newstest2008-src.en.sgm"], + "en-hu": ["test/newstest2008-src.en.sgm", "test/newstest2008-src.hu.sgm"], + }, + ), + "wmt08/nc": FakeSGMLDataset( + "wmt08/nc", + data=["https://statmt.org/wmt08/test.tgz"], + md5=["0582e4e894a3342044059c894e1aea3d"], + description="Official evaluation data (news commentary).", + langpairs={ + "cs-en": ["test/nc-test2008-src.cz.sgm", "test/nc-test2008-src.en.sgm"], + "en-cs": ["test/nc-test2008-src.en.sgm", "test/nc-test2008-src.cz.sgm"], + }, + ), + "wmt08/europarl": FakeSGMLDataset( + "wmt08/europarl", + data=["https://statmt.org/wmt08/test.tgz"], + md5=["0582e4e894a3342044059c894e1aea3d"], + description="Official evaluation data (Europarl).", + langpairs={ + "de-en": ["test/test2008-src.de.sgm", "test/test2008-src.en.sgm"], + "en-de": ["test/test2008-src.en.sgm", "test/test2008-src.de.sgm"], + "es-en": ["test/test2008-src.es.sgm", "test/test2008-src.en.sgm"], + "en-es": ["test/test2008-src.en.sgm", "test/test2008-src.es.sgm"], + "fr-en": ["test/test2008-src.fr.sgm", "test/test2008-src.en.sgm"], + "en-fr": ["test/test2008-src.en.sgm", "test/test2008-src.fr.sgm"], + }, + ), + # iwslt + "iwslt17": IWSLTXMLDataset( + "iwslt17", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/ar/en-ar.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/ar/en/ar-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/ja/en-ja.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/ja/en/ja-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/ko/en-ko.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/ko/en/ko-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/zh/en/zh-en.tgz", + ], + md5=[ + "1849bcc3b006dc0642a8843b11aa7192", + "79bf7a2ef02d226875f55fb076e7e473", + "b68e7097b179491f6c466ef41ad72b9b", + "e3f5b2a075a2da1a395c8b60bf1e9be1", + "ecdc6bc4ab4c8984e919444f3c05183a", + "4b5141d14b98706c081371e2f8afe0ca", + "d957ee79de1f33c89077d37c5a2c5b06", + "c213e8bb918ebf843543fe9fd2e33db2", + "59f6a81c707378176e9ad8bb8d811f5f", + "7e580af973bb389ec1d1378a1850742f", + "975a858783a0ebec8c57d83ddd5bd381", + "cc51d9b7fe1ff2af858c6a0dd80b8815", + ], + description="Official evaluation data for IWSLT.", + citation="@InProceedings{iwslt2017,\n author = {Cettolo, Mauro and Federico, Marcello and Bentivogli, Luisa and Niehues, Jan and Stüker, Sebastian and Sudoh, Katsuitho and Yoshino, Koichiro and Federmann, Christian},\n title = {Overview of the IWSLT 2017 Evaluation Campaign},\n booktitle = {14th International Workshop on Spoken Language 
Translation},\n month = {December},\n year = {2017},\n address = {Tokyo, Japan},\n pages = {2--14},\n url = {http://workshop2017.iwslt.org/downloads/iwslt2017_proceeding_v2.pdf}\n}", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2017.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2017.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2017.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2017.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2017.en-de.en.xml", + "de-en/IWSLT17.TED.tst2017.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2017.de-en.de.xml", + "en-de/IWSLT17.TED.tst2017.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2017.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2017.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2017.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2017.en-zh.en.xml", + ], + "en-ar": [ + "en-ar/IWSLT17.TED.tst2017.en-ar.en.xml", + "ar-en/IWSLT17.TED.tst2017.ar-en.ar.xml", + ], + "ar-en": [ + "ar-en/IWSLT17.TED.tst2017.ar-en.ar.xml", + "en-ar/IWSLT17.TED.tst2017.en-ar.en.xml", + ], + "en-ja": [ + "en-ja/IWSLT17.TED.tst2017.en-ja.en.xml", + "ja-en/IWSLT17.TED.tst2017.ja-en.ja.xml", + ], + "ja-en": [ + "ja-en/IWSLT17.TED.tst2017.ja-en.ja.xml", + "en-ja/IWSLT17.TED.tst2017.en-ja.en.xml", + ], + "en-ko": [ + "en-ko/IWSLT17.TED.tst2017.en-ko.en.xml", + "ko-en/IWSLT17.TED.tst2017.ko-en.ko.xml", + ], + "ko-en": [ + "ko-en/IWSLT17.TED.tst2017.ko-en.ko.xml", + "en-ko/IWSLT17.TED.tst2017.en-ko.en.xml", + ], + }, + ), + "iwslt17/tst2016": IWSLTXMLDataset( + "iwslt17/tst2016", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/zh/en/zh-en.tgz", + ], + md5=[ + "1849bcc3b006dc0642a8843b11aa7192", + "79bf7a2ef02d226875f55fb076e7e473", + "b68e7097b179491f6c466ef41ad72b9b", + "e3f5b2a075a2da1a395c8b60bf1e9be1", + "975a858783a0ebec8c57d83ddd5bd381", + "cc51d9b7fe1ff2af858c6a0dd80b8815", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2016.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2016.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2016.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2016.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2016.en-de.en.xml", + "de-en/IWSLT17.TED.tst2016.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2016.de-en.de.xml", + "en-de/IWSLT17.TED.tst2016.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2016.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2016.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2016.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2016.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2015": IWSLTXMLDataset( + "iwslt17/tst2015", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + 
"https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2015.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2015.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2015.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2015.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2015.en-de.en.xml", + "de-en/IWSLT17.TED.tst2015.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2015.de-en.de.xml", + "en-de/IWSLT17.TED.tst2015.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2015.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2015.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2015.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2015.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2014": IWSLTXMLDataset( + "iwslt17/tst2014", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2014.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2014.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2014.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2014.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2014.en-de.en.xml", + "de-en/IWSLT17.TED.tst2014.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2014.de-en.de.xml", + "en-de/IWSLT17.TED.tst2014.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2014.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2014.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2014.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2014.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2013": IWSLTXMLDataset( + "iwslt17/tst2013", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + 
"https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2013.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2013.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2013.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2013.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2013.en-de.en.xml", + "de-en/IWSLT17.TED.tst2013.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2013.de-en.de.xml", + "en-de/IWSLT17.TED.tst2013.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2013.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2013.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2013.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2013.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2012": IWSLTXMLDataset( + "iwslt17/tst2012", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2012.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2012.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2012.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2012.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2012.en-de.en.xml", + "de-en/IWSLT17.TED.tst2012.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2012.de-en.de.xml", + "en-de/IWSLT17.TED.tst2012.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2012.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2012.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2012.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2012.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2011": IWSLTXMLDataset( + "iwslt17/tst2011", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + 
"575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2011.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2011.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2011.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2011.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2011.en-de.en.xml", + "de-en/IWSLT17.TED.tst2011.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2011.de-en.de.xml", + "en-de/IWSLT17.TED.tst2011.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2011.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2011.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2011.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2011.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2010": IWSLTXMLDataset( + "iwslt17/tst2010", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2010.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2010.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2010.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2010.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2010.en-de.en.xml", + "de-en/IWSLT17.TED.tst2010.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2010.de-en.de.xml", + "en-de/IWSLT17.TED.tst2010.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2010.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2010.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2010.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2010.en-zh.en.xml", + ], + }, + ), + "iwslt17/dev2010": IWSLTXMLDataset( + "iwslt17/dev2010", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.dev2010.en-fr.en.xml", + "fr-en/IWSLT17.TED.dev2010.fr-en.fr.xml", + ], + "fr-en": [ + 
"fr-en/IWSLT17.TED.dev2010.fr-en.fr.xml", + "en-fr/IWSLT17.TED.dev2010.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.dev2010.en-de.en.xml", + "de-en/IWSLT17.TED.dev2010.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.dev2010.de-en.de.xml", + "en-de/IWSLT17.TED.dev2010.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.dev2010.en-zh.en.xml", + "zh-en/IWSLT17.TED.dev2010.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.dev2010.zh-en.zh.xml", + "en-zh/IWSLT17.TED.dev2010.en-zh.en.xml", + ], + }, + ), + # mtedx + "mtedx/valid": PlainTextDataset( + "mtedx/valid", + data=[ + "https://raw.githubusercontent.com/esalesky/mtedx-eval/main/valid.tar.gz" + ], + description="mTEDx evaluation data, valid: http://openslr.org/100", + citation="@misc{salesky2021multilingual,\n title={The Multilingual TEDx Corpus for Speech Recognition and Translation}, \n author={Elizabeth Salesky and Matthew Wiesner and Jacob Bremerman and Roldano Cattoni and Matteo Negri and Marco Turchi and Douglas W. Oard and Matt Post},\n year={2021},\n eprint={2102.01757},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", + md5=["40618171614c50e6cbb5e5bbceee0635"], + langpairs={ + "el-en": ["valid/mtedx-valid-elen.el", "valid/mtedx-valid-elen.en"], + "es-en": ["valid/mtedx-valid-esen.es", "valid/mtedx-valid-esen.en"], + "es-fr": ["valid/mtedx-valid-esfr.es", "valid/mtedx-valid-esfr.fr"], + "es-it": ["valid/mtedx-valid-esit.es", "valid/mtedx-valid-esit.it"], + "es-pt": ["valid/mtedx-valid-espt.es", "valid/mtedx-valid-espt.pt"], + "fr-en": ["valid/mtedx-valid-fren.fr", "valid/mtedx-valid-fren.en"], + "fr-es": ["valid/mtedx-valid-fres.fr", "valid/mtedx-valid-fres.es"], + "fr-pt": ["valid/mtedx-valid-frpt.fr", "valid/mtedx-valid-frpt.pt"], + "it-en": ["valid/mtedx-valid-iten.it", "valid/mtedx-valid-iten.en"], + "it-es": ["valid/mtedx-valid-ites.it", "valid/mtedx-valid-ites.es"], + "pt-en": ["valid/mtedx-valid-pten.pt", "valid/mtedx-valid-pten.en"], + "pt-es": ["valid/mtedx-valid-ptes.pt", "valid/mtedx-valid-ptes.es"], + "ru-en": ["valid/mtedx-valid-ruen.ru", "valid/mtedx-valid-ruen.en"], + }, + ), + "mtedx/test": PlainTextDataset( + "mtedx/test", + data=["https://raw.githubusercontent.com/esalesky/mtedx-eval/main/test.tar.gz"], + description="mTEDx evaluation data, test: http://openslr.org/100", + citation="@misc{salesky2021multilingual,\n title={The Multilingual TEDx Corpus for Speech Recognition and Translation}, \n author={Elizabeth Salesky and Matthew Wiesner and Jacob Bremerman and Roldano Cattoni and Matteo Negri and Marco Turchi and Douglas W. 
Oard and Matt Post},\n year={2021},\n eprint={2102.01757},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", + md5=["fa4cb1548c210ec424d7d6bc9a3675a7"], + langpairs={ + "el-en": ["test/mtedx-test-elen.el", "test/mtedx-test-elen.en"], + "es-en": ["test/mtedx-test-esen.es", "test/mtedx-test-esen.en"], + "es-fr": ["test/mtedx-test-esfr.es", "test/mtedx-test-esfr.fr"], + "es-it": ["test/mtedx-test-esit.es", "test/mtedx-test-esit.it"], + "es-pt": ["test/mtedx-test-espt.es", "test/mtedx-test-espt.pt"], + "fr-en": ["test/mtedx-test-fren.fr", "test/mtedx-test-fren.en"], + "fr-es": ["test/mtedx-test-fres.fr", "test/mtedx-test-fres.es"], + "fr-pt": ["test/mtedx-test-frpt.fr", "test/mtedx-test-frpt.pt"], + "it-en": ["test/mtedx-test-iten.it", "test/mtedx-test-iten.en"], + "it-es": ["test/mtedx-test-ites.it", "test/mtedx-test-ites.es"], + "pt-en": ["test/mtedx-test-pten.pt", "test/mtedx-test-pten.en"], + "pt-es": ["test/mtedx-test-ptes.pt", "test/mtedx-test-ptes.es"], + "ru-en": ["test/mtedx-test-ruen.ru", "test/mtedx-test-ruen.en"], + }, + ), + # multi30k + "multi30k/2016": PlainTextDataset( + "multi30k/2016", + data=[ + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/multi30k_test_sets_d3ec2a38.tar.gz" + ], + md5=["9cf8f22d57fee2ca2af3c682dfdc525b"], + description="2016 flickr test set of Multi30k dataset", + citation='@InProceedings{elliott-etal-2016-multi30k,\n title = "{M}ulti30{K}: Multilingual {E}nglish-{G}erman Image Descriptions",\n author = "Elliott, Desmond and Frank, Stella and Sima{\'}an, Khalil and Specia, Lucia",\n booktitle = "Proceedings of the 5th Workshop on Vision and Language",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W16-3210",\n doi = "10.18653/v1/W16-3210",\n pages = "70--74",\n}', + langpairs={ + "en-fr": ["test_2016_flickr.en", "test_2016_flickr.fr"], + "en-de": ["test_2016_flickr.en", "test_2016_flickr.de"], + "en-cs": ["test_2016_flickr.en", "test_2016_flickr.cs"], + }, + ), + "multi30k/2017": PlainTextDataset( + "multi30k/2017", + data=[ + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/multi30k_test_sets_d3ec2a38.tar.gz" + ], + md5=["9cf8f22d57fee2ca2af3c682dfdc525b"], + description="2017 flickr test set of Multi30k dataset", + citation='@InProceedings{elliott-etal-2016-multi30k,\n title = "{M}ulti30{K}: Multilingual {E}nglish-{G}erman Image Descriptions",\n author = "Elliott, Desmond and Frank, Stella and Sima{\'}an, Khalil and Specia, Lucia",\n booktitle = "Proceedings of the 5th Workshop on Vision and Language",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W16-3210",\n doi = "10.18653/v1/W16-3210",\n pages = "70--74",\n}\n\n@InProceedings{elliott-etal-2017-findings,\n title = "Findings of the Second Shared Task on Multimodal Machine Translation and Multilingual Image Description",\n author = {Elliott, Desmond and Frank, Stella and Barrault, Lo{\\"\\i}c and Bougares, Fethi and Specia, Lucia},\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W17-4718",\n doi = "10.18653/v1/W17-4718",\n pages = "215--233",\n}\n', + langpairs={ + "en-fr": ["test_2017_flickr.en", "test_2017_flickr.fr"], + "en-de": 
["test_2017_flickr.en", "test_2017_flickr.de"], + }, + ), + "multi30k/2018": PlainTextDataset( + "multi30k/2018", + data=[ + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/multi30k_test_sets_d3ec2a38.tar.gz", + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2018_flickr.cs.gz", + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2018_flickr.de.gz", + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2018_flickr.fr.gz", + ], + md5=[ + "9cf8f22d57fee2ca2af3c682dfdc525b", + "4c6b6490e58107b2e397c5e3e1690abc", + "87e00327083dd69feaa029a8f7c1a047", + "a64563e986438ed731a6713027c36bfd", + ], + description="2018 flickr test set of Multi30k dataset. See https://competitions.codalab.org/competitions/19917 for evaluation.", + citation='@InProceedings{elliott-etal-2016-multi30k,\n title = "{M}ulti30{K}: Multilingual {E}nglish-{G}erman Image Descriptions",\n author = "Elliott, Desmond and Frank, Stella and Sima{\'}an, Khalil and Specia, Lucia",\n booktitle = "Proceedings of the 5th Workshop on Vision and Language",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W16-3210",\n doi = "10.18653/v1/W16-3210",\n pages = "70--74",\n}\n\n@InProceedings{barrault-etal-2018-findings,\n title = "Findings of the Third Shared Task on Multimodal Machine Translation",\n author = {Barrault, Lo{\\"\\i}c and Bougares, Fethi and Specia, Lucia and Lala, Chiraag and Elliott, Desmond and Frank, Stella},\n booktitle = "Proceedings of the Third Conference on Machine Translation: Shared Task Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6402",\n doi = "10.18653/v1/W18-6402",\n pages = "304--323",\n}\n', + langpairs={ + "en-fr": ["test_2018_flickr.en", "multi30k_2018.test_2018_flickr.fr.gz"], + "en-de": ["test_2018_flickr.en", "multi30k_2018.test_2018_flickr.de.gz"], + "en-cs": ["test_2018_flickr.en", "multi30k_2018.test_2018_flickr.cs.gz"], + }, + ), + # mtnt + "mtnt2019": TSVDataset( + "mtnt2019", + data=["https://pmichel31415.github.io/hosting/MTNT2019.tar.gz"], + description="Test set for the WMT 19 robustness shared task", + md5=["78a672e1931f106a8549023c0e8af8f6"], + langpairs={ + "en-fr": ["2:MTNT2019/en-fr.final.tsv", "3:MTNT2019/en-fr.final.tsv"], + "fr-en": ["2:MTNT2019/fr-en.final.tsv", "3:MTNT2019/fr-en.final.tsv"], + "en-ja": ["2:MTNT2019/en-ja.final.tsv", "3:MTNT2019/en-ja.final.tsv"], + "ja-en": ["2:MTNT2019/ja-en.final.tsv", "3:MTNT2019/ja-en.final.tsv"], + }, + ), + "mtnt1.1/test": TSVDataset( + "mtnt1.1/test", + data=[ + "https://github.com/pmichel31415/mtnt/releases/download/v1.1/MTNT.1.1.tar.gz" + ], + description="Test data for the Machine Translation of Noisy Text task: http://www.cs.cmu.edu/~pmichel1/mtnt/", + citation='@InProceedings{michel2018a:mtnt,\n author = "Michel, Paul and Neubig, Graham",\n title = "MTNT: A Testbed for Machine Translation of Noisy Text",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n pages = "543--553",\n location = "Brussels, Belgium",\n url = "http://aclweb.org/anthology/D18-1050"\n}', + md5=["8ce1831ac584979ba8cdcd9d4be43e1d"], + langpairs={ + "en-fr": ["1:MTNT/test/test.en-fr.tsv", 
"2:MTNT/test/test.en-fr.tsv"], + "fr-en": ["1:MTNT/test/test.fr-en.tsv", "2:MTNT/test/test.fr-en.tsv"], + "en-ja": ["1:MTNT/test/test.en-ja.tsv", "2:MTNT/test/test.en-ja.tsv"], + "ja-en": ["1:MTNT/test/test.ja-en.tsv", "2:MTNT/test/test.ja-en.tsv"], + }, + ), + "mtnt1.1/valid": TSVDataset( + "mtnt1.1/valid", + data=[ + "https://github.com/pmichel31415/mtnt/releases/download/v1.1/MTNT.1.1.tar.gz" + ], + description="Validation data for the Machine Translation of Noisy Text task: http://www.cs.cmu.edu/~pmichel1/mtnt/", + citation='@InProceedings{michel2018a:mtnt,\n author = "Michel, Paul and Neubig, Graham",\n title = "MTNT: A Testbed for Machine Translation of Noisy Text",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n pages = "543--553",\n location = "Brussels, Belgium",\n url = "http://aclweb.org/anthology/D18-1050"\n}', + md5=["8ce1831ac584979ba8cdcd9d4be43e1d"], + langpairs={ + "en-fr": ["1:MTNT/valid/valid.en-fr.tsv", "2:MTNT/valid/valid.en-fr.tsv"], + "fr-en": ["1:MTNT/valid/valid.fr-en.tsv", "2:MTNT/valid/valid.fr-en.tsv"], + "en-ja": ["1:MTNT/valid/valid.en-ja.tsv", "2:MTNT/valid/valid.en-ja.tsv"], + "ja-en": ["1:MTNT/valid/valid.ja-en.tsv", "2:MTNT/valid/valid.ja-en.tsv"], + }, + ), + "mtnt1.1/train": TSVDataset( + "mtnt1.1/train", + data=[ + "https://github.com/pmichel31415/mtnt/releases/download/v1.1/MTNT.1.1.tar.gz" + ], + description="Validation data for the Machine Translation of Noisy Text task: http://www.cs.cmu.edu/~pmichel1/mtnt/", + citation='@InProceedings{michel2018a:mtnt,\n author = "Michel, Paul and Neubig, Graham",\n title = "MTNT: A Testbed for Machine Translation of Noisy Text",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n pages = "543--553",\n location = "Brussels, Belgium",\n url = "http://aclweb.org/anthology/D18-1050"\n}', + md5=["8ce1831ac584979ba8cdcd9d4be43e1d"], + langpairs={ + "en-fr": ["1:MTNT/train/train.en-fr.tsv", "2:MTNT/train/train.en-fr.tsv"], + "fr-en": ["1:MTNT/train/train.fr-en.tsv", "2:MTNT/train/train.fr-en.tsv"], + "en-ja": ["1:MTNT/train/train.en-ja.tsv", "2:MTNT/train/train.en-ja.tsv"], + "ja-en": ["1:MTNT/train/train.ja-en.tsv", "2:MTNT/train/train.ja-en.tsv"], + }, + ), +} diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..5b13d59a4f896719cc73cee5343872e28f92af63 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py @@ -0,0 +1,45 @@ +import sys + +from . 
import DATASETS + +try: + cmd = sys.argv[1] +except IndexError: + print(f"Usage: {sys.argv[0]} --check | --dump") + sys.exit(1) + +if cmd == "--check": + import hashlib + import urllib.request + + url_md5 = {} + + for item in DATASETS.values(): + if item.md5 is not None: + assert item.data + assert item.md5 + assert len(item.data) == len(item.md5) + pairs = zip(item.data, item.md5) + for url, md5_hash in pairs: + url_md5[url] = md5_hash + + for url, md5_hash in url_md5.items(): + try: + print("Downloading ", url) + with urllib.request.urlopen(url) as f: + data = f.read() + except Exception as exc: + raise (exc) + + if hashlib.md5(data).hexdigest() != md5_hash: + print("MD5 check failed for", url) +elif cmd == "--dump": + import re + + # Dumps a table in markdown format + print(f'| {"Dataset":<30} | {"Description":<115} |') + header = "| " + "-" * 30 + " | " + "-" * 115 + " |" + print(header) + for name, item in DATASETS.items(): + desc = re.sub(r"(http[s]?:\/\/\S+)", r"[URL](\1)", str(item.description)) + print(f"| {name:<30} | {desc:<115} |") diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e634c2fa2719b28d74b09436033d21b534b22fc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf1cdb99796c336426c349856a8e62cb8e8d95ae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fc4e31f3d87a8149531efa9a1deeec6a34721db Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d383c3e8259d7d95d19cfcaec1c5a0eb6126c95 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ee44c05b1f12053d6153f5a3b0629ffb66260a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..acfed5fbacca04371b09793096e1a6a001bb47bb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2bc4ad1c30d766e5459ecec2100aa9c0639e129 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b617a55a7c7962a9b1eb342ed8640f8341800fe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/base.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/base.py new file mode 100644 index 0000000000000000000000000000000000000000..cf3c092fae7ae206d2606680f0313dae65d7bccb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/base.py @@ -0,0 +1,195 @@ +""" +The base class for all types of datasets. +""" +import os +import re +from abc import ABCMeta, abstractmethod +from typing import Dict, List, Optional + +from ..utils import SACREBLEU_DIR, download_file, smart_open + + +class Dataset(metaclass=ABCMeta): + def __init__( + self, + name: str, + data: Optional[List[str]] = None, + description: Optional[str] = None, + citation: Optional[str] = None, + md5: Optional[List[str]] = None, + langpairs: Optional[Dict[str, List[str]]] = None, + **kwargs, + ): + """ + Params come from the values in DATASETS. + + :param name: Name of the dataset. + :param data: URLs of the raw data of the dataset. + :param description: Description of the dataset. + :param citation: Citation for the dataset. + :param md5: MD5 checksums, one per entry in `data`. + :param langpairs: Mapping from a language pair (e.g. "en-de") to the list of files for that pair. + """ + self.name = name + self.data = data + self.description = description + self.citation = citation + self.md5 = md5 + self.langpairs = langpairs + self.kwargs = kwargs + + # Don't do any downloading or further processing now. + # Only do that lazily, when asked. + + # where to store the dataset + self._outdir = os.path.join(SACREBLEU_DIR, self.name) + self._rawdir = os.path.join(self._outdir, "raw") + + def maybe_download(self): + """ + If the dataset isn't downloaded, use utils/download_file() + This can be implemented here in the base class. It should write + to ~/.sacrebleu/DATASET/raw exactly as it does now. + """ + os.makedirs(self._rawdir, exist_ok=True) + + expected_checksums = self.md5 if self.md5 else [None] * len(self.data) + + for url, expected_md5 in zip(self.data, expected_checksums): + tarball = os.path.join(self._rawdir, self._get_tarball_filename(url)) + + download_file( + url, tarball, extract_to=self._rawdir, expected_md5=expected_md5 + ) + + @staticmethod + def _clean(s): + """ + Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one. + + :param s: The string. + :return: A cleaned-up string.
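+ + Doctest-style sketch of the intended behaviour (illustrative only; follows the regex below): + + >>> Dataset._clean("  multiple   internal\tspaces ") + 'multiple internal spaces'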
+ """ + return re.sub(r"\s+", " ", s.strip()) + + def _get_tarball_filename(self, url): + """ + Produces a local filename for tarball. + :param url: The url to download. + :return: A name produced from the dataset identifier and the URL basename. + """ + return self.name.replace("/", "_") + "." + os.path.basename(url) + + def _get_txt_file_path(self, langpair, fieldname): + """ + Given the language pair and fieldname, return the path to the text file. + The format is: ~/.sacrebleu/DATASET/DATASET.LANGPAIR.FIELDNAME + + :param langpair: The language pair. + :param fieldname: The fieldname. + :return: The path to the text file. + """ + # handle the special case of subsets. e.g. "wmt21/dev" > "wmt21_dev" + name = self.name.replace("/", "_") + # Colons are used to distinguish multiple references, but are not supported in Windows filenames + fieldname = fieldname.replace(":", "-") + return os.path.join(self._outdir, f"{name}.{langpair}.{fieldname}") + + def _get_langpair_metadata(self, langpair): + """ + Given a language pair, return the metadata for that language pair. + Deal with errors if the language pair is not available. + + :param langpair: The language pair. e.g. "en-de" + :return: Dict format which is same as self.langpairs. + """ + if langpair is None: + langpairs = self.langpairs + elif langpair not in self.langpairs: + raise Exception(f"No such language pair {self.name}/{langpair}") + else: + langpairs = {langpair: self.langpairs[langpair]} + + return langpairs + + @abstractmethod + def process_to_text(self, langpair=None) -> None: + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. + """ + pass + + def fieldnames(self, langpair) -> List[str]: + """ + Return a list of all the field names. For most source, this is just + the source and the reference. For others, it might include the document + ID for each line, or the original language (origLang). + + get_files() should return the same number of items as this. + + :param langpair: The language pair (e.g., "de-en") + :return: a list of field names + """ + return ["src", "ref"] + + def __iter__(self, langpair): + """ + Iterates over all fields (source, references, and other metadata) defined + by the dataset. + """ + all_files = self.get_files(langpair) + all_fins = [smart_open(f) for f in all_files] + + for item in zip(*all_fins): + yield item + + def source(self, langpair): + """ + Return an iterable over the source lines. + """ + source_file = self.get_source_file(langpair) + with smart_open(source_file) as fin: + for line in fin: + yield line.strip() + + def references(self, langpair): + """ + Return an iterable over the references. + """ + ref_files = self.get_reference_files(langpair) + ref_fins = [smart_open(f) for f in ref_files] + + for item in zip(*ref_fins): + yield item + + def get_source_file(self, langpair): + all_files = self.get_files(langpair) + all_fields = self.fieldnames(langpair) + index = all_fields.index("src") + return all_files[index] + + def get_reference_files(self, langpair): + all_files = self.get_files(langpair) + all_fields = self.fieldnames(langpair) + ref_files = [ + f for f, field in zip(all_files, all_fields) if field.startswith("ref") + ] + return ref_files + + def get_files(self, langpair): + """ + Returns the path of the source file and all reference files for + the provided test set / language pair. + Downloads the references first if they are not already local. 
+ + :param langpair: The language pair (e.g., "de-en") + :return: a list of the source file and all reference files + """ + fields = self.fieldnames(langpair) + files = [self._get_txt_file_path(langpair, field) for field in fields] + + for file in files: + if not os.path.exists(file): + self.process_to_text(langpair) + return files diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py new file mode 100644 index 0000000000000000000000000000000000000000..d1f638123e8742bb22e2f671c4af5bd0e556f685 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py @@ -0,0 +1,116 @@ +import os +import re + +from ..utils import smart_open +from .base import Dataset + + +class FakeSGMLDataset(Dataset): + """ + The fake SGML format used by WMT prior to 2021. Can't be properly parsed. + Source and reference(s) in separate files. + """ + + def _convert_format(self, input_file_path, output_file_path): + """ + Extract data from raw file and convert to raw txt format. + """ + with smart_open(input_file_path) as fin, smart_open( + output_file_path, "wt" + ) as fout: + for line in fin: + if line.startswith("<seg "): + line = self._clean(re.sub(r"<seg.*?>(.*)</seg>.*?", "\\1", line)) + print(line, file=fout) + + def _convert_meta(self, input_file_path, field, output_file_path): + """ + Extract metadata from document tags, projects across segments. + """ + with smart_open(input_file_path) as fin, smart_open( + output_file_path, "wt" + ) as fout: + value = "" + for line in fin: + if line.startswith("<doc "): + match = re.search(rf'{field}="(.*?)"', line) + if match is not None: + value = match.group(1) + elif line.startswith("<seg "): + # repeat the current document-level value once per segment + print(value, file=fout) + + def process_to_text(self, langpair=None): + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. + """ + # ensure that the dataset is downloaded + self.maybe_download() + langpairs = self._get_langpair_metadata(langpair) + + for langpair in langpairs: + fieldnames = self.fieldnames(langpair) + origin_files = [ + os.path.join(self._rawdir, path) for path in langpairs[langpair] + ] + # metadata fields (docid, genre, origlang) are extracted from the source file + origin_files += [origin_files[0]] * (len(fieldnames) - len(origin_files)) + + for field, origin_file in zip(fieldnames, origin_files): + output_file = self._get_txt_file_path(langpair, field) + + if field.startswith("src") or field.startswith("ref"): + self._convert_format(origin_file, output_file) + else: + self._convert_meta(origin_file, field, output_file) + + def fieldnames(self, langpair): + """ + Return a list of all the field names: the source, the reference(s) and, + except for the wmt08 sets, the docid/genre/origlang metadata fields. + """ + meta = self._get_langpair_metadata(langpair) + length = len(meta[langpair]) + + assert ( + length >= 2 + ), f"Each language pair in {self.name} must have at least 2 fields." + + fields = ["src"] + + if length == 2: + fields.append("ref") + else: + for i, _ in enumerate(meta[langpair][1:]): + fields.append(f"ref:{i}") + + if not self.name.startswith("wmt08"): + fields += ["docid", "genre", "origlang"] + + return fields + + +class WMTAdditionDataset(FakeSGMLDataset): + """ + Handle special case of WMT Google addition dataset. + """ + + def _convert_format(self, input_file_path, output_file_path): + if input_file_path.endswith(".sgm"): + return super()._convert_format(input_file_path, output_file_path) + else: + with smart_open(input_file_path) as fin: + with smart_open(output_file_path, "wt") as fout: + for line in fin: + print(line.rstrip(), file=fout) diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py new file mode 100644 index 0000000000000000000000000000000000000000..4381271d0ed69338e975f50eb0cf10977a9df96b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py @@ -0,0 +1,8 @@ +from .fake_sgml import FakeSGMLDataset + + +class IWSLTXMLDataset(FakeSGMLDataset): + """IWSLT dataset format. Can be parsed with the lxml parser.""" + + # Same as FakeSGMLDataset. Nothing to do here. + pass diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py new file mode 100644 index 0000000000000000000000000000000000000000..7f7a93db5339cdfa563fff390d4fb246da8350b4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py @@ -0,0 +1,36 @@ +import os + +from ..utils import smart_open +from .base import Dataset + + +class PlainTextDataset(Dataset): + """ + The plain text format. Data is separated into source and reference files. + Each line of the two files is aligned.
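+ + Usage sketch (illustrative; output paths depend on SACREBLEU_DIR): + + >>> DATASETS["mtedx/valid"].process_to_text("fr-en")  # doctest: +SKIP + # writes mtedx_valid.fr-en.src and mtedx_valid.fr-en.ref under $SACREBLEU_DIR/mtedx/valid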
+ """ + + def process_to_text(self, langpair=None): + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. + """ + # ensure that the dataset is downloaded + self.maybe_download() + langpairs = self._get_langpair_metadata(langpair) + + for langpair in langpairs: + fieldnames = self.fieldnames(langpair) + origin_files = [ + os.path.join(self._rawdir, path) for path in langpairs[langpair] + ] + + for field, origin_file in zip(fieldnames, origin_files): + + origin_file = os.path.join(self._rawdir, origin_file) + output_file = self._get_txt_file_path(langpair, field) + + with smart_open(origin_file) as fin: + with smart_open(output_file, "wt") as fout: + for line in fin: + print(line.rstrip(), file=fout) diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py new file mode 100644 index 0000000000000000000000000000000000000000..011bf20dd7e894a705d0327c44ceeb4faea5cc6b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py @@ -0,0 +1,61 @@ +import os + +from ..utils import smart_open +from .base import Dataset + + +class TSVDataset(Dataset): + """ + The format used by the MTNT datasets. Data is in a single TSV file. + """ + + @staticmethod + def _split_index_and_filename(meta, field): + """ + Splits the index and filename from a metadata string. + + e.g. meta="3:en-de.tsv", filed=[Any value] -> (3, "en-de.tsv") + "en-de.tsv", filed="src" -> (1, "en-de.tsv") + "en-de.tsv", filed="tgt" -> (2, "en-de.tsv") + """ + arr = meta.split(":") + if len(arr) == 2: + try: + index = int(arr[0]) + except ValueError: + raise Exception(f"Invalid meta for TSVDataset: {meta}") + return index, arr[1] + + else: + index = 0 if field == "src" else 1 + return index, meta + + def process_to_text(self, langpair=None): + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. 
+ """ + # ensure that the dataset is downloaded + self.maybe_download() + langpairs = self._get_langpair_metadata(langpair) + + for langpair in langpairs: + fieldnames = self.fieldnames(langpair) + origin_files = [ + os.path.join(self._rawdir, path) for path in langpairs[langpair] + ] + + for field, origin_file, meta in zip( + fieldnames, origin_files, langpairs[langpair] + ): + index, origin_file = self._split_index_and_filename(meta, field) + + origin_file = os.path.join(self._rawdir, origin_file) + output_file = self._get_txt_file_path(langpair, field) + + with smart_open(origin_file) as fin: + with smart_open(output_file, "wt") as fout: + for line in fin: + # be careful with empty source or reference lines + # MTNT2019/ja-en.final.tsv:632 `'1033\t718\t\t\n'` + print(line.rstrip("\n").split("\t")[index], file=fout) diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a18c227748fd59cb1848539ea88b045b75e5dc64 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py @@ -0,0 +1,11 @@ +"""The implementation of various metrics.""" + +from .bleu import BLEU, BLEUScore # noqa: F401 +from .chrf import CHRF, CHRFScore # noqa: F401 +from .ter import TER, TERScore # noqa: F401 + +METRICS = { + 'BLEU': BLEU, + 'CHRF': CHRF, + 'TER': TER, +} diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..125106414c866fcbbbd866f75512b24790c0ec2f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a28b8cdc76976a881dcca1282cff8d1d56641dd6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb3463b59d857fcc0e388cc6b26589920cb3d898 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a09028f99ef344ef6a4fa7846492d155944ad4f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/base.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/base.py new file mode 100644 index 0000000000000000000000000000000000000000..93fb10815a1a8b08c69bad19d2cbed58e251afc7 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/base.py @@ -0,0 +1,438 @@ +"""The base `Score`, `Metric` and `Signature` classes to derive from. + +`Metric` is an abstract class that enforces the implementation of a set +of abstract methods. This way, a correctly implemented metric will work +seamlessly with the rest of the codebase. +""" + +import json +import logging +import statistics +from typing import List, Sequence, Any, Optional, Dict +from abc import ABCMeta, abstractmethod + +from .. import __version__ + +sacrelogger = logging.getLogger('sacrebleu') + + +class Score: + """A base score class to derive from. + + :param name: The name of the underlying metric. + :param score: A floating point number for the final metric. + """ + def __init__(self, name: str, score: float): + """`Score` initializer.""" + self.name = name + self.score = score + + # Statistical test related fields + self._mean = -1.0 + self._ci = -1.0 + + # More info can be added right after the score + self._verbose = '' + + def format(self, width: int = 2, score_only: bool = False, + signature: str = '', is_json: bool = False) -> str: + """Returns a pretty representation of the score. + :param width: Floating point decimal precision width. + :param score_only: If `True`, and the format is not `json`, + returns a single score string. + :param signature: A string representation of the given `Signature` + instance. + :param is_json: If `True`, will output the score in JSON string. + :return: A plain or JSON-formatted string representation. + """ + d = { + 'name': self.name, + 'score': float(f'{self.score:.{width}f}'), + 'signature': signature, + } + + sc = f'{self.score:.{width}f}' + + if self._mean > 0: + confidence_mean = f'{self._mean:.{width}f}' + confidence_var = f'{self._ci:.{width}f}' + confidence_str = f'μ = {confidence_mean} ± {confidence_var}' + + sc += f' ({confidence_str})' + if is_json: + d['confidence_mean'] = float(confidence_mean) + d['confidence_var'] = float(confidence_var) + d['confidence'] = confidence_str + + # Construct full score line + full_score = f"{self.name}|{signature}" if signature else self.name + full_score = f"{full_score} = {sc}" + if self._verbose: + full_score += f' {self._verbose}' + d['verbose_score'] = self._verbose + + if score_only: + return sc + + if is_json: + for param in signature.split('|'): + key, value = param.split(':') + d[key] = value + return json.dumps(d, indent=1, ensure_ascii=False) + + return full_score + + def estimate_ci(self, scores: List['Score']): + """Takes a list of scores and stores mean, stdev and 95% confidence + interval around the mean. + + :param scores: A list of `Score` objects obtained from bootstrap + resampling for example. + """ + # Sort the scores + raw_scores = sorted([x.score for x in scores]) + n = len(raw_scores) + + # Get CI bounds (95%, i.e. 1/40 from left) + lower_idx = n // 40 + upper_idx = n - lower_idx - 1 + lower, upper = raw_scores[lower_idx], raw_scores[upper_idx] + self._ci = 0.5 * (upper - lower) + self._mean = statistics.mean(raw_scores) + + def __repr__(self): + """Returns a human readable score string.""" + return self.format() + + +class Signature: + """A convenience class to represent sacreBLEU reproducibility signatures. + + :param args: key-value dictionary passed from the actual metric instance. 
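+ + A formatted signature is a "|"-joined list of key:value pairs; for BLEU it + typically renders as "nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0" + (short form "#:1|c:mixed|e:no|tok:13a|s:exp|v:2.0.0"). The values shown here are illustrative.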
+ """ + def __init__(self, args: dict): + """`Signature` initializer.""" + # Global items that are shared across all metrics + self._abbr = { + 'version': 'v', + 'nrefs': '#', + 'test': 't', + 'lang': 'l', + 'subset': 'S', + 'origlang': 'o', + 'bs': 'bs', # Bootstrap resampling trials + 'ar': 'ar', # Approximate randomization trials + 'seed': 'rs', # RNG's seed + } + + if 'num_refs' not in args: + raise ValueError( + 'Number of references unknown, please evaluate the metric first.') + + num_refs = args['num_refs'] + if num_refs == -1: + # Detect variable number of refs + num_refs = 'var' + + # Global items that are shared across all metrics + # None's will be ignored + self.info = { + 'version': __version__, + 'nrefs': num_refs, + 'bs': args.get('n_bootstrap', None), + 'ar': None, + 'seed': args.get('seed', None), + 'test': args.get('test_set', None), + 'lang': args.get('langpair', None), + 'origlang': args.get('origlang', None), + 'subset': args.get('subset', None), + } + + def format(self, short: bool = False) -> str: + """Returns a string representation of the signature. + + :param short: If True, shortened signature is produced. + :return: A string representation of the signature. + """ + pairs = [] + keys = list(self.info.keys()) + # keep version always at end + keys.remove('version') + for name in keys + ['version']: + value = self.info[name] + if value is not None: + if isinstance(value, bool): + # Replace True/False with yes/no + value = 'yes' if value else 'no' + final_name = self._abbr[name] if short else name + pairs.append(f'{final_name}:{value}') + + return '|'.join(pairs) + + def update(self, key: str, value: Any): + """Add a new item or update an existing one. + + :param key: The key to use in the dictionary. + :param value: The associated value for the `key`. + """ + self.info[key] = value + + def __str__(self): + """Returns a human-readable signature string.""" + return self.format() + + def __repr__(self): + """Returns a human-readable signature string.""" + return self.format() + + +class Metric(metaclass=ABCMeta): + """A base class for all metrics that ensures the implementation of some + methods. Much of the common functionality is moved to this base class + from other metrics.""" + + # Each metric should define its Signature class' name here + _SIGNATURE_TYPE = Signature + + def __init__(self): + """`Metric` initializer.""" + # The pre-computed reference cache + self._ref_cache = None + + # only useful for BLEU tokenized warnings. Set to True so that + # warnings are not issued for other metrics. + self._force = True + + # Will be used by the signature when bootstrap resampling + self.n_bootstrap = None + self.seed = None + + def _check_sentence_score_args(self, hyp: str, refs: Sequence[str]): + """Performs sanity checks on `sentence_score` method's arguments. + + :param hyp: A single hypothesis string. + :param refs: A sequence of reference strings. + """ + prefix = self.__class__.__name__ + err_msg = None + + if not isinstance(hyp, str): + err_msg = 'The argument `hyp` should be a string.' + elif isinstance(refs, str) or not isinstance(refs, Sequence): + err_msg = 'The argument `refs` should be a sequence of strings.' + elif not isinstance(refs[0], str) and refs[0] is not None: + err_msg = 'Each element of `refs` should be a string.' + + if err_msg: + raise TypeError(f'{prefix}: {err_msg}') + + def _check_corpus_score_args(self, hyps: Sequence[str], + refs: Optional[Sequence[Sequence[str]]]): + """Performs sanity checks on `corpus_score` method's arguments. 
+ + :param hyps: A sequence of hypothesis strings. + :param refs: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. + """ + + prefix = self.__class__.__name__ + err_msg = None + + if not isinstance(hyps, Sequence): + err_msg = "`hyps` should be a sequence of strings." + elif not isinstance(hyps[0], str): + err_msg = 'Each element of `hyps` should be a string.' + elif any(line is None for line in hyps): + err_msg = "Undefined line in hypotheses stream!" + + if refs is not None: + if not isinstance(refs, Sequence): + err_msg = "`refs` should be a sequence of sequences of strings." + elif not isinstance(refs[0], Sequence): + err_msg = "Each element of `refs` should be a sequence of strings." + elif not isinstance(refs[0][0], str) and refs[0][0] is not None: + err_msg = "`refs` should be a sequence of sequences of strings." + + if err_msg: + raise TypeError(f'{prefix}: {err_msg}') + + @abstractmethod + def _aggregate_and_compute(self, stats: List[List[Any]]) -> Any: + """Computes the final score given the pre-computed match statistics. + + :param stats: A list of segment-level statistics. + :return: A `Score` instance. + """ + pass + + @abstractmethod + def _compute_score_from_stats(self, stats: List[Any]) -> Any: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `Score` object. + """ + pass + + @abstractmethod + def _preprocess_segment(self, sent: str) -> str: + """A wrapper around the metric's tokenization and pre-processing logic. + This should be implemented for reference caching to work correctly. + + :param sent: The input sentence. + :return: The pre-processed output sentence. + """ + pass + + @abstractmethod + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]: + """Given a list of reference segments, extract the required + information (such as n-grams for BLEU and chrF). This should be implemented + for the generic `_cache_references()` to work across all metrics. + + :param refs: A sequence of strings. + """ + pass + + @abstractmethod + def _compute_segment_statistics(self, hypothesis: str, ref_kwargs: Dict) -> List[Any]: + """Given a (pre-processed) hypothesis sentence and already computed + reference info, returns the best match statistics across the + references. The return type is usually a List of ints or floats. + + :param hypothesis: A pre-processed hypothesis sentence. + :param ref_kwargs: A dictionary with reference-related information + within. This is formulated as a dictionary as different metrics may + require different information regarding a reference segment. + """ + pass + + def _cache_references(self, references: Sequence[Sequence[str]]) -> List[Any]: + """Given the full set of document references, extract segment n-grams + (or other necessary information) for caching purposes. + + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. A particular reference + segment can be '' or `None` to allow the use of a variable number + of references per segment. + :return: A list where each element is a tuple of segment n-grams and + reference lengths, as returned by `_extract_reference_info()`.
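+ + For example (illustrative), two reference streams over a two-sentence corpus, + where the second stream covers only the first sentence: + + references = [["ref A, sent 1", "ref A, sent 2"], ["ref B, sent 1", None]] + + gives per-segment reference counts of 2 and 1, so `num_refs` is set to -1 (variable).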
+ """ + ref_cache = [] + + # Decide on final number of refs here as well + num_refs = set() + + for refs in zip(*references): + # Remove undefined references + lines = [x for x in refs if x is not None] + + # Keep track of reference counts to allow variable reference + # info in the signature + num_refs.add(len(lines)) + + lines = [self._preprocess_segment(x) for x in lines] + + # Get n-grams + ref_cache.append(self._extract_reference_info(lines)) + + if len(num_refs) == 1: + self.num_refs = list(num_refs)[0] + else: + # A variable number of refs exist + self.num_refs = -1 + + return ref_cache + + def _extract_corpus_statistics(self, hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]]) -> Any: + """Reads the corpus and returns sentence-level match statistics for + faster re-computations esp. during statistical tests. + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. + :return: A list where each sublist corresponds to segment statistics. + """ + # Pre-compute references + # Don't store the cache as the user is explicitly passing refs + if references: + ref_cache = self._cache_references(references) + elif self._ref_cache: + ref_cache = self._ref_cache + else: + raise RuntimeError('No references provided and the cache is empty.') + + stats = [] + tok_count = 0 + + for hyp, ref_kwargs in zip(hypotheses, ref_cache): + # Check for already-tokenized input problem (only for BLEU) + if not self._force and hyp.endswith(' .'): + tok_count += 1 + + hyp = self._preprocess_segment(hyp) + + # Collect stats + stats.append(self._compute_segment_statistics(hyp, ref_kwargs)) + + if tok_count >= 100: + sacrelogger.warning("That's 100 lines that end in a tokenized period ('.')") + sacrelogger.warning("It looks like you forgot to detokenize your test data, which may hurt your score.") + sacrelogger.warning("If you insist your data is detokenized, or don't care, you can suppress this message with the `force` parameter.") + + return stats + + def sentence_score(self, hypothesis: str, references: Sequence[str]) -> Any: + """Compute the metric for a single sentence against a single (or multiple) reference(s). + + :param hypothesis: A single hypothesis string. + :param references: A sequence of reference strings. + :return: A `Score` object. + """ + self._check_sentence_score_args(hypothesis, references) + + stats = self._extract_corpus_statistics( + [hypothesis], [[refs] for refs in references]) + return self._aggregate_and_compute(stats) + + def corpus_score(self, hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]], + n_bootstrap: int = 1) -> Any: + """Compute the metric for a corpus against a single (or multiple) reference(s). + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. + :param n_bootstrap: If > 1, provides 95% confidence interval around true mean + using bootstrap resampling with `n_bootstrap` samples. + :return: A `Score` object. 
+ """ + self._check_corpus_score_args(hypotheses, references) + + # Collect corpus stats + stats = self._extract_corpus_statistics(hypotheses, references) + + # Compute the actual system score + actual_score = self._aggregate_and_compute(stats) + + if n_bootstrap > 1: + # Compute bootstrap estimate as well + # Delayed import is to escape from numpy import if bootstrap + # is not requested. + from ..significance import _bootstrap_resample + + self.n_bootstrap = n_bootstrap + self.seed, bs_scores = _bootstrap_resample(stats, self, n_bootstrap) + actual_score.estimate_ci(bs_scores) + + return actual_score + + def get_signature(self) -> Signature: + """Creates and returns the signature for the metric. The creation + of signatures is delayed as the number of references is resolved + only at the point of reference caching.""" + return self._SIGNATURE_TYPE(self.__dict__) diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..5ca77b9af5c4ecc77acde3b7816607d11cd4bc7f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py @@ -0,0 +1,420 @@ +"""The implementation of the BLEU metric (Papineni et al., 2002).""" + +import math +import logging +from importlib import import_module +from typing import List, Sequence, Optional, Dict, Any + +from ..utils import my_log, sum_of_lists + +from .base import Score, Signature, Metric +from .helpers import extract_all_word_ngrams + +sacrelogger = logging.getLogger('sacrebleu') + +# The default for the maximum n-gram order when computing precisions +MAX_NGRAM_ORDER = 4 + +_TOKENIZERS = { + 'none': 'tokenizer_none.NoneTokenizer', + 'zh': 'tokenizer_zh.TokenizerZh', + '13a': 'tokenizer_13a.Tokenizer13a', + 'intl': 'tokenizer_intl.TokenizerV14International', + 'char': 'tokenizer_char.TokenizerChar', + 'ja-mecab': 'tokenizer_ja_mecab.TokenizerJaMecab', + 'ko-mecab': 'tokenizer_ko_mecab.TokenizerKoMecab', + 'spm': 'tokenizer_spm.TokenizerSPM', + 'flores101': 'tokenizer_spm.Flores101Tokenizer', + 'flores200': 'tokenizer_spm.Flores200Tokenizer', +} + + +def _get_tokenizer(name: str): + """Dynamically import tokenizer as importing all is slow.""" + module_name, class_name = _TOKENIZERS[name].rsplit('.', 1) + return getattr( + import_module(f'.tokenizers.{module_name}', 'sacrebleu'), + class_name) + + +class BLEUSignature(Signature): + """A convenience class to represent the reproducibility signature for BLEU. + + :param args: key-value dictionary passed from the actual metric instance. + """ + def __init__(self, args: dict): + """`BLEUSignature` initializer.""" + super().__init__(args) + + self._abbr.update({ + 'case': 'c', + 'eff': 'e', + 'tok': 'tok', + 'smooth': 's', + }) + + # Construct a combined string for smoothing method and value + smooth_str = args['smooth_method'] + smooth_def = BLEU.SMOOTH_DEFAULTS[smooth_str] + + # If the method requires a parameter, add it within brackets + if smooth_def is not None: + # the following can be None if the user wants to use the default + smooth_val = args['smooth_value'] + + if smooth_val is None: + smooth_val = smooth_def + + smooth_str += f'[{smooth_val:.2f}]' + + self.info.update({ + 'case': 'lc' if args['lowercase'] else 'mixed', + 'eff': 'yes' if args['effective_order'] else 'no', + 'tok': args['tokenizer_signature'], + 'smooth': smooth_str, + }) + + +class BLEUScore(Score): + """A convenience class to represent BLEU scores. 
+ + :param score: The BLEU score. + :param counts: List of counts of correct ngrams, 1 <= n <= max_ngram_order + :param totals: List of counts of total ngrams, 1 <= n <= max_ngram_order + :param precisions: List of precisions, 1 <= n <= max_ngram_order + :param bp: The brevity penalty. + :param sys_len: The cumulative system length. + :param ref_len: The cumulative reference length. + """ + def __init__(self, score: float, counts: List[int], totals: List[int], + precisions: List[float], bp: float, + sys_len: int, ref_len: int): + """`BLEUScore` initializer.""" + super().__init__('BLEU', score) + self.bp = bp + self.counts = counts + self.totals = totals + self.sys_len = sys_len + self.ref_len = ref_len + self.precisions = precisions + + self.prec_str = "/".join([f"{p:.1f}" for p in self.precisions]) + self.ratio = self.sys_len / self.ref_len if self.ref_len else 0 + + # The verbose part of BLEU + self._verbose = f"{self.prec_str} (BP = {self.bp:.3f} " + self._verbose += f"ratio = {self.ratio:.3f} hyp_len = {self.sys_len:d} " + self._verbose += f"ref_len = {self.ref_len:d})" + + +class BLEU(Metric): + """Computes the BLEU metric given hypotheses and references. + + :param lowercase: If True, lowercased BLEU is computed. + :param force: Ignore data that looks already tokenized. + :param tokenize: The tokenizer to use. If None, defaults to language-specific tokenizers with '13a' as the fallback default. + :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none'). + :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value. + :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions. + :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be + `True`, if sentence-level BLEU will be computed. + :param trg_lang: An optional language code to raise potential tokenizer warnings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If given, the reference n-grams + and lengths will be pre-computed and cached for faster BLEU computation + across many systems. 
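+
+    Example with pre-cached references (an illustrative sketch; the
+    sentences are invented):
+
+    >>> refs = [['the cat sat on the mat'], ['a cat sat on a mat']]
+    >>> bleu = BLEU(references=refs, effective_order=True)
+    >>> score = bleu.corpus_score(['the cat sat on the mat'], None)  # `None` reuses the cache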
+ """ + + SMOOTH_DEFAULTS: Dict[str, Optional[float]] = { + # The defaults for `floor` and `add-k` are obtained from the following paper + # A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU + # Boxing Chen and Colin Cherry + # http://aclweb.org/anthology/W14-3346 + 'none': None, # No value is required + 'floor': 0.1, + 'add-k': 1, + 'exp': None, # No value is required + } + + TOKENIZERS = _TOKENIZERS.keys() + + # mteval-v13a.pl tokenizer unless Chinese or Japanese is provided + TOKENIZER_DEFAULT = '13a' + + # Some language specific mappings to use if `trg_lang` is given + # and the tokenizer is not explicitly specified + _TOKENIZER_MAP = { + 'zh': 'zh', + 'ja': 'ja-mecab', + 'ko': 'ko-mecab', + } + + _SIGNATURE_TYPE = BLEUSignature + + def __init__(self, lowercase: bool = False, + force: bool = False, + tokenize: Optional[str] = None, + smooth_method: str = 'exp', + smooth_value: Optional[float] = None, + max_ngram_order: int = MAX_NGRAM_ORDER, + effective_order: bool = False, + trg_lang: str = '', + references: Optional[Sequence[Sequence[str]]] = None): + """`BLEU` initializer.""" + super().__init__() + + self._force = force + self.trg_lang = trg_lang + self.lowercase = lowercase + self.smooth_value = smooth_value + self.smooth_method = smooth_method + self.max_ngram_order = max_ngram_order + self.effective_order = effective_order + + # Sanity check + assert self.smooth_method in self.SMOOTH_DEFAULTS.keys(), \ + "Unknown smooth_method {self.smooth_method!r}" + + # If the tokenizer wasn't specified, choose it according to the + # following logic. We use 'v13a' except for ZH and JA. Note that + # this logic can only be applied when sacrebleu knows the target + # language, which is only the case for builtin datasets. + if tokenize is None: + best_tokenizer = self.TOKENIZER_DEFAULT + + # Set `zh` or `ja-mecab` or `ko-mecab` if target language is provided + if self.trg_lang in self._TOKENIZER_MAP: + best_tokenizer = self._TOKENIZER_MAP[self.trg_lang] + else: + best_tokenizer = tokenize + if self.trg_lang == 'zh' and best_tokenizer != 'zh': + sacrelogger.warning( + "Consider using the 'zh' or 'spm' tokenizer for Chinese.") + if self.trg_lang == 'ja' and best_tokenizer != 'ja-mecab': + sacrelogger.warning( + "Consider using the 'ja-mecab' or 'spm' tokenizer for Japanese.") + if self.trg_lang == 'ko' and best_tokenizer != 'ko-mecab': + sacrelogger.warning( + "Consider using the 'ko-mecab' or 'spm' tokenizer for Korean.") + + # Create the tokenizer + self.tokenizer = _get_tokenizer(best_tokenizer)() + + # Build the signature + self.tokenizer_signature = self.tokenizer.signature() + + if references is not None: + # Pre-compute reference ngrams and lengths + self._ref_cache = self._cache_references(references) + + @staticmethod + def compute_bleu(correct: List[int], + total: List[int], + sys_len: int, + ref_len: int, + smooth_method: str = 'none', + smooth_value=None, + effective_order: bool = False, + max_ngram_order: int = MAX_NGRAM_ORDER) -> BLEUScore: + """Computes BLEU score from its sufficient statistics with smoothing. + + Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU", + Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346) + + - none: No smoothing. + - floor: Method 1 (requires small positive value (0.1 in the paper) to be set) + - add-k: Method 2 (Generalizing Lin and Och, 2004) + - exp: Method 3 (NIST smoothing method i.e. 
in use with mteval-v13a.pl) + + :param correct: List of counts of correct ngrams, 1 <= n <= max_ngram_order + :param total: List of counts of total ngrams, 1 <= n <= max_ngram_order + :param sys_len: The cumulative system length + :param ref_len: The cumulative reference length + :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none') + :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value. + :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be + `True`, if sentence-level BLEU will be computed. + :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions. + :return: A `BLEUScore` instance. + """ + assert smooth_method in BLEU.SMOOTH_DEFAULTS.keys(), \ + "Unknown smooth_method {smooth_method!r}" + + # Fetch the default value for floor and add-k + if smooth_value is None: + smooth_value = BLEU.SMOOTH_DEFAULTS[smooth_method] + + # Compute brevity penalty + if sys_len < ref_len: + bp = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0 + else: + bp = 1.0 + + # n-gram precisions + precisions = [0.0 for x in range(max_ngram_order)] + + # Early stop if there are no matches (#141) + if not any(correct): + return BLEUScore(0.0, correct, total, precisions, bp, sys_len, ref_len) + + smooth_mteval = 1. + eff_order = max_ngram_order + for n in range(1, len(precisions) + 1): + if smooth_method == 'add-k' and n > 1: + correct[n - 1] += smooth_value + total[n - 1] += smooth_value + + if total[n - 1] == 0: + break + + # If the system guesses no i-grams, 1 <= i <= max_ngram_order, + # the BLEU score is 0 (technically undefined). This is a problem for sentence + # level BLEU or a corpus of short sentences, where systems will get + # no credit if sentence lengths fall under the max_ngram_order threshold. + # This fix scales max_ngram_order to the observed maximum order. + # It is only available through the API and off by default + if effective_order: + eff_order = n + + if correct[n - 1] == 0: + if smooth_method == 'exp': + smooth_mteval *= 2 + precisions[n - 1] = 100. / (smooth_mteval * total[n - 1]) + elif smooth_method == 'floor': + precisions[n - 1] = 100. * smooth_value / total[n - 1] + else: + precisions[n - 1] = 100. * correct[n - 1] / total[n - 1] + + # Compute BLEU score + score = bp * math.exp( + sum([my_log(p) for p in precisions[:eff_order]]) / eff_order) + + return BLEUScore(score, correct, total, precisions, bp, sys_len, ref_len) + + def _preprocess_segment(self, sent: str) -> str: + """Given a sentence, lowercases (optionally) and tokenizes it + :param sent: The input sentence string. + :return: The pre-processed output string. + """ + if self.lowercase: + sent = sent.lower() + return self.tokenizer(sent.rstrip()) + + def _compute_score_from_stats(self, stats: List[int]) -> BLEUScore: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `BLEUScore` object. 
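+
+        For the default `max_ngram_order` of 4 the expected layout is
+        `[sys_len, ref_len, correct_1..correct_4, total_1..total_4]`,
+        matching the slicing below (an illustrative sketch; the numbers
+        are invented):
+
+        >>> stats = [10, 11, 9, 7, 5, 3, 10, 9, 8, 7]
+        >>> correct, total = stats[2:6], stats[6:10]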
+ """ + return self.compute_bleu( + correct=stats[2: 2 + self.max_ngram_order], + total=stats[2 + self.max_ngram_order:], + sys_len=int(stats[0]), ref_len=int(stats[1]), + smooth_method=self.smooth_method, smooth_value=self.smooth_value, + effective_order=self.effective_order, + max_ngram_order=self.max_ngram_order + ) + + def _aggregate_and_compute(self, stats: List[List[int]]) -> BLEUScore: + """Computes the final BLEU score given the pre-computed corpus statistics. + + :param stats: A list of segment-level statistics + :return: A `BLEUScore` instance. + """ + return self._compute_score_from_stats(sum_of_lists(stats)) + + def _get_closest_ref_len(self, hyp_len: int, ref_lens: List[int]) -> int: + """Given a hypothesis length and a list of reference lengths, returns + the closest reference length to be used by BLEU. + + :param hyp_len: The hypothesis length. + :param ref_lens: A list of reference lengths. + :return: The closest reference length. + """ + closest_diff, closest_len = -1, -1 + + for ref_len in ref_lens: + diff = abs(hyp_len - ref_len) + if closest_diff == -1 or diff < closest_diff: + closest_diff = diff + closest_len = ref_len + elif diff == closest_diff and ref_len < closest_len: + closest_len = ref_len + + return closest_len + + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]: + """Given a list of reference segments, extract the n-grams and reference lengths. + The latter will be useful when comparing hypothesis and reference lengths for BLEU. + + :param refs: A sequence of strings. + :return: A dictionary that will be passed to `_compute_segment_statistics()` + through keyword arguments. + """ + ngrams = None + ref_lens = [] + + for ref in refs: + # extract n-grams for this ref + this_ngrams, ref_len = extract_all_word_ngrams(ref, 1, self.max_ngram_order) + ref_lens.append(ref_len) + + if ngrams is None: + # Set it directly for first set of refs + ngrams = this_ngrams + else: + # Merge counts across multiple references + # The below loop is faster than `ngrams |= this_ngrams` + for ngram, count in this_ngrams.items(): + ngrams[ngram] = max(ngrams[ngram], count) + + return {'ref_ngrams': ngrams, 'ref_lens': ref_lens} + + def _compute_segment_statistics(self, hypothesis: str, + ref_kwargs: Dict) -> List[int]: + """Given a (pre-processed) hypothesis sentence and already computed + reference n-grams & lengths, returns the best match statistics across the + references. + + :param hypothesis: Hypothesis sentence. + :param ref_kwargs: A dictionary with `refs_ngrams`and `ref_lens` keys + that denote the counter containing all n-gram counts and reference lengths, + respectively. + :return: A list of integers with match statistics. 
+ """ + + ref_ngrams, ref_lens = ref_kwargs['ref_ngrams'], ref_kwargs['ref_lens'] + + # Extract n-grams for the hypothesis + hyp_ngrams, hyp_len = extract_all_word_ngrams( + hypothesis, 1, self.max_ngram_order) + + ref_len = self._get_closest_ref_len(hyp_len, ref_lens) + + # Count the stats + # Although counter has its internal & and | operators, this is faster + correct = [0 for i in range(self.max_ngram_order)] + total = correct[:] + for hyp_ngram, hyp_count in hyp_ngrams.items(): + # n-gram order + n = len(hyp_ngram) - 1 + # count hypothesis n-grams + total[n] += hyp_count + # count matched n-grams + if hyp_ngram in ref_ngrams: + correct[n] += min(hyp_count, ref_ngrams[hyp_ngram]) + + # Return a flattened list for efficient computation + return [hyp_len, ref_len] + correct + total + + def sentence_score(self, hypothesis: str, references: Sequence[str]) -> BLEUScore: + """Compute the metric for a single sentence against a single (or multiple) reference(s). + + :param hypothesis: A single hypothesis string. + :param references: A sequence of reference strings. + :return: a `BLEUScore` object. + """ + if not self.effective_order: + sacrelogger.warning( + 'It is recommended to enable `effective_order` for sentence-level BLEU.') + return super().sentence_score(hypothesis, references) diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py new file mode 100644 index 0000000000000000000000000000000000000000..f7d4f6858d0c6005f97ad8011a0b17bd97c2bcea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py @@ -0,0 +1,284 @@ +"""The implementation of chrF (Popović 2015) and chrF++ (Popović 2017) metrics.""" + +from typing import List, Sequence, Optional, Dict +from collections import Counter + +from ..utils import sum_of_lists +from .base import Score, Signature, Metric +from .helpers import extract_all_char_ngrams, extract_word_ngrams + + +class CHRFSignature(Signature): + """A convenience class to represent the reproducibility signature for chrF. + + :param args: key-value dictionary passed from the actual metric instance. + """ + def __init__(self, args: dict): + """`CHRFSignature` initializer.""" + super().__init__(args) + self._abbr.update({ + 'case': 'c', + 'eff': 'e', + 'nc': 'nc', + 'nw': 'nw', + 'space': 's', + }) + + self.info.update({ + 'case': 'lc' if args['lowercase'] else 'mixed', + 'eff': 'yes' if not args['eps_smoothing'] else 'no', + 'nc': args['char_order'], + 'nw': args['word_order'], + 'space': 'yes' if args['whitespace'] else 'no', + }) + + +class CHRFScore(Score): + """A convenience class to represent chrF scores. + + :param score: The chrF (chrF++) score. + :param char_order: The character n-gram order. + :param word_order: The word n-gram order. If equals to 2, the metric is referred to as chrF++. + :param beta: Determine the importance of recall w.r.t precision. + """ + def __init__(self, score: float, char_order: int, word_order: int, beta: int): + """`CHRFScore` initializer.""" + self.beta = beta + self.char_order = char_order + self.word_order = word_order + + # Add + signs to denote chrF+ variant + name = f'chrF{self.beta}' + '+' * self.word_order + + super().__init__(name, score) + + +class CHRF(Metric): + """Computes the chrF(++) metric given hypotheses and references. + + :param char_order: Character n-gram order. + :param word_order: Word n-gram order. If equals to 2, the metric is referred to as chrF++. 
+ :param beta: Determine the importance of recall w.r.t precision. + :param lowercase: Enable case-insensitivity. + :param whitespace: If `True`, include whitespaces when extracting character n-grams. + :param eps_smoothing: If `True`, applies epsilon smoothing similar + to reference chrF++.py, NLTK and Moses implementations. Otherwise, + it takes into account effective match order similar to sacreBLEU < 2.0.0. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If given, the reference n-grams + will be pre-computed and cached for faster re-computation across many systems. + """ + + # Maximum character n-gram order to take into account + CHAR_ORDER = 6 + + # chrF+ additionally takes into account some of the word n-grams + WORD_ORDER = 0 + + # Defaults to 2 (per http://www.aclweb.org/anthology/W16-2341) + BETA = 2 + + # Cache string.punctuation for chrF+' punctuation stripper + _PUNCTS = set('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~') + + _SIGNATURE_TYPE = CHRFSignature + + def __init__(self, char_order: int = CHAR_ORDER, + word_order: int = WORD_ORDER, + beta: int = BETA, + lowercase: bool = False, + whitespace: bool = False, + eps_smoothing: bool = False, + references: Optional[Sequence[Sequence[str]]] = None): + """`CHRF` initializer.""" + super().__init__() + + self.beta = beta + self.char_order = char_order + self.word_order = word_order + self.order = self.char_order + self.word_order + self.lowercase = lowercase + self.whitespace = whitespace + self.eps_smoothing = eps_smoothing + + if references is not None: + # Pre-compute reference ngrams + self._ref_cache = self._cache_references(references) + + @staticmethod + def _get_match_statistics(hyp_ngrams: Counter, ref_ngrams: Counter) -> List[int]: + """Computes the match statistics between hypothesis and reference n-grams. + + :param hyp_ngrams: A `Counter` holding hypothesis n-grams. + :param ref_ngrams: A `Counter` holding reference n-grams. + :return: A list of three numbers denoting hypothesis n-gram count, + reference n-gram count and the intersection count. + """ + # Counter's internal intersection is not that fast, count manually + match_count, hyp_count = 0, 0 + for ng, count in hyp_ngrams.items(): + hyp_count += count + if ng in ref_ngrams: + match_count += min(count, ref_ngrams[ng]) + + return [ + # Don't count hits if no reference exists for that n-gram + hyp_count if ref_ngrams else 0, + sum(ref_ngrams.values()), + match_count, + ] + + def _remove_punctuation(self, sent: str) -> List[str]: + """Separates out punctuations from beginning and end of words for chrF. + Adapted from https://github.com/m-popovic/chrF + + :param sent: A string. + :return: A list of words. + """ + tokenized = [] + for w in sent.split(): + if len(w) == 1: + tokenized.append(w) + else: + # NOTE: This splits '(hi)' to '(hi' and ')' (issue #124) + if w[-1] in self._PUNCTS: + tokenized += [w[:-1], w[-1]] + elif w[0] in self._PUNCTS: + tokenized += [w[0], w[1:]] + else: + tokenized.append(w) + return tokenized + + def _preprocess_segment(self, sent: str) -> str: + """Given a sentence, apply optional lowercasing. + + :param sent: The input sentence string. + :return: The pre-processed output string. + """ + return sent.lower() if self.lowercase else sent + + def _compute_f_score(self, statistics: List[int]) -> float: + """Compute the chrF score given the n-gram match statistics. 
+
+        :param statistics: A flattened list of 3 * (`char_order` + `word_order`)
+        elements giving the [hyp, ref, match] counts for each order.
+        :return: The final f_beta score between [0, 100].
+        """
+        eps = 1e-16
+        score = 0.0
+        effective_order = 0
+        factor = self.beta ** 2
+        avg_prec, avg_rec = 0.0, 0.0
+
+        for i in range(self.order):
+            n_hyp, n_ref, n_match = statistics[3 * i: 3 * i + 3]
+
+            # chrF++.py style EPS smoothing (also used by Moses and NLTK)
+            prec = n_match / n_hyp if n_hyp > 0 else eps
+            rec = n_match / n_ref if n_ref > 0 else eps
+
+            denom = factor * prec + rec
+            score += ((1 + factor) * prec * rec / denom) if denom > 0 else eps
+
+            # sacreBLEU < 2.0.0 style effective order smoothing
+            if n_hyp > 0 and n_ref > 0:
+                avg_prec += prec
+                avg_rec += rec
+                effective_order += 1
+
+        if self.eps_smoothing:
+            return 100 * score / self.order
+
+        if effective_order == 0:
+            avg_prec = avg_rec = 0.0
+        else:
+            avg_prec /= effective_order
+            avg_rec /= effective_order
+
+        if avg_prec + avg_rec:
+            score = (1 + factor) * avg_prec * avg_rec
+            score /= ((factor * avg_prec) + avg_rec)
+            return 100 * score
+        else:
+            return 0.0
+
+    def _compute_score_from_stats(self, stats: List[int]) -> CHRFScore:
+        """Computes the final score from already aggregated statistics.
+
+        :param stats: A list or numpy array of segment-level statistics.
+        :return: A `CHRFScore` object.
+        """
+        return CHRFScore(
+            self._compute_f_score(stats), self.char_order,
+            self.word_order, self.beta)
+
+    def _aggregate_and_compute(self, stats: List[List[int]]) -> CHRFScore:
+        """Computes the final score given the pre-computed corpus statistics.
+
+        :param stats: A list of segment-level statistics
+        :return: A `CHRFScore` object.
+        """
+        return self._compute_score_from_stats(sum_of_lists(stats))
+
+    def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, List[List[Counter]]]:
+        """Given a list of reference segments, extract the character and word n-grams.
+
+        :param refs: A sequence of reference segments.
+        :return: A dictionary with key `ref_ngrams`, a list where each element
+        contains the n-gram counters for one reference segment.
+        """
+        ngrams = []
+
+        for ref in refs:
+            # extract character n-grams
+            stats = extract_all_char_ngrams(ref, self.char_order, self.whitespace)
+
+            # Check chrF+ mode
+            if self.word_order > 0:
+                ref_words = self._remove_punctuation(ref)
+
+                for n in range(self.word_order):
+                    stats.append(extract_word_ngrams(ref_words, n + 1))
+
+            ngrams.append(stats)
+
+        return {'ref_ngrams': ngrams}
+
+    def _compute_segment_statistics(
+            self, hypothesis: str, ref_kwargs: Dict) -> List[int]:
+        """Given a (pre-processed) hypothesis sentence and already computed
+        reference n-grams, returns the best match statistics across the
+        references.
+
+        :param hypothesis: Hypothesis sentence.
+        :param ref_kwargs: A dictionary with key `ref_ngrams` which is a list
+        where each sublist contains n-gram counters for a particular reference sentence.
+        :return: A list of integers where each triplet denotes [hyp, ref, match]
+        statistics.
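+
+        An illustrative layout sketch (invented numbers): with
+        `char_order=6` and `word_order=2` there are 8 orders in total,
+        giving 24 entries:
+
+        >>> stats = [12, 12, 10] + [11, 11, 8] + [0, 0, 0] * 6
+        >>> len(stats) == 3 * (6 + 2)
+        True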
+ """ + best_stats = [] + best_f_score = -1.0 + + # extract character n-grams + all_hyp_ngrams = extract_all_char_ngrams( + hypothesis, self.char_order, self.whitespace) + + # Check chrF+ mode to see if we'll add word n-grams as well + if self.word_order > 0: + # Primitive tokenization: separate out punctuations + hwords = self._remove_punctuation(hypothesis) + _range = range(1, self.word_order + 1) + all_hyp_ngrams.extend([extract_word_ngrams(hwords, n) for n in _range]) + + # Iterate over multiple references, pick the one with best F score + for _ref_ngrams in ref_kwargs['ref_ngrams']: + stats = [] + # Traverse all orders + for h, r in zip(all_hyp_ngrams, _ref_ngrams): + stats.extend(self._get_match_statistics(h, r)) + f_score = self._compute_f_score(stats) + + if f_score > best_f_score: + best_f_score = f_score + best_stats = stats + + return best_stats diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..72ec14461658249fcd63a139623f3ead9a4aa057 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py @@ -0,0 +1,69 @@ +"""Various utility functions for word and character n-gram extraction.""" + +from collections import Counter +from typing import List, Tuple + + +def extract_all_word_ngrams(line: str, min_order: int, max_order: int) -> Tuple[Counter, int]: + """Extracts all ngrams (min_order <= n <= max_order) from a sentence. + + :param line: A string sentence. + :param min_order: Minimum n-gram order. + :param max_order: Maximum n-gram order. + :return: a Counter object with n-grams counts and the sequence length. + """ + + ngrams = [] + tokens = line.split() + + for n in range(min_order, max_order + 1): + for i in range(0, len(tokens) - n + 1): + ngrams.append(tuple(tokens[i: i + n])) + + return Counter(ngrams), len(tokens) + + +def extract_word_ngrams(tokens: List[str], n: int) -> Counter: + """Extracts n-grams with order `n` from a list of tokens. + + :param tokens: A list of tokens. + :param n: The order of n-grams. + :return: a Counter object with n-grams counts. + """ + return Counter([' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]) + + +def extract_char_ngrams(line: str, n: int, include_whitespace: bool = False) -> Counter: + """Yields counts of character n-grams from a sentence. + + :param line: A segment containing a sequence of words. + :param n: The order of the n-grams. + :param include_whitespace: If given, will not strip whitespaces from the line. + :return: a dictionary containing ngrams and counts + """ + if not include_whitespace: + line = ''.join(line.split()) + + return Counter([line[i:i + n] for i in range(len(line) - n + 1)]) + + +def extract_all_char_ngrams( + line: str, max_order: int, include_whitespace: bool = False) -> List[Counter]: + """Extracts all character n-grams at once for convenience. + + :param line: A segment containing a sequence of words. + :param max_order: The maximum order of the n-grams. + :param include_whitespace: If given, will not strip whitespaces from the line. + :return: a list of Counter objects containing ngrams and counts. 
+ """ + + counters = [] + + if not include_whitespace: + line = ''.join(line.split()) + + for n in range(1, max_order + 1): + ngrams = Counter([line[i:i + n] for i in range(len(line) - n + 1)]) + counters.append(ngrams) + + return counters diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py new file mode 100644 index 0000000000000000000000000000000000000000..2d2de4944c955ebf0c8b37fce7f04eb16f79c026 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py @@ -0,0 +1,478 @@ +"""This module implements various utility functions for the TER metric.""" + +# Copyright 2020 Memsource +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import math +from typing import List, Tuple, Dict + + +_COST_INS = 1 +_COST_DEL = 1 +_COST_SUB = 1 + +# Tercom-inspired limits +_MAX_SHIFT_SIZE = 10 +_MAX_SHIFT_DIST = 50 +_BEAM_WIDTH = 25 + +# Our own limits +_MAX_CACHE_SIZE = 10000 +_MAX_SHIFT_CANDIDATES = 1000 +_INT_INFINITY = int(1e16) + +_OP_INS = 'i' +_OP_DEL = 'd' +_OP_NOP = ' ' +_OP_SUB = 's' +_OP_UNDEF = 'x' + +_FLIP_OPS = str.maketrans(_OP_INS + _OP_DEL, _OP_DEL + _OP_INS) + + +def translation_edit_rate(words_hyp: List[str], words_ref: List[str]) -> Tuple[int, int]: + """Calculate the translation edit rate. + + :param words_hyp: Tokenized translation hypothesis. + :param words_ref: Tokenized reference translation. + :return: tuple (number of edits, length) + """ + n_words_ref = len(words_ref) + n_words_hyp = len(words_hyp) + if n_words_ref == 0: + # FIXME: This trace here is not used? + trace = _OP_DEL * n_words_hyp + # special treatment of empty refs + return n_words_hyp, 0 + + cached_ed = BeamEditDistance(words_ref) + shifts = 0 + + input_words = words_hyp + checked_candidates = 0 + while True: + # do shifts until they stop reducing the edit distance + delta, new_input_words, checked_candidates = _shift( + input_words, words_ref, cached_ed, checked_candidates) + + if checked_candidates >= _MAX_SHIFT_CANDIDATES: + break + + if delta <= 0: + break + shifts += 1 + input_words = new_input_words + + edit_distance, trace = cached_ed(input_words) + total_edits = shifts + edit_distance + + return total_edits, n_words_ref + + +def _shift(words_h: List[str], words_r: List[str], cached_ed, + checked_candidates: int) -> Tuple[int, List[str], int]: + """Attempt to shift words in hypothesis to match reference. + + Returns the shift that reduces the edit distance the most. + + Note that the filtering of possible shifts and shift selection are heavily + based on somewhat arbitrary heuristics. The code here follows as closely + as possible the logic in Tercom, not always justifying the particular design + choices. + + :param words_h: Hypothesis. + :param words_r: Reference. + :param cached_ed: Cached edit distance. + :param checked_candidates: Number of shift candidates that were already + evaluated. + :return: (score, shifted_words, checked_candidates). 
Best shift and updated + number of evaluated shift candidates. + """ + pre_score, inv_trace = cached_ed(words_h) + + # to get alignment, we pretend we are rewriting reference into hypothesis, + # so we need to flip the trace of edit operations + trace = _flip_trace(inv_trace) + align, ref_err, hyp_err = trace_to_alignment(trace) + + best = None + + for start_h, start_r, length in _find_shifted_pairs(words_h, words_r): + # don't do the shift unless both the hypothesis was wrong and the + # reference doesn't match hypothesis at the target position + if sum(hyp_err[start_h: start_h + length]) == 0: + continue + + if sum(ref_err[start_r: start_r + length]) == 0: + continue + + # don't try to shift within the subsequence + if start_h <= align[start_r] < start_h + length: + continue + + prev_idx = -1 + for offset in range(-1, length): + if start_r + offset == -1: + idx = 0 # insert before the beginning + elif start_r + offset in align: + # Unlike Tercom which inserts *after* the index, we insert + # *before* the index. + idx = align[start_r + offset] + 1 + else: + break # offset is out of bounds => aims past reference + + if idx == prev_idx: + continue # skip idx if already tried + + prev_idx = idx + + shifted_words = _perform_shift(words_h, start_h, length, idx) + assert(len(shifted_words) == len(words_h)) + + # Elements of the tuple are designed to replicate Tercom ranking + # of shifts: + candidate = ( + pre_score - cached_ed(shifted_words)[0], # highest score first + length, # then, longest match first + -start_h, # then, earliest match first + -idx, # then, earliest target position first + shifted_words, + ) + + checked_candidates += 1 + + if not best or candidate > best: + best = candidate + + if checked_candidates >= _MAX_SHIFT_CANDIDATES: + break + + if not best: + return 0, words_h, checked_candidates + else: + best_score, _, _, _, shifted_words = best + return best_score, shifted_words, checked_candidates + + +def _perform_shift(words: List[str], start: int, length: int, target: int) -> List[str]: + """Perform a shift in `words` from `start` to `target`. + + :param words: Words to shift. + :param start: Where from. + :param length: How many words. + :param target: Where to. + :return: Shifted words. + """ + if target < start: + # shift before previous position + return words[:target] + words[start: start + length] \ + + words[target: start] + words[start + length:] + elif target > start + length: + # shift after previous position + return words[:start] + words[start + length: target] \ + + words[start: start + length] + words[target:] + else: + # shift within the shifted string + return words[:start] + words[start + length: length + target] \ + + words[start: start + length] + words[length + target:] + + +def _find_shifted_pairs(words_h: List[str], words_r: List[str]): + """Find matching word sub-sequences in two lists of words. + + Ignores sub-sequences starting at the same position. + + :param words_h: First word list. + :param words_r: Second word list. 
+ :return: Yields tuples of (h_start, r_start, length) such that: + words_h[h_start:h_start+length] = words_r[r_start:r_start+length] + """ + n_words_h = len(words_h) + n_words_r = len(words_r) + for start_h in range(n_words_h): + for start_r in range(n_words_r): + # this is slightly different from what tercom does but this should + # really only kick in in degenerate cases + if abs(start_r - start_h) > _MAX_SHIFT_DIST: + continue + + length = 0 + while words_h[start_h + length] == words_r[start_r + length] and length < _MAX_SHIFT_SIZE: + length += 1 + + yield start_h, start_r, length + + # If one sequence is consumed, stop processing + if n_words_h == start_h + length or n_words_r == start_r + length: + break + + +def _flip_trace(trace): + """Flip the trace of edit operations. + + Instead of rewriting a->b, get a recipe for rewriting b->a. + + Simply flips insertions and deletions. + """ + return trace.translate(_FLIP_OPS) + + +def trace_to_alignment(trace: str) -> Tuple[Dict, List, List]: + """Transform trace of edit operations into an alignment of the sequences. + + :param trace: Trace of edit operations (' '=no change or 's'/'i'/'d'). + :return: Alignment, error positions in reference, error positions in hypothesis. + """ + pos_hyp = -1 + pos_ref = -1 + hyp_err = [] + ref_err = [] + align = {} + + # we are rewriting a into b + for op in trace: + if op == _OP_NOP: + pos_hyp += 1 + pos_ref += 1 + align[pos_ref] = pos_hyp + hyp_err.append(0) + ref_err.append(0) + elif op == _OP_SUB: + pos_hyp += 1 + pos_ref += 1 + align[pos_ref] = pos_hyp + hyp_err.append(1) + ref_err.append(1) + elif op == _OP_INS: + pos_hyp += 1 + hyp_err.append(1) + elif op == _OP_DEL: + pos_ref += 1 + align[pos_ref] = pos_hyp + ref_err.append(1) + else: + raise Exception(f"unknown operation {op!r}") + + return align, ref_err, hyp_err + + +class BeamEditDistance: + """Edit distance with several features required for TER calculation. + + * internal cache + * "beam" search + * tracking of edit operations + + The internal self._cache works like this: + + Keys are words of the hypothesis. Values are tuples (next_node, row) where: + + * next_node is the cache for the next word in the sequence + * row is the stored row of the edit distance matrix + + Effectively, caching allows to skip several rows in the edit distance + matrix calculation and instead, to initialize the computation with the last + matching matrix row. + + Beam search, as implemented here, only explores a fixed-size sub-row of + candidates around the matrix diagonal (more precisely, it's a + "pseudo"-diagonal since we take the ratio of sequence lengths into account). + + Tracking allows to reconstruct the optimal sequence of edit operations. + + :param words_ref: A list of reference tokens. + """ + def __init__(self, words_ref: List[str]): + """`BeamEditDistance` initializer.""" + self._words_ref = words_ref + self._n_words_ref = len(self._words_ref) + + # first row corresponds to insertion operations of the reference, + # so we do 1 edit operation per reference word + self._initial_row = [(i * _COST_INS, _OP_INS) + for i in range(self._n_words_ref + 1)] + + self._cache = {} # type: Dict[str, Tuple] + self._cache_size = 0 + + # Precomputed empty matrix row. Contains infinities so that beam search + # avoids using the uninitialized cells. + self._empty_row = [(_INT_INFINITY, _OP_UNDEF)] * (self._n_words_ref + 1) + + def __call__(self, words_hyp: List[str]) -> Tuple[int, str]: + """Calculate edit distance between self._words_ref and the hypothesis. 
+ + Uses cache to skip some of the computation. + + :param words_hyp: Words in translation hypothesis. + :return: Edit distance score. + """ + + # skip initial words in the hypothesis for which we already know the + # edit distance + start_position, dist = self._find_cache(words_hyp) + + # calculate the rest of the edit distance matrix + edit_distance, newly_created_matrix, trace = self._edit_distance( + words_hyp, start_position, dist) + + # update our cache with the newly calculated rows + self._add_cache(words_hyp, newly_created_matrix) + + return edit_distance, trace + + def _edit_distance(self, words_h: List[str], start_h: int, + cache: List[List[Tuple[int, str]]]) -> Tuple[int, List, str]: + """Actual edit distance calculation. + + Can be initialized with the last cached row and a start position in + the hypothesis that it corresponds to. + + :param words_h: Words in translation hypothesis. + :param start_h: Position from which to start the calculation. + (This is zero if no cache match was found.) + :param cache: Precomputed rows corresponding to edit distance matrix + before `start_h`. + :return: Edit distance value, newly computed rows to update the + cache, trace. + """ + + n_words_h = len(words_h) + + # initialize the rest of the matrix with infinite edit distances + rest_empty = [list(self._empty_row) + for _ in range(n_words_h - start_h)] + + dist = cache + rest_empty + + assert len(dist) == n_words_h + 1 + + length_ratio = self._n_words_ref / n_words_h if words_h else 1 + + # in some crazy sentences, the difference in length is so large that + # we may end up with zero overlap with previous row + if _BEAM_WIDTH < length_ratio / 2: + beam_width = math.ceil(length_ratio / 2 + _BEAM_WIDTH) + else: + beam_width = _BEAM_WIDTH + + # calculate the Levenshtein distance + for i in range(start_h + 1, n_words_h + 1): + pseudo_diag = math.floor(i * length_ratio) + min_j = max(0, pseudo_diag - beam_width) + max_j = min(self._n_words_ref + 1, pseudo_diag + beam_width) + + if i == n_words_h: + max_j = self._n_words_ref + 1 + + for j in range(min_j, max_j): + if j == 0: + dist[i][j] = (dist[i - 1][j][0] + _COST_DEL, _OP_DEL) + else: + if words_h[i - 1] == self._words_ref[j - 1]: + cost_sub = 0 + op_sub = _OP_NOP + else: + cost_sub = _COST_SUB + op_sub = _OP_SUB + + # Tercom prefers no-op/sub, then insertion, then deletion. + # But since we flip the trace and compute the alignment from + # the inverse, we need to swap order of insertion and + # deletion in the preference. + ops = ( + (dist[i - 1][j - 1][0] + cost_sub, op_sub), + (dist[i - 1][j][0] + _COST_DEL, _OP_DEL), + (dist[i][j - 1][0] + _COST_INS, _OP_INS), + ) + + for op_cost, op_name in ops: + if dist[i][j][0] > op_cost: + dist[i][j] = op_cost, op_name + + # get the trace + trace = "" + i = n_words_h + j = self._n_words_ref + + while i > 0 or j > 0: + op = dist[i][j][1] + trace = op + trace + if op in (_OP_SUB, _OP_NOP): + i -= 1 + j -= 1 + elif op == _OP_INS: + j -= 1 + elif op == _OP_DEL: + i -= 1 + else: + raise Exception(f"unknown operation {op!r}") + + return dist[-1][-1][0], dist[len(cache):], trace + + def _add_cache(self, words_hyp: List[str], mat: List[List[Tuple]]): + """Add newly computed rows to cache. + + Since edit distance is only calculated on the hypothesis suffix that + was not in cache, the number of rows in `mat` may be shorter than + hypothesis length. In that case, we skip over these initial words. + + :param words_hyp: Hypothesis words. + :param mat: Edit distance matrix rows for each position. 
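+
+        An illustrative sketch of the resulting trie shape (the row
+        values are invented): after caching rows for `['a', 'b']`,
+        `self._cache` conceptually looks like
+
+        >>> cache = {'a': ({'b': ({}, ('row_b',))}, ('row_a',))}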
+ """ + if self._cache_size >= _MAX_CACHE_SIZE: + return + + node = self._cache + + n_mat = len(mat) + + # how many initial words to skip + skip_num = len(words_hyp) - n_mat + + # jump through the cache to the current position + for i in range(skip_num): + node = node[words_hyp[i]][0] + + assert len(words_hyp[skip_num:]) == n_mat + + # update cache with newly computed rows + for word, row in zip(words_hyp[skip_num:], mat): + if word not in node: + node[word] = ({}, tuple(row)) + self._cache_size += 1 + value = node[word] + node = value[0] + + def _find_cache(self, words_hyp: List[str]) -> Tuple[int, List[List]]: + """Find the already computed rows of the edit distance matrix in cache. + + Returns a partially computed edit distance matrix. + + :param words_hyp: Translation hypothesis. + :return: Tuple (start position, dist). + """ + node = self._cache + start_position = 0 + dist = [self._initial_row] + for word in words_hyp: + if word in node: + start_position += 1 + node, row = node[word] + dist.append(row) + else: + break + + return start_position, dist diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/ter.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/ter.py new file mode 100644 index 0000000000000000000000000000000000000000..40f8221853ac651502435fae3efd9db6a7f7aa04 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/ter.py @@ -0,0 +1,195 @@ +"""The implementation of the TER metric (Snover et al., 2006).""" + +# Copyright 2020 Memsource +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Dict, Sequence, Optional, Any + +from ..tokenizers.tokenizer_ter import TercomTokenizer +from ..utils import sum_of_lists +from .base import Score, Signature, Metric +from .lib_ter import translation_edit_rate + + +class TERSignature(Signature): + """A convenience class to represent the reproducibility signature for TER. + + :param args: key-value dictionary passed from the actual metric instance. + """ + def __init__(self, args: dict): + """`TERSignature` initializer.""" + super().__init__(args) + self._abbr.update({ + 'case': 'c', + 'tok': 't', + 'norm': 'nr', + 'punct': 'pn', + 'asian': 'as', + }) + + self.info.update({ + 'case': 'mixed' if args['case_sensitive'] else 'lc', + 'tok': args['tokenizer_signature'], + 'norm': args['normalized'], + 'punct': not args['no_punct'], + 'asian': args['asian_support'], + }) + + +class TERScore(Score): + """A convenience class to represent TER scores. + + :param score: The TER score. + :param num_edits: The cumulative number of edits. + :param ref_length: The cumulative average reference length. + """ + def __init__(self, score: float, num_edits: float, ref_length: float): + """`TERScore` initializer.""" + super().__init__('TER', score) + self.num_edits = int(num_edits) + self.ref_length = ref_length + + +class TER(Metric): + """Translation edit rate (TER). A near-exact reimplementation of the Tercom + algorithm, produces identical results on all "sane" outputs. 
+ + Tercom original implementation: https://github.com/jhclark/tercom + + The beam edit distance algorithm uses a slightly different approach (we stay + around the diagonal which is faster, at least in Python) so in some + (extreme) corner cases, the output could differ. + + Caching in the edit distance is based partly on the PyTer package by Hiroyuki + Tanaka (MIT license). (https://github.com/aflc/pyter) + + :param normalized: Enable character normalization. By default, normalizes a couple of things such as + newlines being stripped, retrieving XML encoded characters, and fixing tokenization for punctuation. When + 'asian_support' is enabled, also normalizes specific Asian (CJK) character sequences, i.e. + split them down to the character level. + :param no_punct: Remove punctuation. Can be used in conjunction with 'asian_support' to also remove typical + punctuation markers in Asian languages (CJK). + :param asian_support: Enable special treatment of Asian characters. This option only has an effect when + 'normalized' and/or 'no_punct' is enabled. If 'normalized' is also enabled, then Asian (CJK) + characters are split down to the character level. If 'no_punct' is enabled alongside 'asian_support', + specific unicode ranges for CJK and full-width punctuations are also removed. + :param case_sensitive: If `True`, does not lowercase sentences. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If given, the reference info + will be pre-computed and cached for faster re-computation across many systems. + """ + + _SIGNATURE_TYPE = TERSignature + + def __init__(self, normalized: bool = False, + no_punct: bool = False, + asian_support: bool = False, + case_sensitive: bool = False, + references: Optional[Sequence[Sequence[str]]] = None): + """`TER` initializer.""" + super().__init__() + + self.no_punct = no_punct + self.normalized = normalized + self.asian_support = asian_support + self.case_sensitive = case_sensitive + + self.tokenizer = TercomTokenizer( + normalized=self.normalized, + no_punct=self.no_punct, + asian_support=self.asian_support, + case_sensitive=self.case_sensitive, + ) + self.tokenizer_signature = self.tokenizer.signature() + + if references is not None: + self._ref_cache = self._cache_references(references) + + def _preprocess_segment(self, sent: str) -> str: + """Given a sentence, apply tokenization if enabled. + + :param sent: The input sentence string. + :return: The pre-processed output string. + """ + return self.tokenizer(sent.rstrip()) + + def _compute_score_from_stats(self, stats: List[float]) -> TERScore: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `TERScore` object. + """ + total_edits, sum_ref_lengths = stats[0], stats[1] + + if sum_ref_lengths > 0: + score = total_edits / sum_ref_lengths + elif total_edits > 0: + score = 1.0 # empty reference(s) and non-empty hypothesis + else: + score = 0.0 # both reference(s) and hypothesis are empty + + return TERScore(100 * score, total_edits, sum_ref_lengths) + + def _aggregate_and_compute(self, stats: List[List[float]]) -> TERScore: + """Computes the final TER score given the pre-computed corpus statistics. + + :param stats: A list of segment-level statistics + :return: A `TERScore` instance. 
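+
+        An illustrative aggregation sketch (invented numbers); each
+        per-segment entry is `[best_num_edits, avg_ref_len]`:
+
+        >>> stats = [[2.0, 10.0], [1.0, 8.0]]
+        >>> total_edits, total_len = (sum(col) for col in zip(*stats))
+        >>> round(100 * total_edits / total_len, 2)
+        16.67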
+ """ + return self._compute_score_from_stats(sum_of_lists(stats)) + + def _compute_segment_statistics( + self, hypothesis: str, ref_kwargs: Dict) -> List[float]: + """Given a (pre-processed) hypothesis sentence and already computed + reference words, returns the segment statistics required to compute + the full TER score. + + :param hypothesis: Hypothesis sentence. + :param ref_kwargs: A dictionary with `ref_words` key which is a list + where each sublist contains reference words. + :return: A two-element list that contains the 'minimum number of edits' + and 'the average reference length'. + """ + + ref_lengths = 0 + best_num_edits = int(1e16) + + words_hyp = hypothesis.split() + + # Iterate the references + ref_words = ref_kwargs['ref_words'] + for words_ref in ref_words: + num_edits, ref_len = translation_edit_rate(words_hyp, words_ref) + ref_lengths += ref_len + if num_edits < best_num_edits: + best_num_edits = num_edits + + avg_ref_len = ref_lengths / len(ref_words) + return [best_num_edits, avg_ref_len] + + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]: + """Given a list of reference segments, applies pre-processing & tokenization + and returns list of tokens for each reference. + + :param refs: A sequence of strings. + :return: A dictionary that will be passed to `_compute_segment_statistics()` + through keyword arguments. + """ + ref_words = [] + + for ref in refs: + ref_words.append(self._preprocess_segment(ref).split()) + + return {'ref_words': ref_words} diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__init__.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d658a1ba847f7663a502f559fd5ed8ef1be4a013 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__init__.py @@ -0,0 +1,2 @@ +# Base tokenizer to derive from +from .tokenizer_base import BaseTokenizer # noqa: F401 diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..280a654176b27513b3a80f7443ee7ffb2ac60012 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d76686ba3011cebcee068dcc4bc02ab6bb2035d1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e1bc278b8e9bfc79fb9f562f29d89148400833f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..443620a707cf1101f909da569124c8cc45328cf2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b2718b34125d50202d1c75d74e63855ba30262 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffce5f474202aa69950668fd8b141f4cc2611886 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b17e04ae93c199a96378d4c3da47a84e0880cb7a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6aa273bdb787f9f341ce1f6dd473afc9c7ee584 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a5666f72c0b888a12e9dbe1f869be1278939c26 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..074cbd38ce8d1f56dbc21a55331ff807ed1d1b30 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d1e80d91eafe77704283b256861fd9d9f128f614 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8108f5dfab8cdf2f13cda06c396c8e7b4799a568 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py new file mode 100644 index 0000000000000000000000000000000000000000..6441a7621882007faf99d261f7506550ee8164bd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py @@ -0,0 +1,34 @@ +from functools import lru_cache +from .tokenizer_base import BaseTokenizer +from .tokenizer_re import TokenizerRegexp + + +class Tokenizer13a(BaseTokenizer): + + def signature(self): + return '13a' + + def __init__(self): + self._post_tokenizer = TokenizerRegexp() + + @lru_cache(maxsize=2**16) + def __call__(self, line): + """Tokenizes an input line using a relatively minimal tokenization + that is however equivalent to mteval-v13a, used by WMT. + + :param line: a segment to tokenize + :return: the tokenized line + """ + + # language-independent part: + line = line.replace('<skipped>', '') + line = line.replace('-\n', '') + line = line.replace('\n', ' ') + + if '&' in line: + line = line.replace('&quot;', '"') + line = line.replace('&amp;', '&') + line = line.replace('&lt;', '<') + line = line.replace('&gt;', '>') + + return self._post_tokenizer(f' {line} ') diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py new file mode 100644 index 0000000000000000000000000000000000000000..faf3de4ca6310bd879f1aa2e4c35df59210e280d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py @@ -0,0 +1,19 @@ +class BaseTokenizer: + """A base dummy tokenizer to derive from.""" + + def signature(self): + """ + Returns a signature for the tokenizer. + + :return: signature string + """ + raise NotImplementedError() + + def __call__(self, line): + """ + Tokenizes an input line with the tokenizer. + + :param line: a segment to tokenize + :return: the tokenized line + """ + raise NotImplementedError() diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_char.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_char.py new file mode 100644 index 0000000000000000000000000000000000000000..8b8f8c5d73d1dd6e27185c6cef88615bec919564 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_char.py @@ -0,0 +1,19 @@ +from functools import lru_cache +from .tokenizer_base import BaseTokenizer + + +class TokenizerChar(BaseTokenizer): + def signature(self): + return 'char' + + def __init__(self): + pass + + @lru_cache(maxsize=2**16) + def __call__(self, line): + """Tokenizes all the characters in the input line.
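A usage sketch for the `13a` tokenizer defined above (assumes this sacrebleu package is importable; output shown approximately):

```python
from sacrebleu.tokenizers.tokenizer_13a import Tokenizer13a

tok = Tokenizer13a()
# XML escapes are restored first, then the shared TokenizerRegexp
# post-tokenizer splits punctuation off into separate tokens.
print(tok('He said: &quot;Hello, world!&quot;'))
# -> roughly: He said : " Hello , world ! "
```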
+ + :param line: a segment to tokenize + :return: the tokenized line + """ + return " ".join((char for char in line)) diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_intl.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_intl.py new file mode 100644 index 0000000000000000000000000000000000000000..bd980c6f3b47a80c2e5fbf92db45085cf7ed24ff --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_intl.py @@ -0,0 +1,50 @@ +from functools import lru_cache + +import regex + +from .tokenizer_base import BaseTokenizer + + +class TokenizerV14International(BaseTokenizer): + """Tokenizes a string following the official BLEU implementation. + + See github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983 + + In our case, the input string is expected to be just one line. + We just tokenize on punctuation and symbols, + except when a punctuation is preceded and followed by a digit + (e.g. a comma/dot as a thousand/decimal separator). + We do not recover escaped forms of punctuations such as &apos; or &gt; + as these should never appear in MT system outputs (see issue #138) + + Note that a number (e.g., a year) followed by a dot at the end of + sentence is NOT tokenized, i.e. the dot stays with the number because + `s/(\\p{P})(\\P{N})/ $1 $2/g` does not match this case (unless we add a + space after each sentence). However, this error is already in the + original mteval-v14.pl and we want to be consistent with it. + The error is not present in the non-international version, + which uses `$norm_text = " $norm_text "`. + + :param line: the input string to tokenize. + :return: The tokenized string. + """ + + def signature(self): + return 'intl' + + def __init__(self): + self._re = [ + # Separate out punctuations preceded by a non-digit + (regex.compile(r'(\P{N})(\p{P})'), r'\1 \2 '), + # Separate out punctuations followed by a non-digit + (regex.compile(r'(\p{P})(\P{N})'), r' \1 \2'), + # Separate out symbols + (regex.compile(r'(\p{S})'), r' \1 '), + ] + + @lru_cache(maxsize=2**16) + def __call__(self, line: str) -> str: + for (_re, repl) in self._re: + line = _re.sub(repl, line) + + return ' '.join(line.split()) diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ja_mecab.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ja_mecab.py new file mode 100644 index 0000000000000000000000000000000000000000..2844c5fca61804a3749d70870a142d5cb497740a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ja_mecab.py @@ -0,0 +1,52 @@ +from functools import lru_cache + +try: + import MeCab + import ipadic +except ImportError: + # Don't fail until the tokenizer is actually used + MeCab = None + +from .tokenizer_base import BaseTokenizer + +FAIL_MESSAGE = """ +Japanese tokenization requires extra dependencies, but you do not have them installed. +Please install them like so.
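A usage sketch of the digit-aware behaviour described in the docstring above (assumes the `regex` package is installed; output shown approximately):

```python
from sacrebleu.tokenizers.tokenizer_intl import TokenizerV14International

tok = TokenizerV14International()
# Commas and dots between digits (thousand/decimal separators) are
# kept intact; other punctuation and symbols are split off.
print(tok('The price is $1,234.56 today.'))
# -> roughly: The price is $ 1,234.56 today .
```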
+ + pip install sacrebleu[ja] +""" + + +class TokenizerJaMecab(BaseTokenizer): + def __init__(self): + if MeCab is None: + raise RuntimeError(FAIL_MESSAGE) + self.tagger = MeCab.Tagger(ipadic.MECAB_ARGS + " -Owakati") + + # make sure the dictionary is IPA + d = self.tagger.dictionary_info() + assert d.size == 392126, \ + "Please make sure to use the IPA dictionary for MeCab" + # This asserts that no user dictionary has been loaded + assert d.next is None + + @lru_cache(maxsize=2**16) + def __call__(self, line): + """ + Tokenizes a Japanese input line using the MeCab morphological analyzer. + + :param line: a segment to tokenize + :return: the tokenized line + """ + line = line.strip() + sentence = self.tagger.parse(line).strip() + return sentence + + def signature(self): + """ + Returns the MeCab parameters. + + :return: signature string + """ + signature = self.tagger.version() + "-IPA" + return 'ja-mecab-' + signature diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ko_mecab.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ko_mecab.py new file mode 100644 index 0000000000000000000000000000000000000000..7d880428376d4b4733a55e16714c9b75e1d8c81a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ko_mecab.py @@ -0,0 +1,52 @@ +from functools import lru_cache + +try: + import mecab_ko as MeCab + import mecab_ko_dic +except ImportError: + # Don't fail until the tokenizer is actually used + MeCab = None + +from .tokenizer_base import BaseTokenizer + +FAIL_MESSAGE = """ +Korean tokenization requires extra dependencies, but you do not have them installed. +Please install them like so. + + pip install sacrebleu[ko] +""" + + +class TokenizerKoMecab(BaseTokenizer): + def __init__(self): + if MeCab is None: + raise RuntimeError(FAIL_MESSAGE) + self.tagger = MeCab.Tagger(mecab_ko_dic.MECAB_ARGS + " -Owakati") + + # make sure the dictionary is mecab-ko-dic + d = self.tagger.dictionary_info() + assert d.size == 811795, \ + "Please make sure to use the mecab-ko-dic for MeCab-ko" + # This asserts that no user dictionary has been loaded + assert d.next is None + + @lru_cache(maxsize=2**16) + def __call__(self, line): + """ + Tokenizes a Korean input line using the MeCab-ko morphological analyzer. + + :param line: a segment to tokenize + :return: the tokenized line + """ + line = line.strip() + sentence = self.tagger.parse(line).strip() + return sentence + + def signature(self): + """ + Returns the MeCab-ko parameters. + + :return: signature string + """ + signature = self.tagger.version() + "-KO" + return 'ko-mecab-' + signature diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_none.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_none.py new file mode 100644 index 0000000000000000000000000000000000000000..a204c0009f6920a587fbbf87ad611d546fbcffcd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_none.py @@ -0,0 +1,10 @@ +from .tokenizer_base import BaseTokenizer + +class NoneTokenizer(BaseTokenizer): + """Don't apply any tokenization.
Not recommended!""" + + def signature(self): + return 'none' + + def __call__(self, line): + return line diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb67eb5126c4882d330387f662c0c865ce56f0c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py @@ -0,0 +1,38 @@ +from functools import lru_cache +import re + +from .tokenizer_base import BaseTokenizer + + +class TokenizerRegexp(BaseTokenizer): + + def signature(self): + return 're' + + def __init__(self): + self._re = [ + # language-dependent part (assuming Western languages) + (re.compile(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])'), r' \1 '), + # tokenize period and comma unless preceded by a digit + (re.compile(r'([^0-9])([\.,])'), r'\1 \2 '), + # tokenize period and comma unless followed by a digit + (re.compile(r'([\.,])([^0-9])'), r' \1 \2'), + # tokenize dash when preceded by a digit + (re.compile(r'([0-9])(-)'), r'\1 \2 '), + # one space only between words + # NOTE: Doing this in Python (below) is faster + # (re.compile(r'\s+'), r' '), + ] + + @lru_cache(maxsize=2**16) + def __call__(self, line): + """Common post-processing tokenizer for `13a` and `zh` tokenizers. + + :param line: a segment to tokenize + :return: the tokenized line + """ + for (_re, repl) in self._re: + line = _re.sub(repl, line) + + # no leading or trailing spaces, single space within words + return ' '.join(line.split()) diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py new file mode 100644 index 0000000000000000000000000000000000000000..92729b5be351622ccfc23d25b7f85e6221b56dd3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +import os +import logging + +from functools import lru_cache +from ..utils import SACREBLEU_DIR, download_file +from .tokenizer_base import BaseTokenizer + +sacrelogger = logging.getLogger('sacrebleu') + + +SPM_MODELS = { + "spm": { + "url": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model", + "signature": "flores101", + }, + # same as the default of "spm" + "flores101": { + "url": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model", + "signature": "flores101", + }, + "flores200": { + "url": "https://tinyurl.com/flores200sacrebleuspm", + "signature": "flores200", + }, +} + +class TokenizerSPM(BaseTokenizer): + def signature(self): + return self.name + + def __init__(self, key="spm"): + self.name = SPM_MODELS[key]["signature"] + + if key == "spm": + sacrelogger.warn("Tokenizer 'spm' has been changed to 'flores101', and may be removed in the future.") + + try: + import sentencepiece as spm + except (ImportError, ModuleNotFoundError): + raise ImportError( + '\n\nPlease install the sentencepiece library for SPM tokenization:' + '\n\n pip install sentencepiece ' + ) + self.sp = spm.SentencePieceProcessor() + + model_path = os.path.join(SACREBLEU_DIR, "models", os.path.basename(SPM_MODELS[key]["url"])) + if not os.path.exists(model_path): + url = SPM_MODELS[self.name]["url"] + download_file(url, model_path) + self.sp.Load(model_path) + + @lru_cache(maxsize=2**16) + def __call__(self, line): + """Tokenizes the input line into SentencePiece pieces.
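A usage sketch of the digit-sensitive rules in `TokenizerRegexp` above (output shown approximately):

```python
from sacrebleu.tokenizers.tokenizer_re import TokenizerRegexp

tok = TokenizerRegexp()
# The period in 3.5 survives (digits on both sides); the sentence-final
# dot, the comma, and the dash after a digit are split off.
print(tok('In 1995, costs rose 3.5-fold.'))
# -> roughly: In 1995 , costs rose 3.5 - fold .
```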
+ + :param line: a segment to tokenize + :return: the tokenized line + """ + return " ".join(self.sp.EncodeAsPieces(line)) + + +class Flores200Tokenizer(TokenizerSPM): + def __init__(self): + super().__init__("flores200") + +class Flores101Tokenizer(TokenizerSPM): + def __init__(self): + super().__init__("flores101") diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ter.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ter.py new file mode 100644 index 0000000000000000000000000000000000000000..66c7cf238588bc9e3d0cc076ee3d4aa5566f1a96 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ter.py @@ -0,0 +1,171 @@ +# Copyright 2020 Memsource +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import re +from functools import lru_cache + +from .tokenizer_base import BaseTokenizer + + +def _normalize_general_and_western(sent: str) -> str: + # language-independent (general) part + + # strip end-of-line hyphenation and join lines + sent = re.sub(r"\n-", "", sent) + + # join lines + sent = re.sub(r"\n", " ", sent) + + # handle XML escaped symbols + sent = re.sub(r"&quot;", "\"", sent) + sent = re.sub(r"&amp;", "&", sent) + sent = re.sub(r"&lt;", "<", sent) + sent = re.sub(r"&gt;", ">", sent) + + # language-dependent (Western) part + sent = f" {sent} " + + # tokenize punctuation + sent = re.sub(r"([{-~[-` -&(-+:-@/])", r" \1 ", sent) + + # handle possessives + sent = re.sub(r"'s ", r" 's ", sent) + sent = re.sub(r"'s$", r" 's", sent) + + # tokenize period and comma unless preceded by a digit + sent = re.sub(r"([^0-9])([\.,])", r"\1 \2 ", sent) + + # tokenize period and comma unless followed by a digit + sent = re.sub(r"([\.,])([^0-9])", r" \1 \2", sent) + + # tokenize dash when preceded by a digit + sent = re.sub(r"([0-9])(-)", r"\1 \2 ", sent) + + return sent + + +def _normalize_asian(sent: str) -> str: + # Split Chinese chars and Japanese kanji down to character level + + # 4E00—9FFF CJK Unified Ideographs + # 3400—4DBF CJK Unified Ideographs Extension A + sent = re.sub(r"([\u4e00-\u9fff\u3400-\u4dbf])", r" \1 ", sent) + + # 31C0—31EF CJK Strokes + # 2E80—2EFF CJK Radicals Supplement + sent = re.sub(r"([\u31c0-\u31ef\u2e80-\u2eff])", r" \1 ", sent) + + # 3300—33FF CJK Compatibility + # F900—FAFF CJK Compatibility Ideographs + # FE30—FE4F CJK Compatibility Forms + sent = re.sub( + r"([\u3300-\u33ff\uf900-\ufaff\ufe30-\ufe4f])", r" \1 ", sent) + + # 3200—32FF Enclosed CJK Letters and Months + sent = re.sub(r"([\u3200-\u3f22])", r" \1 ", sent) + + # Split Hiragana, Katakana, and KatakanaPhoneticExtensions + # only when adjacent to something else + # 3040—309F Hiragana + # 30A0—30FF Katakana + # 31F0—31FF Katakana Phonetic Extensions + sent = re.sub( + r"(^|^[\u3040-\u309f])([\u3040-\u309f]+)(?=$|^[\u3040-\u309f])", + r"\1 \2 ", sent) + sent = re.sub( + r"(^|^[\u30a0-\u30ff])([\u30a0-\u30ff]+)(?=$|^[\u30a0-\u30ff])", + r"\1 \2 ", sent) + sent = re.sub( + r"(^|^[\u31f0-\u31ff])([\u31f0-\u31ff]+)(?=$|^[\u31f0-\u31ff])", +
r"\1 \2 ", sent) + + sent = re.sub(TercomTokenizer.ASIAN_PUNCT, r" \1 ", sent) + sent = re.sub(TercomTokenizer.FULL_WIDTH_PUNCT, r" \1 ", sent) + return sent + + +def _remove_punct(sent: str) -> str: + return re.sub(r"[\.,\?:;!\"\(\)]", "", sent) + + +def _remove_asian_punct(sent: str) -> str: + sent = re.sub(TercomTokenizer.ASIAN_PUNCT, r"", sent) + sent = re.sub(TercomTokenizer.FULL_WIDTH_PUNCT, r"", sent) + return sent + + +class TercomTokenizer(BaseTokenizer): + """Re-implementation of Tercom Tokenizer in Python 3. + + See src/ter/core/Normalizer.java in https://github.com/jhclark/tercom + + Note that Python doesn't support named Unicode blocks so the mapping for + relevant blocks was taken from here: + + https://unicode-table.com/en/blocks/ + """ + ASIAN_PUNCT = r"([\u3001\u3002\u3008-\u3011\u3014-\u301f\uff61-\uff65\u30fb])" + FULL_WIDTH_PUNCT = r"([\uff0e\uff0c\uff1f\uff1a\uff1b\uff01\uff02\uff08\uff09])" + + def __init__(self, + normalized: bool = False, + no_punct: bool = False, + asian_support: bool = False, + case_sensitive: bool = False): + """Initialize the tokenizer. + + :param normalized: Enable character normalization. By default, normalizes a couple of things such as + newlines being stripped, retrieving XML encoded characters, and fixing tokenization for punctuation. When + 'asian_support' is enabled, also normalizes specific Asian (CJK) character sequences, i.e. + split them down to the character level. + :param no_punct: Remove punctuation. Can be used in conjunction with 'asian_support' to also remove typical + punctuation markers in Asian languages (CJK). + :param asian_support: Enable special treatment of Asian characters. This option only has an effect when + 'normalized' and/or 'no_punct' is enabled. If 'normalized' is also enabled, then Asian (CJK) + characters are split down to the character level. If 'no_punct' is enabled alongside 'asian_support', + specific unicode ranges for CJK and full-width punctuations are also removed. + :param case_sensitive: Enable case sensitivity, i.e., do not lower case data. + """ + self._normalized = normalized + self._no_punct = no_punct + self._asian_support = asian_support + self._case_sensitive = case_sensitive + + @lru_cache(maxsize=2**16) + # Although the cache is shared across different instances, same sentence + # queries do not return invalid returns across different instances since + # `self` becomes part of the query as well. + def __call__(self, sent: str) -> str: + if not sent: + return "" + + if not self._case_sensitive: + sent = sent.lower() + + if self._normalized: + sent = _normalize_general_and_western(sent) + if self._asian_support: + sent = _normalize_asian(sent) + + if self._no_punct: + sent = _remove_punct(sent) + if self._asian_support: + sent = _remove_asian_punct(sent) + + # Strip extra whitespaces + return ' '.join(sent.split()) + + def signature(self): + return 'tercom' diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_zh.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_zh.py new file mode 100644 index 0000000000000000000000000000000000000000..8ec831aa34648006b52609ff03f049f9bafcb666 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_zh.py @@ -0,0 +1,119 @@ +# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. 
A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +############## + +# MIT License +# Copyright (c) 2017 - Shujian Huang + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# Author: Shujian Huang huangsj@nju.edu.cn + + +from functools import lru_cache + +from .tokenizer_base import BaseTokenizer +from .tokenizer_re import TokenizerRegexp + +_UCODE_RANGES = [ + (u'\u3400', u'\u4db5'), # CJK Unified Ideographs Extension A, release 3.0 + (u'\u4e00', u'\u9fa5'), # CJK Unified Ideographs, release 1.1 + (u'\u9fa6', u'\u9fbb'), # CJK Unified Ideographs, release 4.1 + (u'\uf900', u'\ufa2d'), # CJK Compatibility Ideographs, release 1.1 + (u'\ufa30', u'\ufa6a'), # CJK Compatibility Ideographs, release 3.2 + (u'\ufa70', u'\ufad9'), # CJK Compatibility Ideographs, release 4.1 + (u'\u20000', u'\u2a6d6'), # (UTF16) CJK Unified Ideographs Extension B, release 3.1 + (u'\u2f800', u'\u2fa1d'), # (UTF16) CJK Compatibility Supplement, release 3.1 + (u'\uff00', u'\uffef'), # Full width ASCII, full width of English punctuation, + # half width Katakana, half wide half width kana, Korean alphabet + (u'\u2e80', u'\u2eff'), # CJK Radicals Supplement + (u'\u3000', u'\u303f'), # CJK punctuation mark + (u'\u31c0', u'\u31ef'), # CJK stroke + (u'\u2f00', u'\u2fdf'), # Kangxi Radicals + (u'\u2ff0', u'\u2fff'), # Chinese character structure + (u'\u3100', u'\u312f'), # Phonetic symbols + (u'\u31a0', u'\u31bf'), # Phonetic symbols (Taiwanese and Hakka expansion) + (u'\ufe10', u'\ufe1f'), + (u'\ufe30', u'\ufe4f'), + (u'\u2600', u'\u26ff'), + (u'\u2700', u'\u27bf'), + (u'\u3200', u'\u32ff'), + (u'\u3300', u'\u33ff'), +] + + +class TokenizerZh(BaseTokenizer): + + def signature(self): + return 'zh' + + def __init__(self): + self._post_tokenizer = TokenizerRegexp() + + @staticmethod + @lru_cache(maxsize=2**16) + def _is_chinese_char(uchar): + """ + :param uchar: input char in unicode + :return: whether the input char is a Chinese character. 
+ """ + for start, end in _UCODE_RANGES: + if start <= uchar <= end: + return True + return False + + @lru_cache(maxsize=2**16) + def __call__(self, line): + """The tokenization of Chinese text in this script contains two + steps: separate each Chinese characters (by utf-8 encoding); tokenize + the non Chinese part (following the `13a` i.e. mteval tokenizer). + + Author: Shujian Huang huangsj@nju.edu.cn + + :param line: input sentence + :return: tokenized sentence + """ + + line = line.strip() + line_in_chars = "" + + # TODO: the below code could probably be replaced with the following: + # @ozan: Gives slightly different scores, need to investigate + # import regex + # line = regex.sub(r'(\p{Han})', r' \1 ', line) + for char in line: + if self._is_chinese_char(char): + line_in_chars += " " + line_in_chars += char + line_in_chars += " " + else: + line_in_chars += char + + return self._post_tokenizer(line_in_chars) diff --git a/llmeval-env/lib/python3.10/site-packages/triton/__init__.py b/llmeval-env/lib/python3.10/site-packages/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..76299bdeeb30aea0158c1085ec41200907f5a718 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/__init__.py @@ -0,0 +1,69 @@ +"""isort:skip_file""" +__version__ = '2.3.0' + +# --------------------------------------- +# Note: import order is significant here. + +# submodules +from .runtime import ( + autotune, + Config, + heuristics, + JITFunction, + KernelInterface, + reinterpret, + TensorWrapper, + OutOfResources, + MockTensor, +) +from .runtime.jit import jit +from .compiler import compile, CompilationError + +from . import language +from . import testing +from . import tools + +__all__ = [ + "autotune", + "cdiv", + "CompilationError", + "compile", + "Config", + "heuristics", + "impl", + "jit", + "JITFunction", + "KernelInterface", + "language", + "MockTensor", + "next_power_of_2", + "ops", + "OutOfResources", + "reinterpret", + "runtime", + "TensorWrapper", + "testing", + "tools", +] + +# ------------------------------------- +# misc. 
utilities that don't fit well +# into any specific module +# ------------------------------------- + + +def cdiv(x: int, y: int): + return (x + y - 1) // y + + +def next_power_of_2(n: int): + """Return the smallest power of 2 greater than or equal to n""" + n -= 1 + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + n += 1 + return n diff --git a/llmeval-env/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a33c5de7fa02f2c69f5e7cb7165ab55ed2621945 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ae26ffaf640cd091fc526634b6e577ff827189f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/common/__init__.py b/llmeval-env/lib/python3.10/site-packages/triton/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dfb6f8870e48aff3f0e195ba7820fa9d68235fb2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/common/__init__.py @@ -0,0 +1,3 @@ +from .build import _build, cuda_include_dir, libcuda_dirs + +__all__ = ["_build", "libcuda_dirs", "cuda_include_dir"] diff --git a/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c325647c51636f5aec102fd860e0731894b749c6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/backend.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5b1af64775fe4e911001743533d8c40ad3aa845 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/backend.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/build.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00f5f82163220fb483830e5ec0240958872ad07f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/common/__pycache__/build.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/common/backend.py b/llmeval-env/lib/python3.10/site-packages/triton/common/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..fffbf600af5f8e2b20b54d46b0d1c451ad7d3c4c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/common/backend.py @@ -0,0 +1,183 @@ +import functools +import hashlib +import importlib +import importlib.util +import os +import re +import subprocess +import traceback +from typing import Dict + +from ..runtime.driver import DriverBase + 
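A few hand-checked values for the two helpers defined above (assumes this `triton` package is importable):

```python
import triton

assert triton.cdiv(10, 3) == 4             # ceiling division
assert triton.cdiv(12, 4) == 3             # exact division is unchanged
assert triton.next_power_of_2(17) == 32
assert triton.next_power_of_2(32) == 32    # powers of two are fixed points
```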
+TRITON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +TRITON_VERSION = "2.3.0" + + +class BaseBackend: + + def __init__(self, device_type: str) -> None: + self.device_type = device_type + + def add_stages(self, arch, extern_libs, stages): + """ + Customize the arch, extern_libs and stages per backend-specific requirement + """ + raise NotImplementedError + + def add_meta_info(self, ir, cur_module, next_module, metadata, asm): + """ + Customize the ir, module, metadata and asm per backend-specific requirement + """ + raise NotImplementedError + + def get_load_binary_fn(self): + """ + Return a callable to load binary + """ + raise NotImplementedError + + def get_driver(self) -> DriverBase: + """ + Get the backend driver. Please refer to "DriverBase" for more details + """ + raise NotImplementedError + + def get_stream(self): + """ + Get stream for current device + """ + raise NotImplementedError + + def get_device_properties(self, device): + raise NotImplementedError + + def get_current_device(self): + """ + Get current device + """ + raise NotImplementedError + + def set_current_device(self, device): + """ + Set current device as the given device + """ + raise NotImplementedError + + def get_kernel_bin(self): + raise NotImplementedError + + def make_launcher_stub(self, name, signature, constants): + """ + Generate the launcher stub to launch the kernel + """ + raise NotImplementedError + + def get_architecture_descriptor(self, **kwargs): + """ + Get the architecture descriptor of the backend + """ + raise NotImplementedError + + @classmethod + def create_backend(cls, device_type: str): + return cls(device_type) + + +_backends: Dict[str, BaseBackend] = {} + + +def register_backend(device_type: str, backend_cls: type): + if device_type not in _backends: + _backends[device_type] = backend_cls.create_backend(device_type) + + +def get_backend(device_type: str): + if device_type not in _backends: + device_backend_package_name = f"...third_party.{device_type}" + if importlib.util.find_spec(device_backend_package_name, package=__spec__.name): + try: + importlib.import_module(device_backend_package_name, package=__spec__.name) + except Exception: + traceback.print_exc() + else: + return None + return _backends[device_type] if device_type in _backends else None + + +def _path_to_binary(binary: str): + base_dir = os.path.join(os.path.dirname(__file__), os.pardir) + paths = [ + os.environ.get(f"TRITON_{binary.upper()}_PATH", ""), + os.path.join(base_dir, "third_party", "cuda", "bin", binary) + ] + + for p in paths: + bin = p.split(" ")[0] + if os.path.exists(bin) and os.path.isfile(bin): + result = subprocess.check_output([bin, "--version"], stderr=subprocess.STDOUT) + if result is not None: + version = re.search(r".*release (\d+\.\d+).*", result.decode("utf-8"), flags=re.MULTILINE) + if version is not None: + return p, version.group(1) + raise RuntimeError(f"Cannot find {binary}") + + +@functools.lru_cache() +def path_to_ptxas(): + return _path_to_binary("ptxas") + + +@functools.lru_cache() +def path_to_cuobjdump(): + return _path_to_binary("cuobjdump") + + +@functools.lru_cache() +def path_to_nvdisasm(): + return _path_to_binary("nvdisasm") + + +@functools.lru_cache() +def compute_core_version_key(): + import pkgutil + contents = [] + # frontend + with open(__file__, "rb") as f: + contents += [hashlib.sha1(f.read()).hexdigest()] + # compiler + compiler_path = os.path.join(TRITON_PATH, 'compiler') + for lib in pkgutil.iter_modules([compiler_path]): + with
open(lib.module_finder.find_spec(lib.name).origin, "rb") as f: + contents += [hashlib.sha1(f.read()).hexdigest()] + # backend + libtriton_hash = hashlib.sha1() + with open(os.path.join(TRITON_PATH, "_C/libtriton.so"), "rb") as f: + while True: + chunk = f.read(1024**2) + if not chunk: + break + libtriton_hash.update(chunk) + contents.append(libtriton_hash.hexdigest()) + # language + language_path = os.path.join(TRITON_PATH, 'language') + for lib in pkgutil.iter_modules([language_path]): + with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f: + contents += [hashlib.sha1(f.read()).hexdigest()] + return '-'.join(TRITON_VERSION) + '-'.join(contents) + + +_cached_cuda_version_key = None + + +def get_cuda_version_key(): + global _cached_cuda_version_key + if _cached_cuda_version_key is None: + key = compute_core_version_key() + try: + ptxas = path_to_ptxas()[0] + ptxas_version = subprocess.check_output([ptxas, "--version"]) + except RuntimeError: + ptxas_version = b"NO_PTXAS" + _cached_cuda_version_key = key + '-' + hashlib.sha1(ptxas_version).hexdigest() + return _cached_cuda_version_key diff --git a/llmeval-env/lib/python3.10/site-packages/triton/common/build.py b/llmeval-env/lib/python3.10/site-packages/triton/common/build.py new file mode 100644 index 0000000000000000000000000000000000000000..4153272a267cbf346b4db436ec230a2181923490 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/common/build.py @@ -0,0 +1,140 @@ +import contextlib +import functools +import io +import os +import shutil +import subprocess +import sys +import sysconfig + +import setuptools + + +# TODO: is_hip shouldn't be here +def is_hip(): + import torch + return torch.version.hip is not None + + +@functools.lru_cache() +def libcuda_dirs(): + env_libcuda_path = os.getenv("TRITON_LIBCUDA_PATH") + if env_libcuda_path: + return [env_libcuda_path] + + libs = subprocess.check_output(["/sbin/ldconfig", "-p"]).decode() + # each line looks like the following: + # libcuda.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libcuda.so.1 + locs = [line.split()[-1] for line in libs.splitlines() if "libcuda.so" in line] + dirs = [os.path.dirname(loc) for loc in locs] + env_ld_library_path = os.getenv("LD_LIBRARY_PATH") + if env_ld_library_path and not dirs: + dirs = [dir for dir in env_ld_library_path.split(":") if os.path.exists(os.path.join(dir, "libcuda.so"))] + msg = 'libcuda.so cannot be found!\n' + if locs: + msg += 'Possible files are located at %s.' % str(locs) + msg += 'Please create a symlink of libcuda.so to any of the files.' + else: + msg += 'Please make sure GPU is set up and then run "/sbin/ldconfig"' + msg += ' (requires sudo) to refresh the linker cache.'
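A standalone sketch of the `ldconfig` parsing performed in `libcuda_dirs()` above, run against a canned line instead of real `/sbin/ldconfig -p` output:

```python
import os

libs = "libcuda.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libcuda.so.1"
# Take the last whitespace-separated field (the resolved path) of every
# line mentioning libcuda.so, then keep the containing directories.
locs = [line.split()[-1] for line in libs.splitlines() if "libcuda.so" in line]
dirs = [os.path.dirname(loc) for loc in locs]
assert dirs == ["/lib/x86_64-linux-gnu"]
```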
+ assert any(os.path.exists(os.path.join(path, 'libcuda.so')) for path in dirs), msg + return dirs + + +@functools.lru_cache() +def rocm_path_dir(): + return os.getenv("ROCM_PATH", default="/opt/rocm") + + +@contextlib.contextmanager +def quiet(): + old_stdout, old_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = io.StringIO(), io.StringIO() + try: + yield + finally: + sys.stdout, sys.stderr = old_stdout, old_stderr + + +@functools.lru_cache() +def cuda_include_dir(): + base_dir = os.path.join(os.path.dirname(__file__), os.path.pardir) + cuda_path = os.path.join(base_dir, "third_party", "cuda") + return os.path.join(cuda_path, "include") + + +def _build(name, src, srcdir): + if is_hip(): + hip_lib_dir = os.path.join(rocm_path_dir(), "lib") + hip_include_dir = os.path.join(rocm_path_dir(), "include") + else: + cuda_lib_dirs = libcuda_dirs() + cu_include_dir = cuda_include_dir() + suffix = sysconfig.get_config_var('EXT_SUFFIX') + so = os.path.join(srcdir, '{name}{suffix}'.format(name=name, suffix=suffix)) + # try to avoid setuptools if possible + cc = os.environ.get("CC") + if cc is None: + # TODO: support more things here. + clang = shutil.which("clang") + gcc = shutil.which("gcc") + cc = gcc if gcc is not None else clang + if cc is None: + raise RuntimeError("Failed to find C compiler. Please specify via CC environment variable.") + # This function was renamed and made public in Python 3.10 + if hasattr(sysconfig, 'get_default_scheme'): + scheme = sysconfig.get_default_scheme() + else: + scheme = sysconfig._get_default_scheme() + # 'posix_local' is a custom scheme on Debian. However, starting Python 3.10, the default install + # path changes to include 'local'. This change is required to use triton with system-wide python. + if scheme == 'posix_local': + scheme = 'posix_prefix' + py_include_dir = sysconfig.get_paths(scheme=scheme)["include"] + + if is_hip(): + ret = subprocess.check_call([ + cc, src, f"-I{hip_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", + f"-L{hip_lib_dir}", "-lamdhip64", "-o", so + ]) + else: + cc_cmd = [ + cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda", + "-o", so + ] + cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs] + ret = subprocess.check_call(cc_cmd) + + if ret == 0: + return so + # fallback on setuptools + extra_compile_args = [] + library_dirs = cuda_lib_dirs + include_dirs = [srcdir, cu_include_dir] + libraries = ['cuda'] + # extra arguments + extra_link_args = [] + # create extension module + ext = setuptools.Extension( + name=name, + language='c', + sources=[src], + include_dirs=include_dirs, + extra_compile_args=extra_compile_args + ['-O3'], + extra_link_args=extra_link_args, + library_dirs=library_dirs, + libraries=libraries, + ) + # build extension module + args = ['build_ext'] + args.append('--build-temp=' + srcdir) + args.append('--build-lib=' + srcdir) + args.append('-q') + args = dict( + name=name, + ext_modules=[ext], + script_args=args, + ) + with quiet(): + setuptools.setup(**args) + return so diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/__init__.py b/llmeval-env/lib/python3.10/site-packages/triton/language/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2b5cfac6290430ffb267084f79e82718dd71123 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/language/__init__.py @@ -0,0 +1,218 @@ +"""isort:skip_file""" +# Import order is significant here. + +from . import math +from . 
import extra +from .standard import ( + argmax, + argmin, + cdiv, + cumprod, + cumsum, + max, + maximum, + min, + minimum, + sigmoid, + softmax, + sort, + sum, + ravel, + swizzle2d, + xor_sum, + zeros, + zeros_like, +) +from .core import ( + TRITON_MAX_TENSOR_NUMEL, + abs, + advance, + arange, + associative_scan, + atomic_add, + atomic_and, + atomic_cas, + atomic_max, + atomic_min, + atomic_or, + atomic_xchg, + atomic_xor, + bfloat16, + block_type, + broadcast, + broadcast_to, + cat, + constexpr, + cos, + debug_barrier, + device_assert, + device_print, + dot, + dtype, + exp, + expand_dims, + full, + fdiv, + float16, + float32, + float64, + float8e4b15, + float8e4b15x4, + float8e4nv, + float8e5, + function_type, + inline_asm_elementwise, + int1, + int16, + int32, + int64, + int8, + load, + log, + make_block_ptr, + max_constancy, + max_contiguous, + multiple_of, + num_programs, + pi32_t, + pointer_type, + program_id, + reduce, + reshape, + sin, + sqrt, + static_assert, + static_print, + store, + static_range, + tensor, + trans, + # triton, + uint16, + uint32, + uint64, + uint8, + umulhi, + view, + void, + where, +) +from .random import ( + pair_uniform_to_normal, + philox, + philox_impl, + rand, + rand4x, + randint, + randint4x, + randn, + randn4x, + uint_to_uniform_float, +) + +__all__ = [ + "TRITON_MAX_TENSOR_NUMEL", + "abs", + "advance", + "arange", + "argmin", + "argmax", + "associative_scan", + "atomic_add", + "atomic_and", + "atomic_cas", + "atomic_max", + "atomic_min", + "atomic_or", + "atomic_xchg", + "atomic_xor", + "bfloat16", + "block_type", + "broadcast", + "broadcast_to", + "builtin", + "cat", + "cdiv", + "constexpr", + "cos", + "cumprod", + "cumsum", + "debug_barrier", + "device_assert", + "device_print", + "dot", + "dtype", + "exp", + "expand_dims", + "extra", + "fdiv", + "float16", + "float32", + "float64", + "float8e4b15", + "float8e4b15x4", + "float8e4nv", + "float8e5", + "full", + "function_type", + "inline_asm_elementwise", + "int1", + "int16", + "int32", + "int64", + "int8", + "ir", + "math", + "load", + "log", + "make_block_ptr", + "max", + "max_constancy", + "max_contiguous", + "maximum", + "min", + "minimum", + "multiple_of", + "num_programs", + "pair_uniform_to_normal", + "philox", + "philox_impl", + "pi32_t", + "pointer_type", + "program_id", + "rand", + "rand4x", + "randint", + "randint4x", + "randn", + "randn4x", + "ravel", + "reduce", + "reshape", + "sigmoid", + "sin", + "softmax", + "sort", + "sqrt", + "static_range", + "static_assert", + "static_print", + "store", + "sum", + "swizzle2d", + "tensor", + "trans", + "triton", + "uint16", + "uint32", + "uint_to_uniform_float", + "uint64", + "uint8", + "umulhi", + "view", + "void", + "where", + "xor_sum", + "zeros", + "zeros_like", +] diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34ec161fcf807a11ea69200d3d1eaf2563e8a5ee Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/core.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2398afc39d0a50df2d14d7c929e4dd1d9cfbfaf Binary files /dev/null and 
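A sketch of how these exports are typically consumed in a kernel, using the standard Triton vector-addition pattern (assumes `torch` and a CUDA device are available):

```python
import torch
import triton
import triton.language as tl

@triton.jit
def add_kernel(x_ptr, y_ptr, out_ptr, n, BLOCK: tl.constexpr):
    pid = tl.program_id(0)
    offs = pid * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n                       # guard the ragged last block
    x = tl.load(x_ptr + offs, mask=mask)
    y = tl.load(y_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, x + y, mask=mask)

x = torch.randn(1024, device="cuda")
y = torch.randn(1024, device="cuda")
out = torch.empty_like(x)
add_kernel[(triton.cdiv(1024, 256),)](x, y, out, 1024, BLOCK=256)
assert torch.allclose(out, x + y)
```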
b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/core.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/math.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/math.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db746849e3c7d7b0b3a39975ca6da979f517a144 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/math.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/random.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97df7e6b401d54042032ee987ac72a8644baa1f4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/random.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/semantic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/semantic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4cfc83869bbcae84b51e77c6b7e6f4346f04593 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/semantic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/standard.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/standard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a363c38c5fffc998b18a554c04982f4111922880 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/language/__pycache__/standard.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/core.py b/llmeval-env/lib/python3.10/site-packages/triton/language/core.py new file mode 100644 index 0000000000000000000000000000000000000000..a60a9b7bc83263e5cda4f02f6ba0cfbfbf429540 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/language/core.py @@ -0,0 +1,1883 @@ +from __future__ import annotations + +from contextlib import contextmanager +from enum import Enum +from functools import partial, wraps +from typing import Callable, List, Sequence, TypeVar + +from .._C.libtriton.triton import ir +from . import semantic + +T = TypeVar('T') + +TRITON_MAX_TENSOR_NUMEL = 1048576 + +TRITON_BUILTIN = "__triton_builtin__" + + +def builtin(fn: T) -> T: + """Mark a function as a builtin.""" + assert callable(fn) + + @wraps(fn) + def wrapper(*args, **kwargs): + if "_builder" not in kwargs or kwargs["_builder"] is None: + raise ValueError("Did you forget to add @triton.jit ? 
" + "(`_builder` argument must be provided outside of JIT functions.)") + return fn(*args, **kwargs) + + setattr(wrapper, TRITON_BUILTIN, True) + + return wrapper + + +def is_builtin(fn) -> bool: + """Is this a registered triton builtin function?""" + return getattr(fn, TRITON_BUILTIN, False) + + +def _to_tensor(x, builder): + if isinstance(x, bool): + return tensor(builder.get_int1(x), int1) + # Note: compile-time const integers are represented by unsigned values + elif isinstance(x, int): + if -2**31 <= x < 2**31: + return tensor(builder.get_int32(x), int32) + elif 2**31 <= x < 2**32: + return tensor(builder.get_uint32(x), uint32) + elif -2**63 <= x < 2**63: + return tensor(builder.get_int64(x), int64) + elif 2**63 <= x < 2**64: + return tensor(builder.get_uint64(x), uint64) + else: + raise RuntimeError(f'Nonrepresentable integer {x}.') + elif isinstance(x, float): + min_float32 = 2**-126 + max_float32 = (2 - 2**-23) * 2**127 + abs_x = __builtins__['abs'](x) + if abs_x == float("inf") or\ + abs_x == 0.0 or \ + x != x or \ + min_float32 <= abs_x <= max_float32: + return tensor(builder.get_fp32(x), float32) + else: + return tensor(builder.get_fp64(x), float64) + + elif isinstance(x, constexpr): + return _to_tensor(x.value, builder) + elif isinstance(x, tensor): + return x + assert False, f"cannot convert {x} of type {type(x)} to tensor" + + +class dtype: + SINT_TYPES = ['int8', 'int16', 'int32', 'int64'] + UINT_TYPES = ['int1', 'uint8', 'uint16', 'uint32', 'uint64'] + FP_TYPES = ['fp8e4b15', 'fp8e4b15x4', 'fp8e4nv', 'fp8e5', 'fp16', 'bf16', 'fp32', 'fp64'] + STANDARD_FP_TYPES = ['fp16', 'bf16', 'fp32', 'fp64'] + OTHER_TYPES = ['void'] + + class SIGNEDNESS(Enum): + SIGNED = 0 + UNSIGNED = 1 + + def __init__(self, name): + self.name = name + assert name in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES, name + if name in dtype.SINT_TYPES: + self.int_signedness = dtype.SIGNEDNESS.SIGNED + self.int_bitwidth = int(name.split('int')[-1]) + self.primitive_bitwidth = self.int_bitwidth + elif name in dtype.UINT_TYPES: + self.int_signedness = dtype.SIGNEDNESS.UNSIGNED + self.int_bitwidth = int(name.split('int')[-1]) + self.primitive_bitwidth = self.int_bitwidth + elif name in dtype.FP_TYPES: + if name == 'fp8e4b15': + self.fp_mantissa_width = 3 + self.primitive_bitwidth = 8 + self.exponent_bias = 15 + elif name == 'fp8e4b15x4': + self.fp_mantissa_width = 3 + self.primitive_bitwidth = 8 + self.exponent_bias = 15 + elif name == 'fp8e4nv': + self.fp_mantissa_width = 3 + self.primitive_bitwidth = 8 + self.exponent_bias = 7 + elif name == 'fp8e5': + self.fp_mantissa_width = 2 + self.primitive_bitwidth = 8 + self.exponent_bias = 15 + elif name == 'fp16': + self.fp_mantissa_width = 10 + self.primitive_bitwidth = 16 + self.exponent_bias = 15 + elif name == 'bf16': + self.fp_mantissa_width = 7 + self.primitive_bitwidth = 16 + self.exponent_bias = 127 + elif name == 'fp32': + self.fp_mantissa_width = 23 + self.primitive_bitwidth = 32 + self.exponent_bias = 127 + elif name == 'fp64': + self.fp_mantissa_width = 53 + self.primitive_bitwidth = 64 + self.exponent_bias = 1023 + else: + raise RuntimeError(f'Unsupported floating-point type {name}') + elif name == 'void': + self.primitive_bitwidth = 0 + + def is_fp8(self): + return 'fp8' in self.name + + def is_fp8e4nv(self): + return self.name == 'fp8e4nv' + + def is_fp8e4b15(self): + return self.name == 'fp8e4b15' + + def is_fp8e4b15x4(self): + return self.name == 'fp8e4b15x4' + + def is_fp8e5(self): + return self.name == 'fp8e5' + + def 
is_fp16(self): + return self.name == 'fp16' + + def is_bf16(self): + return self.name == 'bf16' + + def is_fp32(self): + return self.name == 'fp32' + + def is_fp64(self): + return self.name == 'fp64' + + def is_int1(self): + return self.name == 'int1' + + def is_int8(self): + return self.name == 'int8' + + def is_int16(self): + return self.name == 'int16' + + def is_int32(self): + return self.name == 'int32' + + def is_int64(self): + return self.name == 'int64' + + def is_uint8(self): + return self.name == 'uint8' + + def is_uint16(self): + return self.name == 'uint16' + + def is_uint32(self): + return self.name == 'uint32' + + def is_uint64(self): + return self.name == 'uint64' + + def is_floating(self): + return self.name in dtype.FP_TYPES + + def is_standard_floating(self): + return self.name in dtype.STANDARD_FP_TYPES + + def is_int_signed(self): + return self.name in dtype.SINT_TYPES + + def is_int_unsigned(self): + return self.name in dtype.UINT_TYPES + + def is_int(self): + return self.name in dtype.SINT_TYPES + dtype.UINT_TYPES + + def is_bool(self): + return self.is_int1() + + @staticmethod + def is_dtype(type_str): + return type_str in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES + + @staticmethod + def is_void(): + raise RuntimeError("Not implemented") + + @staticmethod + def is_block(): + return False + + @staticmethod + def is_ptr(): + return False + + def __eq__(self, other: dtype): + if not isinstance(other, dtype): + return False + return self.name == other.name + + def __ne__(self, other: dtype): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.name, )) + + @property + def scalar(self): + return self + + def to_ir(self, builder: ir.builder) -> ir.type: + if self.name == 'void': + return builder.get_void_ty() + elif self.name == 'int1': + return builder.get_int1_ty() + elif self.name in ('int8', 'uint8'): + return builder.get_int8_ty() + elif self.name in ('int16', 'uint16'): + return builder.get_int16_ty() + elif self.name in ('int32', 'uint32'): + return builder.get_int32_ty() + elif self.name in ('int64', 'uint64'): + return builder.get_int64_ty() + elif self.name == 'fp8e5': + return builder.get_fp8e5_ty() + elif self.name == 'fp8e4nv': + return builder.get_fp8e4nv_ty() + elif self.name == 'fp8e4b15': + return builder.get_fp8e4b15_ty() + elif self.name == 'fp8e4b15x4': + return builder.get_fp8e4b15x4_ty() + elif self.name == 'fp16': + return builder.get_half_ty() + elif self.name == 'bf16': + return builder.get_bf16_ty() + elif self.name == 'fp32': + return builder.get_float_ty() + elif self.name == 'fp64': + return builder.get_double_ty() + raise ValueError(f'failed to convert {self} to ir type') + + def __str__(self): + return self.name + + @property + def cache_key_part(self) -> str: + """See cache_key_part() in triton.cc.""" + return self.name + + def __repr__(self): + return f'triton.language.{str(self)}' + + +class pointer_type(dtype): + + def __init__(self, element_ty: dtype, address_space: int = 1): + if not isinstance(element_ty, dtype): + raise TypeError(f'element_ty is a {type(element_ty).__name__}.') + self.element_ty = element_ty + self.address_space = address_space + + self.name = self.__str__() + + def to_ir(self, builder: ir.builder) -> ir.pointer_type: + return builder.get_ptr_ty(self.element_ty.to_ir(builder), 1) + + def __str__(self): + return f'pointer<{self.element_ty}>' + + def __repr__(self): + return self.__str__() + + def is_ptr(self): + return True + + def __eq__(self, other: pointer_type) ->
bool: + if not isinstance(other, pointer_type): + return False + return self.element_ty == other.element_ty and self.address_space == other.address_space + + def __ne__(self, other: pointer_type) -> bool: + return not self.__eq__(other) + + @property + def scalar(self): + return self + + +class block_type(dtype): + + def __init__(self, element_ty: dtype, shape: List): + self.element_ty = element_ty + + # Note that block_type's shape is a list of int + # while tensor's shape is a list of constexpr. + + # shape can be empty ([]) when an input is a 0D tensor. + if not shape: + raise TypeError('0d block_type is forbidden') + if isinstance(shape[0], constexpr): + shape = [s.value for s in shape] + + self.shape = shape + self.numel = 1 + for s in self.shape: + self.numel *= s + if self.numel > TRITON_MAX_TENSOR_NUMEL: + raise ValueError(f"numel ({self.numel}) exceeds triton maximum tensor numel ({TRITON_MAX_TENSOR_NUMEL})") + + self.name = self.__str__() + + def to_ir(self, builder: ir.builder) -> ir.block_type: + return builder.get_block_ty(self.element_ty.to_ir(builder), self.shape) + + def __str__(self): + return f'<{self.shape}, {self.element_ty}>' + + def __repr__(self): + return self.__str__() + + def is_block(self): + return True + + def get_block_shapes(self) -> List[int]: + return self.shape + + def __eq__(self, other: block_type) -> bool: + if not isinstance(other, block_type): + return False + return self.element_ty == other.element_ty and self.shape == other.shape + + def __ne__(self, other: block_type) -> bool: + return not self.__eq__(other) + + @property + def scalar(self): + return self.element_ty + + +class function_type(dtype): + + def __init__(self, ret_types: List[dtype], param_types: List[dtype]) -> None: + self.ret_types = ret_types + self.param_types = param_types + + def __str__(self): + return f'fn ({self.param_types}) -> {self.ret_types}' + + def to_ir(self, builder: ir.builder): + ir_param_types = [ty.to_ir(builder) for ty in self.param_types] + ret_types = [ret_type.to_ir(builder) for ret_type in self.ret_types] + return builder.get_function_ty(ir_param_types, ret_types) + + +# scalar types +void = dtype('void') +int1 = dtype('int1') +int8 = dtype('int8') +int16 = dtype('int16') +int32 = dtype('int32') +int64 = dtype('int64') +uint8 = dtype('uint8') +uint16 = dtype('uint16') +uint32 = dtype('uint32') +uint64 = dtype('uint64') +float8e5 = dtype('fp8e5') +float8e4nv = dtype('fp8e4nv') +float8e4b15 = dtype('fp8e4b15') +float8e4b15x4 = dtype('fp8e4b15x4') +float16 = dtype('fp16') +bfloat16 = dtype('bf16') +float32 = dtype('fp32') +float64 = dtype('fp64') +# pointer types +pi32_t = pointer_type(int32) + +# ----------------------- +# constexpr +# ----------------------- + + +class constexpr: + """ + This class is used to store a value that is known at compile-time. 
+ """ + + def __init__(self, value): + if isinstance(value, constexpr): + self.value = value.value + else: + self.value = value + + def __repr__(self) -> str: + return f"constexpr[{self.value}]" + + def __index__(self): + return self.value + + def __add__(self, other): + return constexpr(self.value + other.value) + + def __radd__(self, other): + return constexpr(other.value + self.value) + + def __sub__(self, other): + return constexpr(self.value - other.value) + + def __rsub__(self, other): + return constexpr(other.value - self.value) + + def __mul__(self, other): + return constexpr(self.value * other.value) + + def __mod__(self, other): + return constexpr(self.value % other.value) + + def __rmul__(self, other): + return constexpr(other.value * self.value) + + def __truediv__(self, other): + return constexpr(self.value / other.value) + + def __rtruediv__(self, other): + return constexpr(other.value / self.value) + + def __floordiv__(self, other): + return constexpr(self.value // other.value) + + def __rfloordiv__(self, other): + return constexpr(other.value // self.value) + + def __gt__(self, other): + return constexpr(self.value > other.value) + + def __rgt__(self, other): + return constexpr(other.value > self.value) + + def __ge__(self, other): + return constexpr(self.value >= other.value) + + def __rge__(self, other): + return constexpr(other.value >= self.value) + + def __lt__(self, other): + return constexpr(self.value < other.value) + + def __rlt__(self, other): + return constexpr(other.value < self.value) + + def __le__(self, other): + return constexpr(self.value <= other.value) + + def __rle__(self, other): + return constexpr(other.value <= self.value) + + def __eq__(self, other): + return constexpr(self.value == other.value) + + def __ne__(self, other): + return constexpr(self.value != other.value) + + def __bool__(self): + return bool(self.value) + + def __neg__(self): + return constexpr(-self.value) + + def __and__(self, other): + return constexpr(self.value & other.value) + + def logical_and(self, other): + return constexpr(self.value and other.value) + + def __or__(self, other): + return constexpr(self.value | other.value) + + def __xor__(self, other): + return constexpr(self.value ^ other.value) + + def logical_or(self, other): + return constexpr(self.value or other.value) + + def __pos__(self): + return constexpr(+self.value) + + def __invert__(self): + return constexpr(~self.value) + + def __pow__(self, other): + return constexpr(self.value**other.value) + + def __rshift__(self, other): + return constexpr(self.value >> other.value) + + def __lshift__(self, other): + return constexpr(self.value << other.value) + + def __not__(self): + return constexpr(not self.value) + + def __call__(self, *args, **kwds): + return self.value(*args, **kwds) + + +class tensor: + + def __init__(self, handle, type: dtype): + # IR handle + self.handle = handle + # Block shape + self.shape = type.shape if type.is_block() else () + self.numel = 1 + for s in self.shape: + self.numel *= s + self.numel = constexpr(self.numel) + self.type = type # Tensor type (can be block_type) + # Following the practice in pytorch, dtype is scalar type + self.dtype = type.scalar + self.shape = [constexpr(s) for s in self.shape] + + def __str__(self) -> str: + # ex. 
"float32[16, 32]" + return str(self.dtype) + '[' + ', '.join(str(s) for s in self.shape) + ']' + + @builtin + def __add__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.add(self, other, _builder) + + @builtin + def __radd__(self, other, _builder=None): + return self.__add__(other, _builder=_builder) + + @builtin + def __sub__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.sub(self, other, _builder) + + @builtin + def __rsub__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.sub(other, self, _builder) + + @builtin + def __mul__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mul(self, other, _builder) + + @builtin + def __rmul__(self, other, _builder=None): + return self.__mul__(other, _builder=_builder) + + @builtin + def __truediv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.truediv(self, other, _builder) + + @builtin + def __rtruediv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.truediv(other, self, _builder) + + @builtin + def __floordiv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.floordiv(self, other, _builder) + + @builtin + def __rfloordiv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.floordiv(other, self, _builder) + + @builtin + def __mod__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mod(self, other, _builder) + + @builtin + def __rmod__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mod(other, self, _builder) + + # unary operators + @builtin + def __neg__(self, _builder=None): + return semantic.minus(self, _builder) + + @builtin + def __invert__(self, _builder=None): + return semantic.invert(self, _builder) + + # bitwise operators + + @builtin + def __and__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.and_(self, other, _builder) + + @builtin + def __rand__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.and_(other, self, _builder) + + @builtin + def __or__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.or_(self, other, _builder) + + @builtin + def __ror__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.or_(other, self, _builder) + + @builtin + def __xor__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.xor_(self, other, _builder) + + @builtin + def __rxor__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.xor_(other, self, _builder) + + @builtin + def __lshift__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.shl(self, other, _builder) + + @builtin + def __rlshift__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.shl(other, self, _builder) + + @builtin + def __rshift__(self, other, _builder=None): + other = _to_tensor(other, _builder) + if self.dtype.is_int_signed(): + return semantic.ashr(self, other, _builder) + else: + return semantic.lshr(self, other, _builder) + + @builtin + def __rrshift__(self, other, _builder=None): + other = _to_tensor(other, _builder) + if self.dtype.is_int_signed(): + return semantic.ashr(other, self, _builder) + else: + return semantic.lshr(other, self, _builder) + + # > 
+ @builtin + def __gt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_than(self, other, _builder) + + @builtin + def __rgt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_than(other, self, _builder) + + # >= + @builtin + def __ge__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_equal(self, other, _builder) + + @builtin + def __rge__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_equal(other, self, _builder) + + # < + @builtin + def __lt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_than(self, other, _builder) + + @builtin + def __rlt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_than(other, self, _builder) + + # <= + @builtin + def __le__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_equal(self, other, _builder) + + @builtin + def __rle__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_equal(other, self, _builder) + + # == + @builtin + def __eq__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.equal(self, other, _builder) + + @builtin + def __req__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.equal(other, self, _builder) + + @builtin + def __ne__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.not_equal(self, other, _builder) + + @builtin + def __rne__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.not_equal(other, self, _builder) + + @builtin + def logical_and(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.logical_and(self, other, _builder) + + @builtin + def logical_or(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.logical_or(self, other, _builder) + + # note: __not__ isn't actually a magic method in python + # but it's ok because our ASTVisitor handles it + @builtin + def __not__(self, _builder=None): + return semantic.not_(self, _builder) + + @builtin + def __getitem__(self, slices, _builder=None): + if isinstance(slices, (slice, constexpr)): + slices = [slices] + ret = self + for dim, sl in enumerate(slices): + if sl is None or isinstance(sl, constexpr) and sl.value is None: + ret = semantic.expand_dims(ret, dim, _builder) + elif isinstance(sl, slice) and sl.start is None and sl.stop is None and sl.step is None: + pass + else: + assert False, f"unsupported tensor index: {sl}" + return ret + + @property + def T(self): + assert False, "Transposition must be created by the AST Visitor" + + @builtin + def to(self, dtype, bitcast=False, _builder=None): + if isinstance(bitcast, constexpr): + bitcast = bitcast.value + if bitcast: + return semantic.bitcast(self, dtype, _builder) + return semantic.cast(self, dtype, _builder) + + +# ----------------------- +# SPMD Programming Model +# ----------------------- +def _constexpr_to_value(v): + if isinstance(v, constexpr): + return v.value + return v + + +@builtin +def program_id(axis, _builder=None): + """ + Returns the id of the current program instance along the given :code:`axis`. + + :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. 
+    :type axis: int
+    """
+    # if axis == -1:
+    #     pid0 = program_id(0, _builder)
+    #     pid1 = program_id(1, _builder)
+    #     pid2 = program_id(2, _builder)
+    #     npg0 = num_programs(0, _builder)
+    #     npg1 = num_programs(1, _builder)
+    #     return pid0 + pid1*npg0 + pid2*npg0*npg1
+    axis = _constexpr_to_value(axis)
+    return semantic.program_id(axis, _builder)
+
+
+@builtin
+def num_programs(axis, _builder=None):
+    """
+    Returns the number of program instances launched along the given :code:`axis`.
+
+    :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2.
+    :type axis: int
+    """
+    axis = _constexpr_to_value(axis)
+    return semantic.num_programs(axis, _builder)
+
+
+# -----------------------
+# Block Initialization
+# -----------------------
+
+
+@builtin
+def arange(start, end, _builder=None):
+    """
+    Returns contiguous values within the left-closed and right-open interval [:code:`start`, :code:`end`). \
+    :code:`end - start` must be less than or equal to TRITON_MAX_TENSOR_NUMEL = 131072
+
+    :param start: Start of the interval. Must be a power of two.
+    :type start: int32
+    :param end: End of the interval. Must be a power of two > start.
+    :type end: int32
+    """
+    start = _constexpr_to_value(start)
+    end = _constexpr_to_value(end)
+    return semantic.arange(start, end, _builder)
+
+
+def _shape_check_impl(shape):
+    shape = _constexpr_to_value(shape)
+    for i, d in enumerate(shape):
+        if isinstance(d, int):
+            d = constexpr(d)
+        if not isinstance(d, constexpr):
+            raise TypeError(f"Shape element {i} must have type `constexpr`")
+        if not isinstance(d.value, int):
+            raise TypeError(f"Shape element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`")
+    return [_constexpr_to_value(x) for x in shape]
+
+
+@builtin
+def full(shape, value, dtype, _builder=None):
+    """
+    Returns a tensor filled with the scalar value for the given :code:`shape` and :code:`dtype`.
+
+    :param shape: Shape of the new array, e.g., (8, 16) or (8, )
+    :type shape: tuple of ints
+    :param value: A scalar value to fill the array with
+    :param dtype: Data-type of the new array, e.g., :code:`tl.float16`
+    :type dtype: DType
+    """
+    shape = _shape_check_impl(shape)
+    value = _constexpr_to_value(value)
+    dtype = _constexpr_to_value(dtype)
+    return semantic.full(shape, value, dtype, _builder)
+
+
+# -----------------------
+# Shape Manipulation
+# -----------------------
+
+
+@builtin
+def broadcast(input, other, _builder=None):
+    """
+    Tries to broadcast the two given blocks to a common compatible shape.
+
+    :param input: The first input tensor.
+    :type input: Block
+    :param other: The second input tensor.
+    :type other: Block
+    """
+    return semantic.broadcast_impl_value(input, other, _builder)
+
+
+@builtin
+def broadcast_to(input, shape, _builder=None):
+    """
+    Tries to broadcast the given tensor to a new :code:`shape`.
+
+    :param input: The input tensor.
+    :type input: Block
+    :param shape: The desired shape.
+    :type shape: Tuple[int]
+    """
+    shape = _shape_check_impl(shape)
+    return semantic.broadcast_impl_shape(input, shape, _builder)
+
+
+@builtin
+def trans(input, _builder=None):
+    """
+    Returns a transposed tensor.
+
+    :param input: The input tensor.
+    :type input: Block
+    """
+    return semantic.trans(input, _builder)
+
+
+@builtin
+def cat(input, other, can_reorder=False, _builder=None):
+    """
+    Concatenate the given blocks.
+
+    :param input: The first input tensor.
+    :type input: Block
+    :param other: The second input tensor.
+    :type other: Block
+    :param can_reorder: Compiler hint. If true, the compiler is
+        allowed to reorder elements while concatenating inputs. Only use if the
+        order does not matter (e.g., result is only used in reduction ops).
+    """
+    return semantic.cat(input, other, can_reorder, _builder)
+
+
+@builtin
+def view(input, shape, _builder=None):
+    """
+    Returns a tensor with the same elements as `input` but a different shape.
+    The order of the elements may not be preserved.
+
+    :param input: The input tensor.
+    :type input: Block
+    :param shape: The desired shape.
+    :type shape: Tuple[int]
+
+    """
+    shape = _shape_check_impl(shape)
+    return semantic.view(input, shape, _builder)
+
+
+@builtin
+def reshape(input, shape, _builder=None):
+    """
+    Returns a tensor with the same number of elements as input but with the
+    provided shape.
+
+    :param input: The input tensor.
+    :type input: Block
+    :param shape: The new shape.
+    :type shape: Tuple[int]
+    """
+    shape = _shape_check_impl(shape)
+    return semantic.reshape(input, shape, _builder)
+
+
+def _wrap_axis(axis, ndim):
+    if not (-ndim <= axis < ndim):
+        raise ValueError(f"invalid axis {axis}. Expected {-ndim} <= axis < {ndim}")
+
+    return axis if axis >= 0 else axis + ndim
+
+
+@builtin
+def expand_dims(input, axis, _builder=None):
+    """
+    Expand the shape of a tensor by inserting new length-1 dimensions.
+
+    Axis indices are with respect to the resulting tensor, so
+    ``result.shape[axis]`` will be 1 for each axis.
+
+    :param input: The input tensor.
+    :type input: tl.tensor
+    :param axis: The indices to add new axes
+    :type axis: int | Sequence[int]
+
+    """
+    axis = _constexpr_to_value(axis)
+    axes = list(axis) if isinstance(axis, Sequence) else [axis]
+    new_ndim = len(input.shape) + len(axes)
+    axes = [_wrap_axis(_constexpr_to_value(d), new_ndim) for d in axes]
+
+    if len(set(axes)) != len(axes):
+        raise ValueError(f"expand_dims received duplicate axes, normalized axes = {axes}")
+
+    ret = input
+    for a in sorted(axes):
+        ret = semantic.expand_dims(ret, a, _builder)
+    return ret
+
+
+# -----------------------
+# Linear Algebra
+# -----------------------
+
+
+@builtin
+def dot(input, other, acc=None, allow_tf32=True, max_num_imprecise_acc=None, out_dtype=float32, _builder=None):
+    """
+    Returns the matrix product of two blocks.
+
+    The two blocks must be two-dimensional and have compatible inner dimensions.
+
+    :param input: The first tensor to be multiplied.
+    :type input: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`}
+    :param other: The second tensor to be multiplied.
+ :type other: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} + """ + allow_tf32 = _constexpr_to_value(allow_tf32) + out_dtype = _constexpr_to_value(out_dtype) + max_num_imprecise_acc = _constexpr_to_value(max_num_imprecise_acc) + return semantic.dot(input, other, acc, allow_tf32, max_num_imprecise_acc, out_dtype, _builder) + + +# ----------------------- +# Non-Atomic Memory Operations +# ----------------------- + + +@builtin +def load(pointer, mask=None, other=None, boundary_check=tuple(), padding_option="", cache_modifier="", + eviction_policy="", volatile=False, _builder=None): + """ + Return a tensor of data whose values are loaded from memory at location defined by `pointer`: + (1) `pointer` could be a single element pointer, then a scalar will be loaded + + - `mask` and `other` must be scalar too + - `other` is implicitly typecast to `pointer.dtype.element_ty` + - `boundary_check` and `padding_option` must be empty + + (2) `pointer` could be element-wise tensor of pointers, in which case: + + - `mask` and `other` are implicitly broadcast to `pointer.shape` + - `other` is implicitly typecast to `pointer.dtype.element_ty` + - `boundary_check` and `padding_option` must be empty + + (3) `pointer` could be a block pointer defined by `make_block_ptr`, in which case: + + - `mask` and `other` must be None + - `boundary_check` and `padding_option` can be specified to control the behavior of out-of-bound access + + :param pointer: Pointer to the data to be loaded + :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` + :param mask: if `mask[idx]` is false, do not load the data at address `pointer[idx]` + (must be `None` with block pointers) + :type mask: Block of `triton.int1`, optional + :param other: if `mask[idx]` is false, return `other[idx]` + :type other: Block, optional + :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check + :type boundary_check: tuple of ints, optional + :param padding_option: should be one of {"", "zero", "nan"}, do padding while out of bound + :param cache_modifier: changes cache option in NVIDIA PTX + :type cache_modifier: str, optional + :param eviction_policy: changes eviction policy in NVIDIA PTX + :type eviction_policy: str, optional + :param volatile: changes volatile option in NVIDIA PTX + :type volatile: bool, optional + """ + # `mask` and `other` can be constexpr + if _constexpr_to_value(mask) is not None: + mask = _to_tensor(mask, _builder) + if _constexpr_to_value(other) is not None: + other = _to_tensor(other, _builder) + padding_option = _constexpr_to_value(padding_option) + cache_modifier = _constexpr_to_value(cache_modifier) + eviction_policy = _constexpr_to_value(eviction_policy) + volatile = _constexpr_to_value(volatile) + return semantic.load(pointer, mask, other, boundary_check, padding_option, cache_modifier, eviction_policy, + volatile, _builder) + + +@builtin +def store(pointer, value, mask=None, boundary_check=(), cache_modifier="", eviction_policy="", _builder=None): + """ + Store a tensor of data into memory locations defined by `pointer`: + (1) `pointer` could be a single element pointer, then a scalar will be stored + + - `mask` must be scalar too + - `boundary_check` and `padding_option` must be empty + + (2) `pointer` could be element-wise tensor of pointers, in which case: + + - `mask` is implicitly broadcast to `pointer.shape` + - `boundary_check` must be empty + + (3) or `pointer` could be a block pointer defined by 
`make_block_ptr`, in which case: + + - `mask` must be None + - `boundary_check` can be specified to control the behavior of out-of-bound access + + `value` is implicitly broadcast to `pointer.shape` and typecast to `pointer.dtype.element_ty`. + + :param pointer: The memory location where the elements of `value` are stored + :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` + :param value: The tensor of elements to be stored + :type value: Block + :param mask: If `mask[idx]` is false, do not store `value[idx]` at `pointer[idx]` + :type mask: Block of triton.int1, optional + :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check + :type boundary_check: tuple of ints, optional + :param cache_modifier: changes cache option in NVIDIA PTX + :type cache_modifier: str, optional + :param eviction_policy: changes eviction policy in NVIDIA PTX + :type eviction_policy: str, optional + """ + # `value` can be constexpr + value = _to_tensor(value, _builder) + if _constexpr_to_value(mask) is not None: + mask = _to_tensor(mask, _builder) + cache_modifier = _constexpr_to_value(cache_modifier) + eviction_policy = _constexpr_to_value(eviction_policy) + return semantic.store(pointer, value, mask, boundary_check, cache_modifier, eviction_policy, _builder) + + +@builtin +def make_block_ptr(base: tensor, shape, strides, offsets, block_shape, order, _builder=None): + """ + Returns a pointer to a block in a parent tensor + + :param base: The base pointer to the parent tensor + :param shape: The shape of the parent tensor + :param strides: The strides of the parent tensor + :param offsets: The offsets to the block + :param block_shape: The shape of the block + :param order: The order of the original data format + """ + return semantic.make_block_ptr(base, shape, strides, offsets, block_shape, order, _builder) + + +@builtin +def advance(base: tensor, offsets, _builder=None): + """ + Advance a block pointer + + :param base: the block pointer to advance + :param offsets: the offsets to advance, a tuple by dimension + """ + return semantic.advance(base, offsets, _builder) + + +# ----------------------- +# Atomic Memory Operations +# ----------------------- + + +def _add_atomic_docstr(name: str, has_cmp: bool = False) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = f""" + Performs an atomic {name} at the memory location specified by :code:`pointer`. + + Return the data stored at :code:`pointer` before the atomic operation. 
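+
+    For example (illustrative), the "add" flavor of this primitive can update a
+    histogram under a mask:
+
+    .. code-block:: python
+
+        tl.atomic_add(hist_ptr + bin_offsets, 1, mask=valid)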
+ + :param pointer: The memory locations to operate on + :type pointer: Block of dtype=triton.PointerDType""" + if has_cmp: + docstr += """ + :param cmp: The values expected to be found in the atomic object + :type cmp: Block of dtype=pointer.dtype.element_ty""" + docstr += """ + :param val: The values with which to perform the atomic operation + :type val: Block of dtype=pointer.dtype.element_ty + :param sem: Memory semantics to use ("ACQUIRE_RELEASE" (default), + "ACQUIRE", "RELEASE", or "RELAXED") + :type sem: str + :param scope: Scope of threads that observe synchronizing effect of the + atomic operation ("GPU" (default), "CTA", or "SYSTEM") + :type scope: str + """ + func.__doc__ = docstr + return func + + return _decorator + + +@builtin +@_add_atomic_docstr("compare-and-swap", has_cmp=True) +def atomic_cas(pointer, cmp, val, sem=None, scope=None, _builder=None): + cmp = _to_tensor(cmp, _builder) + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_cas(pointer, cmp, val, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("exchange") +def atomic_xchg(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_xchg(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("add") +def atomic_add(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_add(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("max") +def atomic_max(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_max(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("min") +def atomic_min(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_min(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("logical and") +def atomic_and(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_and(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("logical or") +def atomic_or(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_or(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("logical xor") +def atomic_xor(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_xor(pointer, val, mask, sem, scope, _builder) + + +# ----------------------- +# Conditioning +# ----------------------- + + +@builtin +def where(condition, x, y, _builder=None): + """ + Returns a tensor of elements from either :code:`x` or :code:`y`, depending on :code:`condition`. + + Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`. 
+ + If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead. + + The shape of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`. + :code:`x` and :code:`y` must have the same data type. + + :param condition: When True (nonzero), yield x, otherwise yield y. + :type condition: Block of triton.bool + :param x: values selected at indices where condition is True. + :param y: values selected at indices where condition is False. + """ + condition = _to_tensor(condition, _builder) + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + return semantic.where(condition, x, y, _builder) + + +# ----------------------- +# Math +# ----------------------- + + +@builtin +def umulhi(x, y, _builder=None): + """ + Returns the most significant 32 bits of the product of x and y. + + :param x: the input tensor + :type x: int32 + :param y: the input tensor + :type y: int32 + """ + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + return semantic.umulhi(x, y, _builder) + + +@builtin +def fdiv(x, y, ieee_rounding=False, _builder=None): + """ + Returns a floating-point resultant tensor of dividing x by y. + + :param x: the input numerator value. + :param y: the input denominator value. + :param ieee_rounding: To follow IEEE-754 floating point number + rounding mechanism + :type ieee_rounding: bool + """ + ieee_rounding = _constexpr_to_value(ieee_rounding) + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + return semantic.fdiv(x, y, ieee_rounding, _builder) + + +def _add_math_1arg_docstr(name: str) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = """ + Computes the element-wise {name} of :code:`x`. + + :param x: the input values + :type x: Block + """ + func.__doc__ = docstr.format(name=name) + return func + + return _decorator + + +@builtin +@_add_math_1arg_docstr("exponential") +def exp(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.exp(x, _builder) + + +@builtin +@_add_math_1arg_docstr("natural logarithm") +def log(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.log(x, _builder) + + +@builtin +@_add_math_1arg_docstr("cosine") +def cos(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.cos(x, _builder) + + +@builtin +@_add_math_1arg_docstr("sine") +def sin(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.sin(x, _builder) + + +@builtin +@_add_math_1arg_docstr("square root") +def sqrt(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.sqrt(x, _builder) + + +@builtin +@_add_math_1arg_docstr("absolute value") +def abs(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.abs(x, _builder) + + +# ----------------------- +# Reductions +# ----------------------- + + +def _add_reduction_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = """ + Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` + + :param input: the input values + :param axis: the dimension along which the reduction should be done""" + if return_indices_arg is not None: + docstr += f""" + :param {return_indices_arg}: if true, return index corresponding to the {name} value""" + if tie_break_arg is not None: + docstr += f""" + :param {tie_break_arg}: if true, return the left-most indices in case of ties for values that aren't NaN""" + + func.__doc__ = docstr.format(name=name) + return func 
+ + return _decorator + + +@contextmanager +def _insertion_guard(builder): + ip = builder.get_insertion_point() + yield + builder.restore_insertion_point(ip) + + +@builtin +def reduce(input, axis, combine_fn, _builder=None, _generator=None): + """Applies the combine_fn to all elements in :code:`input` tensors along the provided :code:`axis` + + :param input: the input tensor, or tuple of tensors + :param axis: the dimension along which the reduction should be done + :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) + + """ + if isinstance(input, tensor): + return reduce((input, ), axis, combine_fn, _builder=_builder, _generator=_generator)[0] + + def make_combine_region(reduce_op): + in_scalar_tys = [t.type.scalar for t in input] + prototype = function_type(in_scalar_tys, in_scalar_tys * 2) + + region = reduce_op.get_region(0) + with _insertion_guard(_builder): + param_types = [ty.to_ir(_builder) for ty in prototype.param_types] + block = _builder.create_block_with_parent(region, param_types) + args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)] + results = _generator.call_JitFunction(combine_fn, args, kwargs={}) + if isinstance(results, tensor): + handles = [results.handle] + else: + handles = [r.handle for r in results] + _builder.create_reduce_ret(*handles) + + if axis is not None: + axis = _constexpr_to_value(axis) + return semantic.reduction(input, axis, make_combine_region, _builder) + + +@builtin +def _promote_reduction_input(t, _builder=None): + scalar_ty = t.type.scalar + + # hardware doesn't support FMAX, FMIN, CMP for bfloat16 + if scalar_ty is bfloat16: + return t.to(float32, _builder=_builder) + return t + + +@builtin +def _reduce_with_indices(input, axis, combine_fn, _builder=None, _generator=None): + axis = _constexpr_to_value(axis) + n = input.shape[axis] + index = arange(0, n, _builder=_builder) + + if len(input.shape) > 1: + # Broadcast index across the non-reduced axes + axes_to_expand = [constexpr(d) for d in range(len(input.shape))] + del axes_to_expand[axis] + index = expand_dims(index, axes_to_expand, _builder=_builder) + index = broadcast_to(index, input.shape, _builder=_builder) + + rvalue, rindices = reduce((input, index), axis, combine_fn, _builder=_builder, _generator=_generator) + return rvalue, rindices + + +# ----------------------- +# Scans +# ----------------------- + + +def _add_scan_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = """ + Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` + + :param input: the input values + :param axis: the dimension along which the scan should be done""" + func.__doc__ = docstr.format(name=name) + return func + + return _decorator + + +@builtin +def associative_scan(input, axis, combine_fn, _builder=None, _generator=None): + """Applies the combine_fn to each elements with a carry in :code:`input` tensors along the provided :code:`axis` and update the carry + + :param input: the input tensor, or tuple of tensors + :param axis: the dimension along which the reduction should be done + :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) + + """ + if isinstance(input, tensor): + return associative_scan((input, ), axis, combine_fn, _builder=_builder, _generator=_generator)[0] + + def make_combine_region(scan_op): + in_scalar_tys = [t.type.scalar for t in input] 
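+        # The combine function maps two groups of scalars (one per input
+        # tensor) to a single group, so its prototype takes 2 * len(input)
+        # parameters and returns len(input) results.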
+        prototype = function_type(in_scalar_tys, in_scalar_tys * 2)
+
+        region = scan_op.get_region(0)
+        with _insertion_guard(_builder):
+            param_types = [ty.to_ir(_builder) for ty in prototype.param_types]
+            block = _builder.create_block_with_parent(region, param_types)
+            args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)]
+            results = _generator.call_JitFunction(combine_fn, args, kwargs={})
+            if isinstance(results, tensor):
+                handles = [results.handle]
+            else:
+                handles = [r.handle for r in results]
+            _builder.create_scan_ret(*handles)
+
+    axis = _constexpr_to_value(axis)
+    return semantic.associative_scan(input, axis, make_combine_region, _builder)
+
+
+# -----------------------
+# Compiler Hint Ops
+# -----------------------
+
+
+@builtin
+def debug_barrier(_builder=None):
+    '''
+    Insert a barrier to synchronize all threads in a block.
+    '''
+    return semantic.debug_barrier(_builder)
+
+
+@builtin
+def multiple_of(input, values, _builder=None):
+    """
+    Let the compiler know that the values in :code:`input` are all multiples of :code:`values`.
+    """
+    if isinstance(values, constexpr):
+        values = [values]
+    for i, d in enumerate(values):
+        if not isinstance(d, constexpr):
+            raise TypeError(f"values element {i} must have type `constexpr`")
+        if not isinstance(d.value, int):
+            raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`")
+    values = [x.value for x in values]
+    return semantic.multiple_of(input, values)
+
+
+@builtin
+def max_contiguous(input, values, _builder=None):
+    """
+    Let the compiler know that the first :code:`values` values in :code:`input` are contiguous.
+    """
+    if isinstance(values, constexpr):
+        values = [values]
+    for i, d in enumerate(values):
+        if not isinstance(d, constexpr):
+            raise TypeError(f"values element {i} must have type `constexpr`")
+        if not isinstance(d.value, int):
+            raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`")
+    values = [x.value for x in values]
+    return semantic.max_contiguous(input, values)
+
+
+@builtin
+def max_constancy(input, values, _builder=None):
+    """
+    Let the compiler know that the first :code:`values` values in :code:`input` are constant.
+
+    e.g. if :code:`values` is [4], then each group of 4 values in :code:`input` should all be equal,
+    for example [0, 0, 0, 0, 1, 1, 1, 1].
+    """
+    if isinstance(values, constexpr):
+        values = [values]
+    for i, d in enumerate(values):
+        if not isinstance(d, constexpr):
+            raise TypeError(f"values element {i} must have type `constexpr`")
+        if not isinstance(d.value, int):
+            raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`")
+    values = [x.value for x in values]
+    return semantic.max_constancy(input, values)
+
+
+# -----------------------
+# Debugging functions
+# -----------------------
+
+
+@builtin
+def static_print(*values, sep: str = " ", end: str = "\n", file=None, flush=False, _builder=None):
+    '''
+    Print the values at compile time. The parameters are the same as the builtin :code:`print`.
+
+    NOTE: Calling the Python builtin :code:`print` is not the same as calling this; it instead maps to
+    :code:`device_print`, which has special requirements for its arguments.
+
+    .. highlight:: python
+    .. code-block:: python
+
+        tl.static_print(f"{BLOCK_SIZE=}")
+    '''
+    pass
+
+
+@builtin
+def static_assert(cond, msg="", _builder=None):
+    '''
+    Assert the condition at compile time.
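+    Unlike :code:`device_assert`, the condition must be known when the kernel is compiled,
+    so it cannot depend on runtime tensor values.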
Does not require that the :code:`TRITON_DEBUG` environment variable + is set. + + .. highlight:: python + .. code-block:: python + + tl.static_assert(BLOCK_SIZE == 1024) + ''' + pass + + +@builtin +def device_print(prefix, *args, _builder=None): + ''' + Print the values at runtime from the device. String formatting does not work for runtime values, so you should + provide the values you want to print as arguments. The first value must be a string, all following values must + be scalars or tensors. + + Calling the Python builtin :code:`print` is the same as calling this function, and the requirements for the arguments will match + this function (not the normal requirements for :code:`print`). + + .. highlight:: python + .. code-block:: python + + tl.device_print("pid", pid) + print("pid", pid) + + :param prefix: a prefix to print before the values. This is required to be a string literal. + :param args: the values to print. They can be any tensor or scalar. + ''' + import string + prefix = _constexpr_to_value(prefix) + assert isinstance(prefix, str), f"{prefix} is not string" + b_ascii = True + for ch in prefix: + if ch not in string.printable: + b_ascii = False + break + assert b_ascii, f"{prefix} is not an ascii string" + new_args = [] + for arg in args: + new_args.append(_to_tensor(arg, _builder)) + return semantic.device_print(prefix, new_args, _builder) + + +@builtin +def device_assert(cond, msg="", _builder=None): + ''' + Assert the condition at runtime from the device. Requires that the environment variable :code:`TRITON_DEBUG` + is set to a value besides :code:`0` in order for this to have any effect. + + Using the Python :code:`assert` statement is the same as calling this function, except that the second argument + must be provided and must be a string, e.g. :code:`assert pid == 0, "pid != 0"`. The environment variable must + be set for this :code:`assert` statement to have any effect. + + .. highlight:: python + .. code-block:: python + + tl.device_assert(pid == 0) + assert pid == 0, f"pid != 0" + + :param cond: the condition to assert. This is required to be a boolean tensor. + :param msg: the message to print if the assertion fails. This is required to be a string literal. + ''' + msg = _constexpr_to_value(msg) + import inspect + frame = inspect.currentframe() + module = inspect.getmodule(frame) + # The triton function module doesn't have the name attribute. + # We use this trick to find the caller. + while hasattr(module, "__name__"): + frame = frame.f_back + module = inspect.getmodule(frame) + lineno = 0 + func_name = 'unknown' + file_name = 'unknown' + if frame is not None and frame.f_back is not None: + func_name = frame.f_code.co_name + file_name = frame.f_back.f_code.co_filename + # TODO: The line number currently indicates the line + # where the triton function is called but not where the + # device_assert is called. Need to enhance this. 
+    lineno = frame.f_back.f_lineno
+    return semantic.device_assert(_to_tensor(cond, _builder), msg, file_name, func_name, lineno, _builder)
+
+
+@builtin
+def inline_asm_elementwise(asm: str, constraints: str, args: list, dtype, is_pure: bool, pack: int, _builder=None):
+    '''
+    Execute inline assembly over a tensor, processing :code:`pack` elements per instance of the assembly.
+
+    :param asm: the assembly to inline; it has to match the target assembly format
+    :param constraints: string representing the mapping of operands to registers
+    :param args: the arguments of the operation
+    :param dtype: the element type of the returned variable
+    :param is_pure: whether the operation is pure
+    :param pack: the number of elements to be processed by one instance of inline assembly
+    :param _builder: the builder
+    :return: the return value of the function
+    '''
+    asm = _constexpr_to_value(asm)
+    constraints = _constexpr_to_value(constraints)
+    pack = _constexpr_to_value(pack)
+    is_pure = _constexpr_to_value(is_pure)
+    res_ty = dtype
+    dispatch_args = [_to_tensor(arg, _builder) for arg in args]
+    if dispatch_args:
+        bin_op_type_checking = partial(
+            semantic.binary_op_type_checking_impl,
+            builder=_builder,
+            arithmetic_check=False,
+            allow_lhs_ptr=True,
+            allow_rhs_ptr=True,
+        )
+        broadcast_arg = dispatch_args[0]
+        # Get the broadcast shape over all the arguments
+        for item in dispatch_args:
+            _, broadcast_arg = bin_op_type_checking(item, broadcast_arg)
+        if broadcast_arg.shape:
+            # Change the shape of each argument based on the broadcast shape
+            for i, item in enumerate(dispatch_args):
+                dispatch_args[i], _ = bin_op_type_checking(item, broadcast_arg)
+            res_ty = block_type(dtype, broadcast_arg.shape)
+    handles = [t.handle for t in dispatch_args]
+    call = _builder.create_inline_asm(asm, constraints, handles, res_ty.to_ir(_builder), is_pure, pack)
+    return tensor(call, res_ty)
+
+
+# -----------------------
+# Iterators
+# -----------------------
+
+
+class static_range:
+    """
+    Range iterator whose bounds must be known at compile time.
+
+    .. highlight:: python
+    .. code-block:: python
+
+        @triton.jit
+        def kernel(...):
+            for i in tl.static_range(10):
+                ...
+
+    :note: This is a special iterator used to implement similar semantics to Python's :code:`range` in the context of
+        :code:`triton.jit` functions. In addition, it also guides the compiler to unroll the loop aggressively.
+    :param arg1: the start value.
+    :param arg2: the end value.
+    :param step: the step value.
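+
+    For example (illustrative), :code:`tl.static_range(0, 8, 2)` unrolls the loop body
+    for :code:`i = 0, 2, 4, 6`.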
+ """ + + def __init__(self, arg1, arg2=None, step=None): + assert isinstance(arg1, constexpr) + if step is None: + self.step = constexpr(1) + else: + assert isinstance(step, constexpr) + self.step = step + if arg2 is None: + self.start = constexpr(0) + self.end = arg1 + else: + assert isinstance(arg2, constexpr) + self.start = arg1 + self.end = arg2 + + def __iter__(self): + raise RuntimeError("static_range can only be used in @triton.jit'd functions") + + def __next__(self): + raise RuntimeError("static_range can only be used in @triton.jit'd functions") + + +# ----------------------- +# Extern functions +# ----------------------- + + +def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, ret_shape: tuple, + is_pure: bool, _builder=None): + ''' + Dispatch a function to a library + :param func: the function to dispatch + :param lib_name: the name of the library + :param lib_path: the path of the library + :param args: the arguments of the function + :param arg_type_symbol_dict: the type of the arguments + :param ret_shape: the shape of the return value + :param _builder: the builder + :return: the return value of the function + ''' + if len(arg_type_symbol_dict) == 0: + raise ValueError("arg_type_symbol_dict is empty") + + num_args = len(list(arg_type_symbol_dict.keys())[0]) + if len(args) != num_args: + raise ValueError(f"length of input args does not match." + f"Expect {len(args)}, got {num_args}") + + arg_types = [] + arg_list = [] + for arg in args: + if isinstance(arg, tensor): + arg_types.append(arg.dtype) + arg_list.append(arg.handle) + else: + arg_types.append(type(arg)) + arg_list.append(arg) + arg_types = tuple(arg_types) + + if arg_types not in arg_type_symbol_dict: + raise ValueError(f"input arg type does not match." 
+ f"Expect one of {arg_type_symbol_dict.keys()}, got {arg_types}") + else: + symbol = arg_type_symbol_dict[arg_types][0] + ret_type = arg_type_symbol_dict[arg_types][1] + if ret_shape: + ret_type = block_type(ret_type, ret_shape) + return tensor(func(lib_name, lib_path, symbol, arg_list, ret_type.to_ir(_builder), is_pure), ret_type) + + +def extern_elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, is_pure: bool, + _builder=None): + ''' + Dispatch an elementwise function to a library + :param lib_name: the name of the library + :param lib_path: the path of the library + :param args: the arguments of the function + :param arg_type_symbol_dict: the type of the arguments + :param is_pure: whether the function is pure + :param _builder: the builder + :return: the return value of the function + ''' + dispatch_args = args.copy() + all_scalar = True + ret_shape = None + arg_types = [] + for i in range(len(dispatch_args)): + dispatch_args[i] = _to_tensor(dispatch_args[i], _builder) + arg_types.append(dispatch_args[i].dtype) + if dispatch_args[i].type.is_block(): + all_scalar = False + if len(arg_types) > 0: + arg_types = tuple(arg_types) + arithmetic_check = True + # If there's a type tuple that is not supported by the library, we will do arithmetic check + if arg_types in arg_type_symbol_dict: + arithmetic_check = False + broadcast_arg = dispatch_args[0] + # Get the broadcast shape over all the arguments + for i, item in enumerate(dispatch_args): + _, broadcast_arg = semantic.binary_op_type_checking_impl(item, broadcast_arg, _builder, + arithmetic_check=arithmetic_check) + # Change the shape of each argument based on the broadcast shape + for i in range(len(dispatch_args)): + dispatch_args[i], _ = semantic.binary_op_type_checking_impl(dispatch_args[i], broadcast_arg, _builder, + arithmetic_check=arithmetic_check) + if not all_scalar: + ret_shape = broadcast_arg.shape + func = getattr(_builder, "create_extern_elementwise") + return dispatch(func, lib_name, lib_path, dispatch_args, arg_type_symbol_dict, ret_shape, is_pure, _builder) + + +def binary_op_type_legalization(lhs, rhs, builder): + ''' + Convert both operands to a single common type + :param lhs: the left operand + :param rhs: the right operand + :param builder: the builder + ''' + return semantic.binary_op_type_checking_impl(lhs, rhs, builder) + + +def extern(fn): + """A decorator for external functions.""" + return builtin(fn) diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/math.py b/llmeval-env/lib/python3.10/site-packages/triton/language/math.py new file mode 100644 index 0000000000000000000000000000000000000000..1cbad660d780bc10a541d7274890c1c2500e966c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/language/math.py @@ -0,0 +1,1676 @@ +import functools +import os + +from ..common.build import is_hip +from . 
import core


+@functools.lru_cache()
+def libdevice_path():
+    third_party_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "third_party")
+    if is_hip():
+        default = os.path.join(third_party_dir, "hip", "lib", "bitcode", "cuda2gcn.bc")
+    else:
+        default = os.path.join(third_party_dir, "cuda", "lib", "libdevice.10.bc")
+
+    return os.getenv("TRITON_LIBDEVICE_PATH", default)
+
+
+@core.extern
+def clz(arg0, _builder=None):
+    return core.extern_elementwise(
+        "libdevice", libdevice_path(), [arg0], {
+            (core.dtype("int32"), ): ("__nv_clz", core.dtype("int32")),
+            (core.dtype("int64"), ): ("__nv_clzll", core.dtype("int32")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def popc(arg0, _builder=None):
+    return core.extern_elementwise(
+        "libdevice", libdevice_path(), [arg0], {
+            (core.dtype("int32"), ): ("__nv_popc", core.dtype("int32")),
+            (core.dtype("int64"), ): ("__nv_popcll", core.dtype("int32")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def byte_perm(arg0, arg1, arg2, _builder=None):
+    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2], {
+        (core.dtype("int32"), core.dtype("int32"), core.dtype("int32")): ("__nv_byte_perm", core.dtype("int32")),
+    }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def min(arg0, arg1, _builder=None):
+    arg0 = core._to_tensor(arg0, _builder)
+    arg1 = core._to_tensor(arg1, _builder)
+    arg0, arg1 = core.binary_op_type_legalization(arg0, arg1, _builder)
+    dtype = arg0.dtype
+    if dtype.is_floating():
+        return core.tensor(_builder.create_minf(arg0.handle, arg1.handle), arg0.type)
+    elif dtype.is_int_signed():
+        return core.tensor(_builder.create_minsi(arg0.handle, arg1.handle), arg0.type)
+    elif dtype.is_int_unsigned():
+        return core.tensor(_builder.create_minui(arg0.handle, arg1.handle), arg0.type)
+    else:
+        assert False, f"Unexpected dtype {dtype}"
+
+
+@core.extern
+def max(arg0, arg1, _builder=None):
+    arg0 = core._to_tensor(arg0, _builder)
+    arg1 = core._to_tensor(arg1, _builder)
+    arg0, arg1 = core.binary_op_type_legalization(arg0, arg1, _builder)
+    dtype = arg0.dtype
+    if dtype.is_floating():
+        return core.tensor(_builder.create_maxf(arg0.handle, arg1.handle), arg0.type)
+    elif dtype.is_int_signed():
+        return core.tensor(_builder.create_maxsi(arg0.handle, arg1.handle), arg0.type)
+    elif dtype.is_int_unsigned():
+        return core.tensor(_builder.create_maxui(arg0.handle, arg1.handle), arg0.type)
+    else:
+        assert False, f"Unexpected dtype {dtype}"
+
+
+@core.extern
+def mulhi(arg0, arg1, _builder=None):
+    return core.extern_elementwise(
+        "libdevice", libdevice_path(), [arg0, arg1], {
+            (core.dtype("int32"), core.dtype("int32")): ("__nv_mulhi", core.dtype("int32")),
+            (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umulhi", core.dtype("uint32")),
+            (core.dtype("int64"), core.dtype("int64")): ("__nv_mul64hi", core.dtype("int64")),
+            (core.dtype("uint64"), core.dtype("uint64")): ("__nv_umul64hi", core.dtype("uint64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def mul24(arg0, arg1, _builder=None):
+    return core.extern_elementwise(
+        "libdevice", libdevice_path(), [arg0, arg1], {
+            (core.dtype("int32"), core.dtype("int32")): ("__nv_mul24", core.dtype("int32")),
+            (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umul24", core.dtype("uint32")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def brev(arg0, _builder=None):
+    return core.extern_elementwise(
+        "libdevice", libdevice_path(), [arg0], {
+            (core.dtype("int32"), ): ("__nv_brev",
core.dtype("int32")), + (core.dtype("int64"), ): ("__nv_brevll", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sad(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("int32"), core.dtype("int32"), core.dtype("uint32")): ("__nv_sad", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32"), core.dtype("uint32")): ("__nv_usad", core.dtype("uint32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def abs(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_abs", core.dtype("int32")), + (core.dtype("int64"), ): ("__nv_llabs", core.dtype("int64")), + (core.dtype("fp32"), ): ("__nv_fabsf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_fabs", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def floor(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_floorf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_floor", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp64h(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_rcp64h", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rsqrt(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_rsqrtf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_rsqrt", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ceil(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_ceil", core.dtype("fp64")), + (core.dtype("fp32"), ): ("__nv_ceilf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def trunc(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_trunc", core.dtype("fp64")), + (core.dtype("fp32"), ): ("__nv_truncf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def exp2(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_exp2f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_exp2", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def saturatef(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_saturatef", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma_rn(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma_rz(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), 
core.dtype("fp64")): ("__nv_fma_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma_rd(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma_ru(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_dividef(arg0, arg1, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fast_fdividef", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def div_rn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def div_rz(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def div_rd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def div_ru(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp_rn(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_frcp_rn", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_drcp_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp_rz(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_frcp_rz", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_drcp_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp_rd(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_frcp_rd", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_drcp_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp_ru(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + 
(core.dtype("fp32"), ): ("__nv_frcp_ru", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_drcp_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt_rn(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fsqrt_rn", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_dsqrt_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt_rz(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fsqrt_rz", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_dsqrt_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt_rd(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fsqrt_rd", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_dsqrt_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt_ru(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fsqrt_ru", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_dsqrt_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_sqrtf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_sqrt", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def add_rn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rn", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def add_rz(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rz", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def add_rd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rd", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def add_ru(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_ru", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul_rn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rn", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul_rz(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): 
("__nv_dmul_rz", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul_rd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rd", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul_ru(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + arg1, + ], { + ( + core.dtype("fp64"), + core.dtype("fp64"), + ): ("__nv_dmul_ru", core.dtype("fp64")), + ( + core.dtype("fp32"), + core.dtype("fp32"), + ): ("__nv_fmul_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2int_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2int_rn", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2int_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2int_rz", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2int_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2int_rd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2int_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2int_ru", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2uint_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2uint_rn", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2uint_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2uint_rz", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2uint_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2uint_rd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + 
+@core.extern +def double2uint_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2uint_ru", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2double_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int2double_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2double_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2double_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2int_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2int_rn", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2int_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2int_rz", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2int_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2int_rd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2int_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2int_ru", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2uint_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2uint_rn", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2uint_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2uint_rz", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2uint_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2uint_rd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2uint_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2uint_ru", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): 
("__nv_int2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def hiloint2double(arg0, arg1, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("int32")): ("__nv_hiloint2double", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2loint(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2loint", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2hiint(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2hiint", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ll_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ll_rn", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ll_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ll_rz", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ll_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ll_rd", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ll_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ll_ru", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ull_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ull_rn", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ull_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ull_rz", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ull_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ull_rd", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ull_ru(arg0, _builder=None): + return 
core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ull_ru", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ll_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ll_rn", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ll_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ll_rz", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ll_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ll_rd", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ll_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ll_ru", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ull_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ull_rn", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ull_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ull_rz", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ull_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ull_rd", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ull_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ull_ru", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern 
+def ull2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2double_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2double_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2double_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2double_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2double_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2double_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2double_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2double_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2double_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2double_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2double_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2double_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2double_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2double_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2double_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2double_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int_as_float(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int_as_float", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float_as_int(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float_as_int", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint_as_float(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint_as_float", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float_as_uint(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float_as_uint", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def longlong_as_double(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_longlong_as_double", 
core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double_as_longlong(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double_as_longlong", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_sinf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_sinf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_cosf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_cosf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_log2f(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_log2f", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_logf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_logf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_expf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_expf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_tanf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_tanf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_exp10f(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_exp10f", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_log10f(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_log10f", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_powf(arg0, arg1, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fast_powf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def hadd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("int32")): ("__nv_hadd", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32")): ("__nv_uhadd", core.dtype("uint32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rhadd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("int32")): ("__nv_rhadd", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32")): ("__nv_urhadd", core.dtype("uint32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sub_rn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sub_rz(arg0, arg1, _builder=None): + return 
core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sub_rd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sub_ru(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rsqrt_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_frsqrt_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ffs(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("int32"), ): ("__nv_ffs", core.dtype("int32")), + (core.dtype("int64"), ): ("__nv_ffsll", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rint(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_rintf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_rint", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def llrint(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_llrintf", core.dtype("int64")), + (core.dtype("fp64"), ): ("__nv_llrint", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def nearbyint(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_nearbyintf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_nearbyint", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def isnan(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_isnanf", core.dtype("int32")), + (core.dtype("fp64"), ): ("__nv_isnand", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def signbit(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_signbitf", core.dtype("int32")), + (core.dtype("fp64"), ): ("__nv_signbitd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def copysign(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_copysignf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_copysign", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def finitef(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_finitef", core.dtype("int32")), 
+ }, is_pure=True, _builder=_builder) + + +@core.extern +def isinf(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_isinff", core.dtype("int32")), + (core.dtype("fp64"), ): ("__nv_isinfd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def nextafter(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_nextafterf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_nextafter", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sin(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_sinf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_sin", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cos(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cosf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cos", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sinpi(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_sinpif", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_sinpi", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cospi(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cospif", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cospi", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def tan(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_tanf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_tan", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def log2(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_log2f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_log2", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def exp(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_expf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_exp", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def exp10(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_exp10f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_exp10", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cosh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_coshf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cosh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sinh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_sinhf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_sinh", core.dtype("fp64")), + }, is_pure=True, 
_builder=_builder) + + +@core.extern +def tanh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_tanhf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_tanh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def atan2(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_atan2f", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_atan2", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def atan(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_atanf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_atan", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def asin(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_asinf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_asin", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def acos(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_acosf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_acos", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def log(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_logf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_log", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def log10(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_log10f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_log10", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def log1p(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_log1pf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_log1p", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def acosh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_acoshf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_acosh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def asinh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_asinhf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_asinh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def atanh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_atanhf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_atanh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def expm1(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_expm1f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_expm1", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + 
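As with the rounding-mode wrappers earlier, each transcendental above carries an fp32 entry (__nv_*f) and an fp64 entry (__nv_*) under a single Python-level name, with dispatch decided by the dtype of the incoming block. A short sketch combining two of them into a numerically stable softplus; same assumptions as the earlier example (tl.math exposes these wrappers, names are hypothetical), and the 20.0 cutoff is an illustrative choice, not part of this module:

import triton
import triton.language as tl

@triton.jit
def softplus_kernel(x_ptr, out_ptr, n, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    x = tl.load(x_ptr + offs, mask=mask)
    # softplus(x) = log(1 + e^x): log1p keeps precision when e^x is tiny,
    # and for large x we return x directly instead of overflowing exp.
    y = tl.where(x > 20.0, x, tl.math.log1p(tl.math.exp(x)))
    tl.store(out_ptr + offs, y, mask=mask)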
+@core.extern +def hypot(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_hypotf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_hypot", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rhypot(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_rhypotf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_rhypot", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def norm3d(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_norm3df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_norm3d", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rnorm3d(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_rnorm3df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_rnorm3d", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def norm4d(arg0, arg1, arg2, arg3, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2, arg3], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): + ("__nv_norm4df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): + ("__nv_norm4d", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rnorm4d(arg0, arg1, arg2, arg3, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2, arg3], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): + ("__nv_rnorm4df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): + ("__nv_rnorm4d", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cbrt(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cbrtf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cbrt", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcbrt(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_rcbrtf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_rcbrt", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def j0(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_j0f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_j0", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def j1(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_j1f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_j1", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def y0(arg0, 
_builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_y0f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_y0", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def y1(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_y1f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_y1", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def yn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("fp32")): ("__nv_ynf", core.dtype("fp32")), + (core.dtype("int32"), core.dtype("fp64")): ("__nv_yn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def jn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("fp32")): ("__nv_jnf", core.dtype("fp32")), + (core.dtype("int32"), core.dtype("fp64")): ("__nv_jn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cyl_bessel_i0(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cyl_bessel_i0f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cyl_bessel_i0", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cyl_bessel_i1(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cyl_bessel_i1f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cyl_bessel_i1", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erf(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erff", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erf", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erfinv(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erfinvf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erfinv", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erfc(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erfcf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erfc", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erfcx(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erfcxf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erfcx", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erfcinv(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erfcinvf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erfcinv", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def normcdfinv(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_normcdfinvf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_normcdfinv", core.dtype("fp64")), + }, 
is_pure=True, _builder=_builder) + + +@core.extern +def normcdf(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_normcdff", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_normcdf", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def lgamma(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_lgammaf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_lgamma", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ldexp(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("int32")): ("__nv_ldexpf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32")): ("__nv_ldexp", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def scalbn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("int32")): ("__nv_scalbnf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32")): ("__nv_scalbn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fmod(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmodf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_fmod", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def remainder(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_remainderf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_remainder", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def pow(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("int32")): ("__nv_powif", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32")): ("__nv_powi", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_powf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_pow", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def tgamma(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_tgammaf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_tgamma", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def round(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_roundf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_round", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def llround(arg0, _builder=None): + return core.extern_elementwise( + 
"libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_llroundf", core.dtype("int64")), + (core.dtype("fp64"), ): ("__nv_llround", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fdim(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdimf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_fdim", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ilogb(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_ilogbf", core.dtype("int32")), + (core.dtype("fp64"), ): ("__nv_ilogb", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def logb(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_logbf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_logb", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def isfinited(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_isfinited", core.dtype("int32")), + }, is_pure=True, _builder=_builder) diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/random.py b/llmeval-env/lib/python3.10/site-packages/triton/language/random.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9d53bfb3e74fb74ebfb7fa5ad29d035471897b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/language/random.py @@ -0,0 +1,202 @@ +from ..runtime.jit import jit +from . import core as tl +from . import standard + +N_ROUNDS_DEFAULT = 10 # Default number of rounds for philox + +# ------------------- +# randint +# ------------------- + + +@jit +def philox_impl(c0, c1, c2, c3, k0, k1, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Run `n_rounds` rounds of Philox for state (c0, c1, c2, c3) and key (k0, k1). 
+ """ + if c0.dtype == tl.uint32: + PHILOX_KEY_A: tl.constexpr = 0x9E3779B9 + PHILOX_KEY_B: tl.constexpr = 0xBB67AE85 + PHILOX_ROUND_A: tl.constexpr = 0xD2511F53 + PHILOX_ROUND_B: tl.constexpr = 0xCD9E8D57 + else: + tl.static_assert(c0.dtype == tl.uint64, "dtype not supported in philox_impl") + PHILOX_KEY_A: tl.constexpr = 0x9E3779B97F4A7C15 + PHILOX_KEY_B: tl.constexpr = 0xBB67AE8584CAA73B + PHILOX_ROUND_A: tl.constexpr = 0xD2E7470EE14C6C93 + PHILOX_ROUND_B: tl.constexpr = 0xCA5A826395121157 + + for _ in tl.static_range(n_rounds): + # for _ in range(n_rounds): + # update random state + A = PHILOX_ROUND_A + B = PHILOX_ROUND_B + _c0, _c2 = c0, c2 + c0 = tl.umulhi(B, _c2) ^ c1 ^ k0 + c2 = tl.umulhi(A, _c0) ^ c3 ^ k1 + c1 = B * _c2 + c3 = A * _c0 + # raise key + k0 = k0 + PHILOX_KEY_A + k1 = k1 + PHILOX_KEY_B + return c0, c1, c2, c3 + + +@jit +def philox(seed, c0, c1, c2, c3, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + seed = seed.to(tl.uint64) + if tl.constexpr(c0.dtype.primitive_bitwidth) == 32: + int_dtype = tl.uint32 + seed_hi = ((seed >> 32) & 0xffffffff).to(tl.uint32) + seed_lo = (seed & 0xffffffff).to(tl.uint32) + else: + tl.static_assert(tl.constexpr(c0.dtype.primitive_bitwidth) == 64, "bitwidth not supported in philox") + int_dtype = tl.uint64 + seed_hi = 0 + seed_lo = seed + c0 = c0.to(int_dtype, bitcast=True) + c1 = c1.to(int_dtype, bitcast=True) + c2 = c2.to(int_dtype, bitcast=True) + c3 = c3.to(int_dtype, bitcast=True) + return philox_impl(c0, c1, c2, c3, seed_lo, seed_hi, n_rounds) + + +@jit +def randint(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, returns a single + block of random :code:`int32`. + + If you need multiple streams of random numbers, + using `randint4x` is likely to be faster than calling `randint` 4 times. + + :param seed: The seed for generating random numbers. + :param offset: The offsets to generate random numbers for. + """ + ret, _, _, _ = randint4x(seed, offset, n_rounds) + return ret + + +@jit +def randint4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, returns four + blocks of random :code:`int32`. + + This is the maximally efficient entry point + to Triton's Philox pseudo-random number generator. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + # _0 = tl.zeros(offset.shape, offset.dtype) + _0 = offset * 0 + return philox(seed, offset, _0, _0, _0, n_rounds) + + +# ------------------- +# rand +# ------------------- + +# @jit +# def uint32_to_uniform_float(x): +# """ +# Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1). +# """ +# two_to_the_minus_32: tl.constexpr = 2.328306e-10 +# return x * two_to_the_minus_32 + + +@jit +def uint_to_uniform_float(x): + """ + Numerically stable function to convert a random uint into a random float uniformly sampled in [0, 1). 
+ """ + # TODO: fix frontend issues and cleanup + # conditions can be simplified + # scale is ((2**23 - 1) / 2**23) * 2**(N_BITS - 1) + if tl.constexpr(x.dtype == tl.uint32) or tl.constexpr(x.dtype == tl.int32): + # maximum value such that `MAX_INT * scale < 1.0` (with float rounding) + x = x.to(tl.int32, bitcast=True) + scale = 4.6566127342e-10 + else: + tl.static_assert(tl.constexpr(x.dtype == tl.uint64) or tl.constexpr(x.dtype == tl.int64)) + x = x.to(tl.int64, bitcast=True) + scale = 1.0842020432385337e-19 + x = tl.where(x < 0, -x - 1, x) + return x * scale + + +@jit +def rand(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, + returns a block of random :code:`float32` in :math:`U(0, 1)`. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + source = randint(seed, offset, n_rounds) + return uint_to_uniform_float(source) + + +@jit +def rand4x(seed, offsets, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offsets` block, + returns 4 blocks of random :code:`float32` in :math:`U(0, 1)`. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + i1, i2, i3, i4 = randint4x(seed, offsets, n_rounds) + u1 = uint_to_uniform_float(i1) + u2 = uint_to_uniform_float(i2) + u3 = uint_to_uniform_float(i3) + u4 = uint_to_uniform_float(i4) + return u1, u2, u3, u4 + + +# ------------------- +# randn +# ------------------- + + +@jit +def pair_uniform_to_normal(u1, u2): + """Box-Muller transform""" + u1 = standard.maximum(1.0e-7, u1) + th = 6.283185307179586 * u2 + r = tl.sqrt(-2.0 * tl.log(u1)) + return r * tl.cos(th), r * tl.sin(th) + + +@jit +def randn(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, + returns a block of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + i1, i2, _, _ = randint4x(seed, offset, n_rounds) + u1 = uint_to_uniform_float(i1) + u2 = uint_to_uniform_float(i2) + n1, _ = pair_uniform_to_normal(u1, u2) + return n1 + + +@jit +def randn4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, + returns 4 blocks of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + u1, u2, u3, u4 = rand4x(seed, offset, n_rounds) + n1, n2 = pair_uniform_to_normal(u1, u2) + n3, n4 = pair_uniform_to_normal(u3, u4) + return n1, n2, n3, n4 diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/semantic.py b/llmeval-env/lib/python3.10/site-packages/triton/language/semantic.py new file mode 100644 index 0000000000000000000000000000000000000000..c1ee1036ba6f8097546cfbb9c5076b59183981d9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/language/semantic.py @@ -0,0 +1,1549 @@ +from __future__ import annotations # remove after python 3.11 + +from functools import wraps +from typing import List, Optional, Sequence, Tuple, TypeVar + +from .._C.libtriton.triton import ir +from ..common.build import is_hip +from . 
import core as tl + +T = TypeVar('T') + + +class IncompatibleTypeErrorImpl(Exception): + + def __init__(self, type_a, type_b): + self.type_a = type_a + self.type_b = type_b + self.message = "invalid operands of type " + self.type_a.__repr__() + " and " + self.type_b.__repr__() + super(IncompatibleTypeErrorImpl, self).__init__(self.message) + + +# ===----------------------------------------------------------------------===// +# Programming Model +# ===----------------------------------------------------------------------===// + + +def program_id(axis: int, builder: ir.builder) -> tl.tensor: + if axis not in (0, 1, 2): + raise ValueError(f"program_id axis must be 0, 1, or 2 but got {axis}") + return tl.tensor(builder.create_get_program_id(axis), tl.int32) + + +def num_programs(axis: int, builder: ir.builder) -> tl.tensor: + if axis not in (0, 1, 2): + raise ValueError(f"num_programs axis must be 0, 1, or 2 but got {axis}") + return tl.tensor(builder.create_get_num_programs(axis), tl.int32) + + +# ===----------------------------------------------------------------------===// +# Implicit Casting Utilities +# ===----------------------------------------------------------------------===// + + +def integer_promote_impl(a_ty: tl.dtype, b_ty: tl.dtype) -> tl.dtype: + a_rank = a_ty.int_bitwidth + b_rank = b_ty.int_bitwidth + a_sn = a_ty.int_signedness + b_sn = b_ty.int_signedness + # Rules for signedness taken from "Usual arithmetic conversions" on + # https://en.cppreference.com/w/c/language/conversion. + if a_sn == b_sn: + return a_ty if a_rank > b_rank else b_ty + elif a_sn == tl.dtype.SIGNEDNESS.UNSIGNED: + return a_ty if a_rank >= b_rank else b_ty + elif b_sn == tl.dtype.SIGNEDNESS.UNSIGNED: + return b_ty if b_rank >= a_rank else a_ty + assert False + + +def computation_type_impl(a_ty: tl.dtype, b_ty: tl.dtype, div_or_mod: bool) -> tl.dtype: + # 1) if one operand is double, the other is implicitly + # converted to double + if a_ty.is_fp64() or b_ty.is_fp64(): + return tl.float64 + # 2) if one operand is float, the other is implicitly + # converted to float + if a_ty.is_fp32() or b_ty.is_fp32(): + return tl.float32 + # 3) if one operand is half, the other is implicitly converted to half + # unless we're doing / or %, which do not exist natively in PTX for fp16. + # Supported PTX ops: add, sub, mul, fma, neg, abs, min, max, tanh, ex2, setp + if a_ty.is_fp16() or b_ty.is_fp16(): + if div_or_mod: + return tl.float32 + else: + return tl.float16 + # 4) return bf16 only if both operands are of bf16 + if a_ty.is_bf16() or b_ty.is_bf16(): + if div_or_mod: + return tl.float32 + if a_ty.is_bf16() and b_ty.is_bf16(): + return tl.bfloat16 + return tl.float32 + if not a_ty.is_int() or not b_ty.is_int(): + assert False + # 5) both operands are integer and undergo + # integer promotion + if div_or_mod and a_ty.int_signedness != b_ty.int_signedness: + raise ValueError("Cannot use /, //, or % with " + a_ty.__repr__() + " and " + b_ty.__repr__() + + " because they have different signedness; this is unlikely to result in a useful answer. 
Cast them to the same signedness.") + return integer_promote_impl(a_ty, b_ty) + + +# ===----------------------------------------------------------------------===// +# Binary Operators +# ===----------------------------------------------------------------------===// + + +def check_ptr_type_impl(type_a: tl.dtype, type_b: tl.dtype, allow_ptr_a: bool) -> None: + if type_a.is_ptr(): + if not allow_ptr_a: + raise IncompatibleTypeErrorImpl(type_a, type_b) + # T* + U* with T != U + if type_b.is_ptr() and (type_a != type_b): + raise IncompatibleTypeErrorImpl(type_a, type_b) + # T* + float + if type_b.is_floating(): + raise IncompatibleTypeErrorImpl(type_a, type_b) + + +def binary_op_type_checking_impl(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder, allow_lhs_ptr=False, + allow_rhs_ptr=False, arithmetic_check=True, + div_or_mod=False) -> Tuple[tl.tensor, tl.tensor]: + # implicit broadcasting + lhs, rhs = broadcast_impl_value(lhs, rhs, builder) + # implicit typecasting + lhs_sca_ty = lhs.type.scalar + rhs_sca_ty = rhs.type.scalar + check_ptr_type_impl(lhs_sca_ty, rhs_sca_ty, allow_lhs_ptr) + check_ptr_type_impl(rhs_sca_ty, lhs_sca_ty, allow_rhs_ptr) + if arithmetic_check and not lhs_sca_ty.is_ptr() and not rhs_sca_ty.is_ptr(): + ret_sca_ty = computation_type_impl(lhs_sca_ty, rhs_sca_ty, div_or_mod) + lhs = cast(lhs, ret_sca_ty, builder) + rhs = cast(rhs, ret_sca_ty, builder) + return lhs, rhs + + +def add(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder, True, True) + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + if input_scalar_ty.is_ptr() and other_scalar_ty.is_ptr(): + raise ValueError("cannot add pointers together") + + # offset + ptr + # ptr + offset + if other_scalar_ty.is_ptr() and not input_scalar_ty.is_ptr(): + input, other = other, input + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + if input_scalar_ty.is_ptr(): + return tl.tensor(builder.create_addptr(input.handle, other.handle), input.type) + # float + float + elif input_scalar_ty.is_floating(): + return tl.tensor(builder.create_fadd(input.handle, other.handle), input.type) + # int + int + elif input_scalar_ty.is_int(): + return tl.tensor(builder.create_add(input.handle, other.handle), input.type) + assert False + + +def sub(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder, True, False) + scalar_ty = input.type.scalar + # ptr - offset + if scalar_ty.is_ptr(): + return tl.tensor(builder.create_addptr(input.handle, minus(other, builder).handle), input.type) + # float - float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fsub(input.handle, other.handle), input.type) + # int - int + elif scalar_ty.is_int(): + return tl.tensor(builder.create_sub(input.handle, other.handle), input.type) + assert False + + +def mul(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float * float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fmul(input.handle, other.handle), input.type) + # * int + elif scalar_ty.is_int(): + return tl.tensor(builder.create_mul(input.handle, other.handle), input.type) + assert False + + +def truediv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, 
builder, False, False, True, True) + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + # float / int + if input_scalar_ty.is_floating() and other_scalar_ty.is_int(): + other = cast(other, input_scalar_ty, builder) + # int / float + elif input_scalar_ty.is_int() and other_scalar_ty.is_floating(): + input = cast(input, other_scalar_ty, builder) + # int / int (cast to tl.float32) + elif input_scalar_ty.is_int() and other_scalar_ty.is_int(): + input = cast(input, tl.float32, builder) + other = cast(other, tl.float32, builder) + # float / float (cast to the highest exponent type) + elif input_scalar_ty.is_floating() and other_scalar_ty.is_floating(): + if input_scalar_ty.fp_mantissa_width > other_scalar_ty.fp_mantissa_width: + other = cast(other, input_scalar_ty, builder) + else: + input = cast(input, other_scalar_ty, builder) + # unreachable + else: + assert False + return tl.tensor(builder.create_fdiv(input.handle, other.handle), input.type) + + +def floordiv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + if input_scalar_ty.is_int() and other_scalar_ty.is_int(): + ret_ty = integer_promote_impl(input_scalar_ty, other_scalar_ty) + input = cast(input, ret_ty, builder) + other = cast(other, ret_ty, builder) + if ret_ty.is_int_signed(): + return tl.tensor(builder.create_sdiv(input.handle, other.handle), input.type) + else: + return tl.tensor(builder.create_udiv(input.handle, other.handle), input.type) + assert False + + +def fdiv(input: tl.tensor, other: tl.tensor, ieee_rounding: bool, builder: ir.builder) -> tl.tensor: + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + if not input_scalar_ty.is_floating() or not other_scalar_ty.is_floating(): + raise ValueError("both operands of fdiv must have floating scalar type") + input, other = binary_op_type_checking_impl(input, other, builder, False, False, False, True) + ret = builder.create_fdiv(input.handle, other.handle) + return tl.tensor(ret, input.type) + + +def mod(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) + scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + # float % float + if scalar_ty.is_floating(): + # input - input.div(other, rounding_mode="floor") * other + ret = sub(input, mul(floor(fdiv(input, other, False, builder), builder), other, builder), builder) + return ret + # % int + elif scalar_ty.is_int(): + if scalar_ty.int_signedness != other_scalar_ty.int_signedness: + raise ValueError("Cannot mod " + scalar_ty.__repr__() + " by " + other_scalar_ty.__repr__() + " " + "because they have different signedness;" + "this is unlikely to result in a useful answer. 
Cast them to the same signedness.") + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_srem(input.handle, other.handle), input.type) + else: + return tl.tensor(builder.create_urem(input.handle, other.handle), input.type) + assert False + + +############## +# bitwise ops +############## + + +def bitwise_op_type_checking_impl(input: tl.tensor, other: tl.tensor, + builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]: + input, other = binary_op_type_checking_impl(input, other, builder, False, False, False) + input_sca_ty = input.type.scalar + other_sca_ty = other.type.scalar + if not input_sca_ty.is_int() or not other_sca_ty.is_int(): + raise IncompatibleTypeErrorImpl(input_sca_ty, other_sca_ty) + ret_sca_ty = integer_promote_impl(input_sca_ty, other_sca_ty) + if ret_sca_ty != input_sca_ty: + input = cast(input, ret_sca_ty, builder) + if ret_sca_ty != other_sca_ty: + other = cast(other, ret_sca_ty, builder) + return input, other + + +def and_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_and(input.handle, other.handle), input.type) + + +def or_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_or(input.handle, other.handle), input.type) + + +def xor_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_xor(input.handle, other.handle), input.type) + + +def logical_and(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + if not input.type.is_int1(): + input = bitcast(input, tl.dtype("int1"), builder) + if not other.type.is_int1(): + other = bitcast(other, tl.dtype("int1"), builder) + return and_(input, other, builder) + + +def logical_or(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + if not input.type.is_int1(): + input = bitcast(input, tl.dtype("int1"), builder) + if not other.type.is_int1(): + other = bitcast(other, tl.dtype("int1"), builder) + return or_(input, other, builder) + + +def not_(input: tl.tensor, builder: ir.builder): + if not input.type.is_int1(): + input = bitcast(input, tl.dtype("int1"), builder) + return invert(input, builder) + + +def lshr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_lshr(input.handle, other.handle), input.type) + + +def ashr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_ashr(input.handle, other.handle), input.type) + + +def shl(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_shl(input.handle, other.handle), input.type) + + +# ===----------------------------------------------------------------------===// +# Unary Operators +# ===----------------------------------------------------------------------===// + + +def plus(input: tl.tensor) -> tl.tensor: + return input + + +def minus(input: tl.tensor, builder: ir.builder) -> tl.tensor: + input_sca_ty = input.type.scalar + if input_sca_ty.is_ptr(): + raise ValueError("wrong type argument to unary minus (" + 
input_sca_ty.__repr__() + ")") + _0 = tl.tensor(builder.get_null_value(input_sca_ty.to_ir(builder)), input_sca_ty) + return sub(_0, input, builder) + + +def invert(input: tl.tensor, builder: ir.builder) -> tl.tensor: + input_sca_ty = input.type.scalar + if input_sca_ty.is_ptr() or input_sca_ty.is_floating(): + raise ValueError("wrong type argument to unary invert (" + input_sca_ty.__repr__() + ")") + _1 = tl.tensor(builder.get_all_ones_value(input_sca_ty.to_ir(builder)), input_sca_ty) + return xor_(input, _1, builder) + + +# ===----------------------------------------------------------------------===// +# Comparison Operators +# ===----------------------------------------------------------------------===// +def _bool_like(v: tl.tensor) -> tl.block_type: + if not v.type.is_block(): + return tl.int1 + shape = v.type.shape + return tl.block_type(tl.int1, shape) + + +def greater_than(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float > float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpOGT(input.handle, other.handle), _bool_like(input)) + # > int + elif scalar_ty.is_int(): + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_icmpSGT(input.handle, other.handle), _bool_like(input)) + else: + return tl.tensor(builder.create_icmpUGT(input.handle, other.handle), _bool_like(input)) + assert False + + +def greater_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float >= float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpOGE(input.handle, other.handle), _bool_like(input)) + # >= int + elif scalar_ty.is_int(): + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_icmpSGE(input.handle, other.handle), _bool_like(input)) + else: + return tl.tensor(builder.create_icmpUGE(input.handle, other.handle), _bool_like(input)) + assert False + + +def less_than(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float < float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpOLT(input.handle, other.handle), _bool_like(input)) + # < int + elif scalar_ty.is_int(): + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_icmpSLT(input.handle, other.handle), _bool_like(input)) + else: + return tl.tensor(builder.create_icmpULT(input.handle, other.handle), _bool_like(input)) + assert False + + +def less_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float <= float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpOLE(input.handle, other.handle), _bool_like(input)) + # <= int + elif scalar_ty.is_int(): + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_icmpSLE(input.handle, other.handle), _bool_like(input)) + else: + return tl.tensor(builder.create_icmpULE(input.handle, other.handle), _bool_like(input)) + assert False + + +def equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float == float + if scalar_ty.is_floating(): + return
tl.tensor(builder.create_fcmpOEQ(input.handle, other.handle), _bool_like(input)) + # == int + elif scalar_ty.is_int(): + return tl.tensor(builder.create_icmpEQ(input.handle, other.handle), _bool_like(input)) + assert False + + +def not_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float == float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpUNE(input.handle, other.handle), _bool_like(input)) + # == int + elif scalar_ty.is_int(): + return tl.tensor(builder.create_icmpNE(input.handle, other.handle), _bool_like(input)) + assert False + + +# ===----------------------------------------------------------------------===// +# Block Creation +# ===----------------------------------------------------------------------===// + + +def arange(start: int, end: int, builder: ir.builder) -> tl.tensor: + if not isinstance(start, int) or not isinstance(end, int): + raise ValueError("arange's arguments must be of type tl.constexpr") + is_start_int64 = bool(start >> 32) + is_end_int64 = bool(end >> 32) + if is_start_int64 or is_end_int64: + raise ValueError("arange must fit in int32") + if end <= start: + raise ValueError("arange's end argument must be greater than the start argument") + + shape = [end - start] + ret_ty = tl.block_type(tl.int32, shape) + return tl.tensor(builder.create_make_range(start, end), ret_ty) + + +def full(shape: List[int], value, dtype: tl.dtype, builder: ir.builder) -> tl.tensor: + if isinstance(value, tl.tensor): + assert value.numel.value == 1, "only accepts size-1 tensor" + value = cast(value, dtype, builder) + else: + # scalar + if dtype is None: + raise ValueError("dtype must be specified when value is not a tensor") + if value == 0: + value = builder.get_null_value(dtype.to_ir(builder)) + else: + get_value_fn = getattr(builder, f"get_{dtype.name}") + value = get_value_fn(value) + value = tl.tensor(value, dtype) + + return splat(value, shape, builder) + + +# ===----------------------------------------------------------------------===// +# Shape Manipulation +# ===----------------------------------------------------------------------===// + + +def splat(value: tl.tensor, shape: List[int], builder: ir.builder) -> tl.tensor: + assert not value.type.is_block(), "Cannot splat a block tensor" + if len(shape) == 0: + return value + ret_ty = tl.block_type(value.dtype, shape) + return tl.tensor(builder.create_splat(value.handle, shape), ret_ty) + + +def view(input: tl.tensor, dst_shape: List[int], builder: ir.builder) -> tl.tensor: + numel = 1 + for s in dst_shape: + numel *= s + if input.type.numel != numel: + raise ValueError("cannot view block of different shape") + ret_ty = tl.block_type(input.type.scalar, dst_shape) + return tl.tensor(builder.create_reshape(input.handle, dst_shape, True), ret_ty) + + +def reshape(input: tl.tensor, dst_shape: List[int], builder: ir.builder) -> tl.tensor: + ret_ty = tl.block_type(input.type.scalar, dst_shape) + return tl.tensor(builder.create_reshape(input.handle, dst_shape, False), ret_ty) + + +def expand_dims(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor: + dst_shape = [tl._constexpr_to_value(x) for x in input.shape] + dst_shape.insert(axis, 1) + + if not input.type.is_block(): + return splat(input, shape=dst_shape, builder=builder) + + ret_ty = tl.block_type(input.type.scalar, dst_shape) + return tl.tensor(builder.create_expand_dims(input.handle, axis), ret_ty) + + +def 
cat(lhs: tl.tensor, rhs: tl.tensor, can_reorder: bool, builder: ir.builder) -> tl.tensor: + assert can_reorder, "current implementation of `cat` always may reorder elements" + assert len(lhs.shape) == 1 + ret_type = tl.block_type(lhs.type.scalar, [lhs.shape[0] + rhs.shape[0]]) + return tl.tensor(builder.create_cat(lhs.handle, rhs.handle), ret_type) + + +def trans(input: tl.tensor, builder: ir.builder) -> tl.tensor: + if len(input.shape) != 2: + raise ValueError("Only 2D tensors can be transposed") + ret_type = tl.block_type(input.type.scalar, [input.shape[1], input.shape[0]]) + return tl.tensor(builder.create_trans(input.handle), ret_type) + + +def broadcast_impl_shape(input: tl.tensor, shape: List[int], builder: ir.builder) -> tl.tensor: + if not input.type.is_block(): + ret_ty = tl.block_type(input.type, shape) + return tl.tensor(builder.create_splat(input.handle, shape), ret_ty) + src_shape = input.type.get_block_shapes() + if len(src_shape) != len(shape): + raise ValueError(f"Cannot broadcast, rank mismatch: {src_shape}, {shape}") + if shape == src_shape: + return input + for i, item in enumerate(src_shape): + if shape[i] != item and item != 1: + raise ValueError(f"Cannot broadcast, the expanded size of the tensor ({shape[i]})" + f" must match the existing size ({item}) at non-singleton dimension" + f" {i}: {src_shape}, {shape}") + ret_ty = tl.block_type(input.type.scalar, shape) + return tl.tensor(builder.create_broadcast(input.handle, shape), ret_ty) + + +def broadcast_impl_value(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder) -> tl.tensor: + lhs_ty = lhs.type + rhs_ty = rhs.type + + # make_shape_compatible(block, scalar) + if lhs_ty.is_block() and not rhs_ty.is_block(): + rhs_ty = tl.block_type(rhs_ty.scalar, lhs_ty.shape) + rhs = tl.tensor(builder.create_splat(rhs.handle, lhs_ty.get_block_shapes()), rhs_ty) + # make_shape_compatible(scalar, block) + elif not lhs_ty.is_block() and rhs_ty.is_block(): + lhs_ty = tl.block_type(lhs_ty.scalar, rhs_ty.shape) + lhs = tl.tensor(builder.create_splat(lhs.handle, rhs_ty.get_block_shapes()), lhs_ty) + # make_shape_compatible(block, block) + elif lhs_ty.is_block() and rhs_ty.is_block(): + lhs_shape = lhs_ty.get_block_shapes() + rhs_shape = rhs_ty.get_block_shapes() + + if len(lhs_shape) < len(rhs_shape): + # Add new axes to lhs + for dim in range(len(lhs_shape), len(rhs_shape)): + lhs = tl.tensor(builder.create_expand_dims(lhs.handle, 0), + tl.block_type(lhs_ty.scalar, [1] + lhs_shape)) + lhs_ty = lhs.type + lhs_shape = lhs_ty.get_block_shapes() + elif len(rhs_shape) < len(lhs_shape): + # Add new axes to rhs + for dim in range(len(rhs_shape), len(lhs_shape)): + rhs = tl.tensor(builder.create_expand_dims(rhs.handle, 0), + tl.block_type(rhs_ty.scalar, [1] + rhs_shape)) + rhs_ty = rhs.type + rhs_shape = rhs_ty.get_block_shapes() + assert len(rhs_shape) == len(lhs_shape) + + ret_shape = [] + for i, left in enumerate(lhs_shape): + right = rhs_shape[i] + if left == 1: + ret_shape.append(right) + elif right == 1: + ret_shape.append(left) + elif left == right: + ret_shape.append(left) + else: + raise ValueError("Cannot make_shape_compatible: incompatible dimensions " + "at index " + str(i) + ": " + str(left) + " and " + str(right)) + if lhs_shape != ret_shape: + ret_ty = tl.block_type(lhs_ty.scalar, ret_shape) + lhs = tl.tensor(builder.create_broadcast(lhs.handle, ret_shape), ret_ty) + if rhs_shape != ret_shape: + ret_ty = tl.block_type(rhs_ty.scalar, ret_shape) + rhs = tl.tensor(builder.create_broadcast(rhs.handle, ret_shape), ret_ty) + # (scalar, 
scalar) => returns original blocks + return lhs, rhs + + +####### +# cast +####### + + +def bitcast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor: + src_ty = input.type + if src_ty.is_block(): + dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes()) + if src_ty == dst_ty: + return input + src_sca_ty = src_ty.scalar + dst_sca_ty = dst_ty.scalar + if src_sca_ty.is_ptr() or dst_sca_ty.is_ptr(): + return cast(input, dst_ty, builder) + # Bitcast + src_bits = src_sca_ty.primitive_bitwidth + dst_bits = dst_sca_ty.primitive_bitwidth + if src_bits != dst_bits: + raise ValueError("Cannot bitcast data-type of size " + str(src_bits) + " to " + "data-type of size " + str(dst_bits)) + return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty) + + +def cast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor: + src_ty = input.type + if isinstance(dst_ty, tl.constexpr): + dst_ty = dst_ty.value + if src_ty.is_block(): + dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes()) + if src_ty == dst_ty: + return input + + src_sca_ty = src_ty.scalar + dst_sca_ty = dst_ty.scalar + + if (src_sca_ty.is_fp8e4nv() or dst_sca_ty.is_fp8e4nv()): + assert builder.options.allow_fp8e4nv, "fp8e4nv data type is not supported on CUDA arch < 89" + + # Casting with customized floating types involved: fp8 <=> bf16, fp16, fp32, fp64 + if (src_sca_ty.is_fp8() and dst_sca_ty.is_floating()) or \ + (src_sca_ty.is_floating() and dst_sca_ty.is_fp8()): + return tl.tensor(builder.create_fp_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # bf16 <=> (not fp32) + if (src_sca_ty.is_fp16() and not dst_sca_ty.is_fp32()) or \ + (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()): + return cast(cast(input, tl.float32, builder), dst_sca_ty, builder) + + # Standard floating types' casting: truncation + # fp64 => fp32, fp16, bf16 + # fp32 => fp16, bf16 + truncate_fp = src_sca_ty.is_floating() and \ + dst_sca_ty.is_floating() and \ + src_sca_ty.primitive_bitwidth > dst_sca_ty.primitive_bitwidth + if truncate_fp: + return tl.tensor(builder.create_fp_trunc(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # Standard floating types' casting: extension + # fp32 => fp64 + # fp16 => fp32, fp64 + # bf16 => fp32, fp64 + ext_fp = src_sca_ty.is_floating() and \ + dst_sca_ty.is_floating() and \ + src_sca_ty.primitive_bitwidth < dst_sca_ty.primitive_bitwidth + if ext_fp: + return tl.tensor(builder.create_fp_ext(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # Casting between integer types + if src_sca_ty.is_int() and dst_sca_ty.is_int() and \ + (src_sca_ty.int_bitwidth != dst_sca_ty.int_bitwidth or src_sca_ty.int_signedness != dst_sca_ty.int_signedness): + sign_extend = src_sca_ty.is_int_signed() and not src_sca_ty.is_bool() + if dst_sca_ty.is_bool(): + ty = input.dtype.to_ir(builder) + _0 = tl.tensor(builder.get_null_value(ty), input.dtype) + return not_equal(input, _0, builder) + else: + return tl.tensor(builder.create_int_cast(input.handle, dst_ty.to_ir(builder), sign_extend), dst_ty) + + # Casting standard floating types to integer types + if src_sca_ty.is_standard_floating() and dst_sca_ty.is_int(): + if dst_sca_ty.is_bool(): + ty = input.dtype.to_ir(builder) + _0 = tl.tensor(builder.get_null_value(ty), input.dtype) + return not_equal(input, _0, builder) + elif dst_sca_ty.is_int_signed(): + return tl.tensor(builder.create_fp_to_si(input.handle, dst_ty.to_ir(builder)), dst_ty) + else: + return tl.tensor(builder.create_fp_to_ui(input.handle, 
dst_ty.to_ir(builder)), dst_ty) + + # Casting integer types to standard floating types + if src_sca_ty.is_int() and dst_sca_ty.is_standard_floating(): + if src_sca_ty.is_bool() or not src_sca_ty.is_int_signed(): + return tl.tensor(builder.create_ui_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) + else: + return tl.tensor(builder.create_si_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # Casting pointer types to integer types + if src_sca_ty.is_ptr() and dst_sca_ty.is_int(): + bitwidth = dst_sca_ty.int_bitwidth + if bitwidth == 64: + return tl.tensor(builder.create_ptr_to_int(input.handle, dst_ty.to_ir(builder)), dst_ty) + if bitwidth == 1: + return not_equal(cast(input, tl.int64, builder), tl.tensor(builder.get_int64(0), tl.int64), builder) + + # Casting integer types to pointer types + if src_sca_ty.is_int() and dst_sca_ty.is_ptr(): + return tl.tensor(builder.create_int_to_ptr(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # Casting pointer types to pointer types + if src_sca_ty.is_ptr() and dst_sca_ty.is_ptr(): + return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty) + + assert False, f'cannot cast {input} to {dst_ty}' + + +# ===----------------------------------------------------------------------===// +# Memory Operators +# ===----------------------------------------------------------------------===// + + +def _str_to_load_cache_modifier(cache_modifier): + cache = ir.CACHE_MODIFIER.NONE # default + if cache_modifier: + if cache_modifier == ".ca": + cache = ir.CACHE_MODIFIER.CA + elif cache_modifier == ".cg": + cache = ir.CACHE_MODIFIER.CG + else: + raise ValueError(f"Cache modifier {cache_modifier} not supported") + return cache + + +def _str_to_store_cache_modifier(cache_modifier): + cache = ir.CACHE_MODIFIER.NONE # default + if cache_modifier: + if cache_modifier == ".wb": + cache = ir.CACHE_MODIFIER.WB + elif cache_modifier == ".cg": + cache = ir.CACHE_MODIFIER.CG + elif cache_modifier == ".cs": + cache = ir.CACHE_MODIFIER.CS + elif cache_modifier == ".wt": + cache = ir.CACHE_MODIFIER.WT + else: + raise ValueError(f"Cache modifier {cache_modifier} not supported") + return cache + + +def _str_to_eviction_policy(eviction_policy): + eviction = ir.EVICTION_POLICY.NORMAL # default + if eviction_policy: + if eviction_policy == "evict_last": + eviction = ir.EVICTION_POLICY.EVICT_LAST + elif eviction_policy == "evict_first": + eviction = ir.EVICTION_POLICY.EVICT_FIRST + else: + raise ValueError(f"Eviction policy {eviction_policy} not supported") + return eviction + + +def _str_to_padding_option(padding_option): + padding = None # default + if padding_option: + if padding_option == "zero": + padding = ir.PADDING_OPTION.PAD_ZERO + elif padding_option == "nan": + padding = ir.PADDING_OPTION.PAD_NAN + else: + raise ValueError(f"Padding option {padding_option} not supported") + return padding + + +def _str_to_sem(sem_option): + sem = ir.MEM_SEMANTIC.ACQUIRE_RELEASE + if sem_option: + if sem_option == "acquire": + sem = ir.MEM_SEMANTIC.ACQUIRE + elif sem_option == "release": + sem = ir.MEM_SEMANTIC.RELEASE + elif sem_option == "acq_rel": + sem = ir.MEM_SEMANTIC.ACQUIRE_RELEASE + elif sem_option == "relaxed": + sem = ir.MEM_SEMANTIC.RELAXED + else: + raise ValueError(f"Memory semantic {sem_option} not supported") + return sem + + +def _str_to_scope(scope_option): + scope = ir.MEM_SYNC_SCOPE.GPU + if scope_option: + if scope_option == "gpu": + scope = ir.MEM_SYNC_SCOPE.GPU + elif scope_option == "cta": + scope = ir.MEM_SYNC_SCOPE.CTA + elif 
scope_option == "sys": + scope = ir.MEM_SYNC_SCOPE.SYSTEM + else: + raise ValueError(f"Memory sync scope {scope_option} not supported") + return scope + + +def _canonicalize_boundary_check(boundary_check, block_shape): + if boundary_check: + if not hasattr(boundary_check, "__iter__"): + boundary_check = [boundary_check] + boundary_check = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in boundary_check] + for dim in boundary_check: + assert isinstance(dim, int) and 0 <= dim < len(block_shape) + assert len(boundary_check) > 0 + assert len(boundary_check) == len(set(boundary_check)), "Duplicate dimension in `boundary_check`" + return sorted(boundary_check) + return tuple() + + +def _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder): + # Load by a block pointer: `pointer_type<block_type<>>` + # A block pointer cannot take `mask` or `other` arguments + if mask or other: + raise ValueError("`mask` and `other` arguments cannot be specified for loading block pointers") + + elt_ty = ptr.type.element_ty.element_ty + assert elt_ty != tl.int1, "`tl.int1` should be rewritten in `tl.make_block_ptr`" + if elt_ty.is_int() and padding == ir.PADDING_OPTION.PAD_NAN: + raise ValueError("Padding option `nan` is not supported for integer block pointers") + + # `dst_ty` is the dereferenced type of the pointer type + dst_ty = ptr.type.element_ty + + # Check `boundary_check` argument + boundary_check = _canonicalize_boundary_check(boundary_check, dst_ty.get_block_shapes()) + + # Build IR + return tl.tensor( + builder.create_tensor_pointer_load(ptr.handle, boundary_check, padding, cache, eviction, is_volatile), dst_ty) + + +def _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder): + # Load by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>` + if not ptr.type.scalar.is_ptr(): + raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.load`") + + # Check `mask`, `other`, `boundary_check`, and `padding` arguments + if not mask and other: + raise ValueError("`other` cannot be provided without `mask`") + if padding or boundary_check: + raise ValueError("`padding_option` or `boundary_check` argument is not supported for loading a tensor of " "pointers or loading a scalar.
Because the compiler does not know the boundary; please " + "use block pointers (defined by `make_block_ptr`) instead") + + # For a pointer of scalar, check the type of `mask` and `other` + if not ptr.type.is_block(): + if mask and mask.type.is_block(): + raise ValueError("Mask argument cannot be block type if pointer argument is not a block") + if other and other.type.is_block(): + raise ValueError("Other argument cannot be block type if pointer argument is not a block") + + # Make `mask` and `other` into the same shape as `ptr` + if ptr.type.is_block(): + if mask: + mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) + if other: + other = broadcast_impl_shape(other, ptr.type.get_block_shapes(), builder) + + # Get `pointer_type` and `elt_ty` + ptr_ty = ptr.type.scalar + elt_ty = ptr_ty.element_ty + + # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>` + if elt_ty == tl.int1: + elt_ty = tl.int8 + ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space) + ptr = cast(ptr, ptr_ty, builder) + + # Cast `other` into `elt_ty` type + if other: + other = cast(other, elt_ty, builder) + + # Create loaded result type `dst_ty` + if ptr.type.is_block(): + shape = ptr.type.get_block_shapes() + dst_ty = tl.block_type(elt_ty, shape) + else: + # Load by dereferencing the pointer of scalar + dst_ty = elt_ty + + # Build IR + if not mask: + return tl.tensor(builder.create_load(ptr.handle, cache, eviction, is_volatile), dst_ty) + else: + return tl.tensor( + builder.create_masked_load(ptr.handle, mask.handle, other.handle if other else None, cache, eviction, + is_volatile), dst_ty) + + +def load(ptr: tl.tensor, mask: Optional[tl.tensor], other: Optional[tl.tensor], boundary_check, padding_option: str, + cache_modifier: str, eviction_policy: str, is_volatile: bool, builder: ir.builder) -> tl.tensor: + # Cache, eviction and padding options + cache = _str_to_load_cache_modifier(cache_modifier) + eviction = _str_to_eviction_policy(eviction_policy) + padding = _str_to_padding_option(padding_option) + + if ptr.type.is_ptr() and ptr.type.element_ty.is_block(): + # Load by a block pointer: `pointer_type<block_type<>>` + return _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder) + else: + # Load by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>` + return _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder) + + +def _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder): + # Store by a block pointer: `pointer_type<block_type<>>` + # Block pointers cannot take the `mask` argument + if mask: + raise ValueError("`mask` argument cannot be specified when storing by a block pointer") + + # Check same shape and element type + block_shape = ptr.type.element_ty.get_block_shapes() + if not val.type.is_block(): + val = broadcast_impl_shape(val, block_shape, builder) + assert val.type.is_block(), "Value argument must be block type or a scalar" + assert block_shape == val.type.get_block_shapes( + ), f"Block shape({block_shape}) and value shape({val.type.get_block_shapes()}) mismatch" + assert ptr.type.element_ty.element_ty == val.type.element_ty, f"Block element type({ptr.type.element_ty.element_ty}) and value element type({val.type.element_ty}) mismatch" + + elt_ty = ptr.type.element_ty.element_ty + assert elt_ty != tl.int1, "`tl.int1` should be rewritten in `tl.make_block_ptr`" + + # Check `boundary_check` argument + boundary_check = _canonicalize_boundary_check(boundary_check, block_shape) + + #
Build IR + return tl.tensor(builder.create_tensor_pointer_store(ptr.handle, val.handle, boundary_check, cache, eviction), + tl.void) + + +def _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder): + # Store by a tensor of pointers or a pointer of scalar: `block_type>` or `pointer_type<>` + if not ptr.type.scalar.is_ptr(): + raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.store`") + + # Check `boundary_check` argument + if boundary_check: + raise ValueError("`boundary_check` argument is not supported for storing a tensor of pointers or storing a " + "scalar. Because the compiler does not know the boundary; please use block pointers " + "(defined by `make_block_ptr`) instead") + + # For a pointer of scalar, check the type of `val` and `mask` + if not ptr.type.is_block(): + if val.type.is_block(): + raise ValueError("Value argument cannot be block type if pointer argument is not a block") + if mask and mask.type.is_block(): + raise ValueError("Mask argument cannot be block type if pointer argument is not a block") + + # Make `mask` and `val` into the same shape as `ptr` + if ptr.type.is_block(): + val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder) + if mask: + mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) + + ptr_ty = ptr.type.scalar + elt_ty = ptr_ty.element_ty + + # Treat `pointer_type` as `pointer_type` + if elt_ty == tl.int1: + elt_ty = tl.int8 + ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space) + ptr = cast(ptr, ptr_ty, builder) + + # Cast to target data type + val = cast(val, elt_ty, builder) + + # Build IR + if not mask: + return tl.tensor(builder.create_store(ptr.handle, val.handle, cache, eviction), tl.void) + if not mask.type.scalar.is_bool(): + raise ValueError("Mask must have boolean scalar type") + return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle, cache, eviction), tl.void) + + +def store(ptr: tl.tensor, val: tl.tensor, mask: Optional[tl.tensor], boundary_check, cache_modifier: str, + eviction_policy: str, builder: ir.builder) -> tl.tensor: + # Cache and eviction options + cache = _str_to_store_cache_modifier(cache_modifier) + eviction = _str_to_eviction_policy(eviction_policy) + + if ptr.type.is_ptr() and ptr.type.element_ty.is_block(): + # Store by a block pointer: `pointer_type>` + return _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder) + else: + # Store by a tensor of pointers or a pointer of scalar: `block_type>` or `pointer_type<>` + return _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder) + + +######### +# atomic +######### + + +def atomic_cas(ptr: tl.tensor, cmp: tl.tensor, val: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + element_ty = ptr.type.scalar.element_ty + if element_ty.primitive_bitwidth not in [16, 32, 64]: + raise ValueError("atomic_cas only supports elements with width {16, 32, 64}") + return tl.tensor(builder.create_atomic_cas(ptr.handle, cmp.handle, val.handle, sem, scope), val.type) + + +def atom_red_typechecking_impl(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, op: str, + builder: ir.builder) -> Tuple[tl.tensor, tl.tensor, tl.tensor]: + if not ptr.type.scalar.is_ptr(): + raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__()) + element_ty = ptr.type.scalar.element_ty + if element_ty is tl.float16 and op != 'add': + raise ValueError("atomic_" + op + " does not support fp16") + 
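# Note: GPU atomic RMW instructions generally operate on 32/64-bit words (fp16 add, handled + # above, being the main exception), which is why sub-word integer and bf16 elements are rejected. +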
if element_ty in [tl.int1, tl.int8, tl.int16, tl.bfloat16]: + raise ValueError("atomic_" + op + " does not support " + str(element_ty)) + if ptr.type.is_block(): + if mask: + mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) + if val: + val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder) + val = cast(val, ptr.type.scalar.element_ty, builder) + if not mask: + mask_ir = builder.get_int1(True) + mask_ty = tl.int1 + if ptr.type.is_block(): + mask_ir = builder.create_splat(mask_ir, ptr.type.get_block_shapes()) + mask_ty = tl.block_type(tl.int1, ptr.type.get_block_shapes()) + mask = tl.tensor(mask_ir, mask_ty) + return ptr, val, mask + + +def atomic_max(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'max', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + sca_ty = val.type.scalar + # direct call to atomic_max for integers + if sca_ty.is_int(): + if sca_ty.is_int_signed(): + return tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + else: + return tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + # for float + # return atomic_smax(i_ptr, i_val) if val >= 0 + # return atomic_umin(i_ptr, i_val) if val < 0 + if sca_ty not in {tl.float32, tl.float64}: + raise TypeError(f"atomic_max not supported for dtype {sca_ty}") + + itype = tl.int32 if sca_ty == tl.float32 else tl.int64 + zero = full([], 0.0, sca_ty, builder) + + i_val = bitcast(val, itype, builder) + i_ptr = bitcast(ptr, tl.pointer_type(itype, 1), builder) + pos = greater_equal(val, zero, builder) + neg = less_than(val, zero, builder) + pos_ret = tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, i_ptr.handle, i_val.handle, + and_(mask, pos, builder).handle, sem, scope), i_val.type) + neg_ret = tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, i_ptr.handle, i_val.handle, + and_(mask, neg, builder).handle, sem, scope), i_val.type) + ret = where(pos, pos_ret, neg_ret, builder) + return bitcast(ret, sca_ty, builder) + + +def atomic_min(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'min', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + sca_ty = val.type.scalar + # direct call to atomic_min for integers + if sca_ty.is_int(): + if sca_ty.is_int_signed(): + return tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + else: + return tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + # for float + # return atomic_smin(i_ptr, i_val) if val >= 0 + # return atomic_umax(i_ptr, i_val) if val < 0 + if sca_ty not in {tl.float32, tl.float64}: + raise TypeError(f"atomic_min not supported for dtype {sca_ty}") + + itype = tl.int32 if sca_ty == tl.float32 else tl.int64 + zero = full([], 0.0, sca_ty, builder) + + i_val = bitcast(val, itype, builder) + i_ptr = bitcast(ptr, tl.pointer_type(itype, 1), builder) + pos = greater_equal(val, zero, builder) + neg = less_than(val, zero, builder) + pos_ret = tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, i_ptr.handle, i_val.handle, + and_(mask, pos, builder).handle, sem, scope), i_val.type) + neg_ret = tl.tensor( +
builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, i_ptr.handle, i_val.handle, + and_(mask, neg, builder).handle, sem, scope), i_val.type) + ret = where(pos, pos_ret, neg_ret, builder) + return bitcast(ret, sca_ty, builder) + + +def atomic_add(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'add', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + sca_ty = val.type.scalar + op = ir.ATOMIC_OP.FADD if sca_ty.is_floating() else ir.ATOMIC_OP.ADD + return tl.tensor(builder.create_atomic_rmw(op, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + + +def atomic_and(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'and', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.AND, ptr.handle, val.handle, mask.handle, sem, scope), + val.type) + + +def atomic_or(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'or', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.OR, ptr.handle, val.handle, mask.handle, sem, scope), + val.type) + + +def atomic_xor(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xor', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XOR, ptr.handle, val.handle, mask.handle, sem, scope), + val.type) + + +def atomic_xchg(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, + builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xchg', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XCHG, ptr.handle, val.handle, mask.handle, sem, scope), + val.type) + + +# ===----------------------------------------------------------------------===// +# Linear Algebra +# ===----------------------------------------------------------------------===// + + +def gpu_has_mfma() -> bool: + if not is_hip(): + return False + return True # mfma supported in ['gfx908', 'gfx90a'] + + +def mfma_supported(M, N, K, allow_tf32, ret_scalar_ty) -> bool: + if not gpu_has_mfma(): + return False + # TODO: Add check for configurations and types. + return True + + +def dot(lhs: tl.tensor, rhs: tl.tensor, acc: tl.tensor, allow_tf32: bool, max_num_imprecise_acc: int, + out_dtype: tl.dtype, builder: ir.builder) -> tl.tensor: + + def assert_dtypes_valid(lhs_dtype, rhs_dtype, options): + if not options.allow_fp8e4nv: + assert not lhs_dtype.is_fp8e4nv() and not rhs_dtype.is_fp8e4nv( + ), "Dot op does not support fp8e4nv on CUDA arch < 90" + if lhs_dtype.is_fp8() and rhs_dtype.is_fp8(): + return + assert lhs_dtype == rhs_dtype, f"First input ({lhs_dtype}) and second input ({rhs_dtype}) must have the same dtype!" 
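+ # The branch below handles targets where fp8e4nv is allowed (CUDA arch >= 90, per the + # messages): fp8 operand pairs may mix e4nv/e5 formats, while other pairs must match exactly.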
+ else: + assert not lhs_dtype.is_fp8e4b15() and not rhs_dtype.is_fp8e4b15( + ), "Dot op does not support fp8e4b15 on CUDA arch >= 90" + assert not lhs_dtype.is_fp8e4b15x4() and not rhs_dtype.is_fp8e4b15x4( + ), "Dot op does not support fp8e4b15x4 on CUDA arch >= 90" + if lhs_dtype.is_int() or rhs_dtype.is_int(): + assert lhs_dtype == rhs_dtype, f"Both operands must be same type. First operand ({lhs_dtype}) and second operand ({rhs_dtype})" + assert lhs_dtype.is_int8() or lhs_dtype.is_uint8( + ), f"Both operands must be either int8 or uint8. Operand type ({lhs_dtype})" + elif lhs_dtype.is_fp8() or rhs_dtype.is_fp8(): + assert lhs_dtype.is_fp8e4nv() or lhs_dtype.is_fp8e5( + ), f"Only supports fp8e4nv or fp8e5. First operand ({lhs_dtype})" + assert rhs_dtype.is_fp8e4nv() or rhs_dtype.is_fp8e5( + ), f"Only supports fp8e4nv or fp8e5. Second operand ({rhs_dtype})" + else: + assert lhs_dtype.is_fp16() or lhs_dtype.is_bf16() or lhs_dtype.is_fp32() or lhs_dtype.is_int1( + ), f"Unsupported dtype {lhs_dtype}" + assert rhs_dtype.is_fp16() or rhs_dtype.is_bf16() or rhs_dtype.is_fp32() or rhs_dtype.is_int1( + ), f"Unsupported dtype {rhs_dtype}" + assert lhs_dtype == rhs_dtype, f"First input ({lhs_dtype}) and second input ({rhs_dtype}) must have the same dtype!" + + assert lhs.type.is_block() and rhs.type.is_block() + + assert_dtypes_valid(lhs.dtype, rhs.dtype, builder.options) + + assert len(lhs.shape) == 2, f"First input shape ({lhs.shape}) is not two dimensional!" + assert len(rhs.shape) == 2, f"Second input shape ({rhs.shape}) is not two dimensional!" + assert lhs.shape[1].value == rhs.shape[ + 0].value, f"First input shape ({lhs.shape}) and second input shape {rhs.shape} are not compatible for matmul (second index of first shape ({lhs.shape[1].value}) must be equal to first index of second shape ({rhs.shape[0].value})" + assert lhs.shape[0].value >= 16 and lhs.shape[1].value >= 16 \ + and rhs.shape[1].value >= 16, \ + f"All values in both first input shape ({lhs.shape}) and second input shape ({rhs.shape}) must be >= 16!" + if lhs.type.scalar.is_int(): + assert lhs.type.scalar == tl.int8, "only int8 supported!" + # TODO: This is CUDA specific, check if ROCm has the same limitation + assert lhs.shape[1].value >= 32, "small blocks not supported!" + _0 = builder.get_int32(0) + ret_scalar_ty = tl.int32 + elif out_dtype.is_bf16(): + raise ValueError( + "out_dtype=bfloat16 is unsupported. Please use out_dtype=float32/float16 and cast with `.to(tl.bfloat16)`") + elif lhs.type.scalar.is_fp32() or lhs.type.scalar.is_bf16(): + _0 = builder.get_fp32(0) + ret_scalar_ty = tl.float32 + else: + _0 = builder.get_fp16(0) if out_dtype.is_fp16() else builder.get_fp32(0) + ret_scalar_ty = out_dtype + + M = lhs.type.shape[0] + N = rhs.type.shape[1] + + # Cast operands of types f16 and i8 for configurations where FMA only supported. 
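+ # Sketch of the fallback below (assuming a HIP target without MFMA): integer inputs are + # promoted to fp32 so the dot can lower to FMA instructions, and the result is cast back + # to the requested type; float inputs keep their computation type, with a zero accumulator + # splat of matching precision.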
+ if is_hip() and not mfma_supported(M, N, lhs.type.shape[1], allow_tf32, ret_scalar_ty): + ret_cast_scalar_ty = tl.float32 if lhs.type.scalar.is_int() else ret_scalar_ty + lhs = cast(lhs, ret_cast_scalar_ty, builder) + rhs = cast(rhs, ret_cast_scalar_ty, builder) + if ret_cast_scalar_ty == tl.float16: + _0 = builder.create_splat(builder.get_fp16(0), [M, N]) + else: + _0 = builder.create_splat(builder.get_fp32(0), [M, N]) + ret_ty = tl.block_type(ret_cast_scalar_ty, [M, N]) + ret = tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), ret_ty) + return cast(ret, ret_scalar_ty, builder) + if is_hip() and mfma_supported(M, N, lhs.type.shape[1], allow_tf32, + ret_scalar_ty) and ret_scalar_ty.primitive_bitwidth < 32: + if lhs.type.scalar.is_int(): + ret_dot_scalar_ty = tl.int32 + _0 = builder.create_splat(builder.get_int32(0), [M, N]) + else: + ret_dot_scalar_ty = tl.float32 + _0 = builder.create_splat(builder.get_fp32(0), [M, N]) + ret_ty = tl.block_type(ret_dot_scalar_ty, [M, N]) + ret = tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), ret_ty) + return cast(ret, ret_scalar_ty, builder) + ret_ty = tl.block_type(ret_scalar_ty, [M, N]) + if acc is None: + acc_handle = builder.create_splat(_0, [M, N]) + else: + acc_handle = acc.handle + assert acc.type == ret_ty + + # max_num_imprecise_acc only applies to fp8 -> fp32 dot on sm_90 + max_num_imprecise_acc = 0 + if lhs.dtype.is_fp8() and rhs.dtype.is_fp8(): + max_num_imprecise_acc = builder.options.max_num_imprecise_acc_default + if max_num_imprecise_acc is None: + max_num_imprecise_acc = 2**30 + + return tl.tensor(builder.create_dot(lhs.handle, rhs.handle, acc_handle, allow_tf32, max_num_imprecise_acc), ret_ty) + + +# ===----------------------------------------------------------------------===// +# Indexing +# ===----------------------------------------------------------------------===// + + +def where(condition: tl.tensor, x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: + condition = cast(condition, tl.int1, builder) + if condition.type.is_block(): + condition, x = broadcast_impl_value(condition, x, builder) + x, y = broadcast_impl_value(x, y, builder) + condition, x = broadcast_impl_value(condition, x, builder) + + x, y = binary_op_type_checking_impl(x, y, builder, True, True) + if not condition.type.is_block(): + condition, _ = broadcast_impl_value(condition, x, builder) + ret_ty = x.type + return tl.tensor(builder.create_select(condition.handle, x.handle, y.handle), ret_ty) + + +# ===----------------------------------------------------------------------===// +# Reduction +# ===----------------------------------------------------------------------=== + + +def reduction(inputs: Sequence[tl.tensor], axis: int, region_builder_fn, builder: ir.builder) -> Tuple[tl.tensor, ...]: + if axis is None: + new_inputs = [] + for i in range(len(inputs)): + new_shape = [inputs[i].numel.value] + new_inputs.append(view(inputs[i], new_shape, builder)) + inputs = tuple(new_inputs) + axis = 0 + # get result shape + shape = inputs[0].type.shape + ret_shape = [s for i, s in enumerate(shape) if i != axis] + for t in inputs: + assert t.type.shape == shape + + def wrap_tensor(x, scalar_ty): + if ret_shape: + res_ty = tl.block_type(scalar_ty, ret_shape) + else: + # 0d-tensor -> scalar + res_ty = scalar_ty + return tl.tensor(x, res_ty) + + reduce_op = builder.create_reduce([t.handle for t in inputs], axis) + region_builder_fn(reduce_op) + reduce_op.verify() + + return tuple(wrap_tensor(reduce_op.get_result(i), 
inputs[i].type.scalar) for i in range(len(inputs))) + + +# ===----------------------------------------------------------------------=== +# Associative Scan +# ===----------------------------------------------------------------------=== + + +def associative_scan(inputs: Sequence[tl.tensor], axis: int, region_builder_fn, + builder: ir.builder) -> Tuple[tl.tensor, ...]: + if len(inputs) != 1: + raise ValueError("The current implementation only supports a single tensor input") + shape = inputs[0].type.shape + + def wrap_tensor(x, scalar_ty): + res_ty = tl.block_type(scalar_ty, shape) + return tl.tensor(x, res_ty) + + scan_op = builder.create_scan([t.handle for t in inputs], axis) + region_builder_fn(scan_op) + scan_op.verify() + + return tuple(wrap_tensor(scan_op.get_result(i), inputs[i].type.scalar) for i in range(len(inputs))) + + +# ===----------------------------------------------------------------------=== +# Math +# ===----------------------------------------------------------------------=== + + +def _check_dtype(dtypes: List[str]) -> T: + """ + We follow libdevice's convention when checking the data types accepted by math functions. + Supporting every data type is not practical, since accelerators/GPUs lack native + float16 and bfloat16 implementations of many math operations. + Instead, we let users know which data type they are using and require an explicit + cast to a supported one. + """ + + def wrapper(fn): + + @wraps(fn) + def check(*args, **kwargs): + # concatenate args and kwargs + all_args = list(args) + list(kwargs.values()) + for arg in [a for a in all_args if isinstance(a, tl.tensor)]: + if arg.type.scalar.name not in dtypes: + raise ValueError(f"Expected dtype {dtypes} but got {arg.type.scalar.name}") + return fn(*args, **kwargs) + + return check + + return wrapper + + +def umulhi(x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: + x, y = binary_op_type_checking_impl(x, y, builder) + # FIXME(Keren): not portable, should be fixed + from . import math + return math.mulhi(x, y, _builder=builder) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def floor(x: tl.tensor, builder: ir.builder) -> tl.tensor: + # FIXME(Keren): not portable, should be fixed + from .
import math + return math.floor(x, _builder=builder) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def exp(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_exp(x.handle), x.type) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def log(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_log(x.handle), x.type) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def cos(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_cos(x.handle), x.type) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def sin(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_sin(x.handle), x.type) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def sqrt(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_sqrt(x.handle), x.type) + + +def abs(x: tl.tensor, builder: ir.builder) -> tl.tensor: + dtype = x.dtype + if dtype.is_floating(): + return tl.tensor(builder.create_fabs(x.handle), x.type) + elif dtype.is_int_signed(): + return tl.tensor(builder.create_iabs(x.handle), x.type) + elif dtype.is_int_unsigned(): + return x # no-op + else: + assert False, f"Unexpected dtype {dtype}" + + +## + + +def multiple_of(x: tl.tensor, values: List[int]) -> tl.tensor: + if max(1, len(x.shape)) != len(values): + raise ValueError("Shape of input to multiple_of does not match the length of values") + x.handle.set_attr("tt.divisibility", ir.make_attr(values, x.handle.get_context())) + return x + + +def max_contiguous(x: tl.tensor, values: List[int]) -> tl.tensor: + if len(x.shape) != len(values): + raise ValueError("Shape of input to max_contiguous does not match the length of values") + x.handle.set_attr("tt.contiguity", ir.make_attr(values, x.handle.get_context())) + return x + + +def max_constancy(x: tl.tensor, values: List[int]) -> tl.tensor: + if len(x.shape) != len(values): + raise ValueError("Shape of input to max_constancy does not match the length of values") + x.handle.set_attr("tt.constancy", ir.make_attr(values, x.handle.get_context())) + return x + + +def debug_barrier(builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_barrier(), tl.void) + + +def device_print(prefix: str, args: List[tl.tensor], builder: ir.builder) -> tl.tensor: + # It makes sense visually for prefix to end in ": "; make it so. Also, + # non-empty prefixes should start with " ". 
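+ # For example (assuming `args` is non-empty): "x" -> " x: ", "x:" -> " x: ", and an + # empty prefix becomes ": ".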
+ if not prefix.endswith(" ") and args: + prefix += " " + if not prefix.endswith(": ") and args: + prefix = prefix[:-1] + ": " + if len(prefix) > 2 and not prefix.startswith(" "): + prefix = " " + prefix + + new_args = [] + for arg in args: + new_args.append(arg.handle) + return tl.tensor(builder.create_print(prefix, new_args), tl.void) + + +def device_assert(cond: tl.tensor, msg: str, file_name: str, func_name, lineno: int, builder: ir.builder) -> tl.tensor: + cond_ty = cond.type + if not cond_ty.is_block(): + cond_ty = tl.block_type(cond_ty.scalar, (1, )) + cond = tl.tensor(builder.create_splat(cond.handle, (1, )), cond_ty) + return tl.tensor(builder.create_assert(cond.handle, msg, file_name, func_name, lineno), tl.void) + + +def _convert_elem_to_ir_value(builder, elem, require_i64): + if isinstance(elem, int): + elem = tl.constexpr(elem) + if isinstance(elem, tl.constexpr): + return builder.get_int64(elem.value) if require_i64 else builder.get_int32(elem.value) + elif isinstance(elem, tl.tensor): + assert elem.numel.value == 1, "Expected a scalar in shape/strides/offsets" + assert elem.dtype.is_int(), "Expected an integer scalar type in shape/strides/offsets" + if elem.dtype != tl.int64 and require_i64: + return builder.create_int_cast(elem.handle, builder.get_int64_ty(), elem.dtype.is_int_signed()) + elif elem.dtype != tl.int32: + return builder.create_int_cast(elem.handle, builder.get_int32_ty(), elem.dtype.is_int_signed()) + return elem.handle + assert False, f"Unsupported element type in shape/strides/offsets: {type(elem)}" + + +def _convert_to_ir_values(builder, list_like, require_i64=True): + if hasattr(list_like, "__iter__"): + return [_convert_elem_to_ir_value(builder, elem, require_i64) for elem in list_like] + return [_convert_elem_to_ir_value(builder, list_like, require_i64)] + + +def make_block_ptr(base: tl.tensor, shape, strides, offsets, block_shape, order, builder: ir.builder) -> tl.tensor: + # Convert dynamic arguments to IR values + # NOTES(Chenggang): current `shape/strides` are `int64_t`, while `offsets/block_shape` are `int32_t` + shape = _convert_to_ir_values(builder, shape) + strides = _convert_to_ir_values(builder, strides) + offsets = _convert_to_ir_values(builder, offsets, require_i64=False) + + # Check `base` type + if not base.type.is_ptr() or base.type.element_ty.is_block(): + raise ValueError("Expected `base` to be a pointer type (but not a block pointer type or others)") + + # Treat `pointer_type` as `pointer_type` + if base.type.element_ty == tl.int1: + base = cast(base, tl.pointer_type(tl.int8, base.type.address_space), builder) + + # Check whether `block_shape` is static + if not hasattr(block_shape, "__iter__"): + block_shape = [block_shape] + block_shape = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in block_shape] + assert all([isinstance(elem, int) and -2**31 <= elem < 2**31 for elem in block_shape]), \ + "Expected a list of constant integers (`int32_t` range) in `block_shape`" + + # Check `order` + if not hasattr(order, "__iter__"): + order = [order] + order = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in order] + assert sorted(order) == list(range(len(order))), "Expected a permutation of (0, 1, ..., len(order)-1) in order" + + # Must have same length + assert all([len(block_shape) == len(list_like) for list_like in [shape, strides, offsets, order]]), \ + "Expected shape/strides/offsets/block_shape to have the same length" + + # Build value, the type is: + # `pointer_type>` in Python + # `tt.ptr>` in MLIR + 
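# As an illustration, a front-end call might look like (hypothetical names: `out_ptr`, + # M, N, BLOCK_M and BLOCK_N are assumptions, not part of this diff): + # tl.make_block_ptr(base=out_ptr, shape=(M, N), strides=(N, 1), + # offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0)) +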
handle = builder.create_make_block_ptr(base.handle, shape, strides, offsets, block_shape, order) + return tl.tensor(handle, tl.pointer_type(tl.block_type(base.type.element_ty, block_shape))) + + +def advance(base: tl.tensor, offsets, builder: ir.builder) -> tl.tensor: + # Convert dynamic offsets to IR values + offsets = _convert_to_ir_values(builder, offsets, require_i64=False) + + # Advanced block pointer type is the same as before + return tl.tensor(builder.create_advance(base.handle, offsets), base.type) diff --git a/llmeval-env/lib/python3.10/site-packages/triton/language/standard.py b/llmeval-env/lib/python3.10/site-packages/triton/language/standard.py new file mode 100644 index 0000000000000000000000000000000000000000..c211655b88bb024cdacb661701cc459754fedbe1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/language/standard.py @@ -0,0 +1,404 @@ +from __future__ import annotations + +from ..runtime.jit import jit +from . import core, math + +# ----------------------- +# Standard library +# ----------------------- + + +@jit +def cdiv(x, div): + """ + Computes the ceiling division of :code:`x` by :code:`div` + + :param x: the input number + :type x: Block + :param div: the divisor + :param div: Block + """ + return (x + div - 1) // div + + +@jit +@core._add_math_1arg_docstr("sigmoid") +def sigmoid(x): + return 1 / (1 + core.exp(-x)) + + +@jit +@core._add_math_1arg_docstr("softmax") +def softmax(x, ieee_rounding=False): + z = x - max(x, 0) + num = core.exp(z) + den = sum(num, 0) + return core.fdiv(num, den, ieee_rounding) + + +@jit +def ravel(x): + """ + Returns a contiguous flattened view of :code:`x`. + + :param x: the input tensor + :type x: Block + """ + return core.view(x, [x.numel]) + + +@jit +def swizzle2d(i, j, size_i, size_j, size_g): + """ + Transforms indices of a row-major size_i*size_j matrix into those + of one where indices are row major for each group of size_j rows. + For example, for size_i = size_j = 4 and size_g = 2, it will transform + [[0 , 1 , 2 , 3 ], + [4 , 5 , 6 , 7 ], + [8 , 9 , 10, 11], + [12, 13, 14, 15]] + into + [[0, 2, 4 , 6 ], + [1, 3, 5 , 7 ], + [8, 10, 12, 14], + [9, 11, 13, 15]] + """ + # "unrolled index in array" + ij = i * size_j + j + # number of elements in `size_g` groups + # of `size_j` columns + size_gj = size_g * size_j + # index of the group in which (i,j) is + group_id = ij // size_gj + # row-index of the first element of this group + off_i = group_id * size_g + # last group may have fewer rows + size_g = minimum(size_i - off_i, size_g) + # new row and column indices + new_i = off_i + (ij % size_g) + new_j = (ij % size_gj) // size_g + return new_i, new_j + + +@jit +def zeros(shape, dtype): + """ + Returns a tensor filled with the scalar value 0 for the given :code:`shape` and :code:`dtype`. + + :param shape: Shape of the new array, e.g., (8, 16) or (8, ) + :type shape: tuple of ints + :param dtype: Data-type of the new array, e.g., :code:`tl.float16` + :type dtype: DType + """ + return core.full(shape, 0, dtype) + + +@jit +def zeros_like(input): + return zeros(input.shape, input.dtype) + + +@jit +def minimum(x, y): + """ + Computes the element-wise minimum of :code:`x` and :code:`y`. + + :param input: the first input tensor + :type input: Block + :param other: the second input tensor + :type other: Block + """ + return math.min(x, y) + + +@jit +def maximum(x, y): + """ + Computes the element-wise maximum of :code:`x` and :code:`y`. 
+
+
+# max and argmax
+
+
+@jit
+def _argmax_combine(value1, index1, value2, index2, tie_break_left):
+    if tie_break_left:
+        tie = value1 == value2 and index1 < index2
+    else:
+        tie = False
+    gt = value1 > value2 or tie
+    v_ret = core.where(gt, value1, value2)
+    i_ret = core.where(gt, index1, index2)
+    return v_ret, i_ret
+
+
+@jit
+def _argmax_combine_tie_break_left(value1, index1, value2, index2):
+    return _argmax_combine(value1, index1, value2, index2, True)
+
+
+@jit
+def _argmax_combine_tie_break_fast(value1, index1, value2, index2):
+    return _argmax_combine(value1, index1, value2, index2, False)
+
+
+@jit
+@core._add_reduction_docstr("maximum", return_indices_arg="return_indices",
+                            tie_break_arg="return_indices_tie_break_left")
+def max(input, axis=None, return_indices=False, return_indices_tie_break_left=True):
+    input = core._promote_reduction_input(input)
+    if return_indices:
+        if return_indices_tie_break_left:
+            return core._reduce_with_indices(input, axis, _argmax_combine_tie_break_left)
+        else:
+            return core._reduce_with_indices(input, axis, _argmax_combine_tie_break_fast)
+    else:
+        if core.constexpr(input.dtype.primitive_bitwidth) < core.constexpr(32):
+            if core.constexpr(input.dtype.is_floating()):
+                input = input.to(core.float32)
+            else:
+                assert input.dtype.is_integer_type()
+                input = input.to(core.int32)
+        return core.reduce(input, axis, maximum)
+
+
+@jit
+@core._add_reduction_docstr("maximum index", tie_break_arg="tie_break_left")
+def argmax(input, axis, tie_break_left=True):
+    (_, ret) = max(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left)
+    return ret
+
+
+# min and argmin
+
+
+@jit
+def _argmin_combine(value1, index1, value2, index2, tie_break_left):
+    if tie_break_left:
+        tie = value1 == value2 and index1 < index2
+    else:
+        tie = False
+    lt = value1 < value2 or tie
+    value_ret = core.where(lt, value1, value2)
+    index_ret = core.where(lt, index1, index2)
+    return value_ret, index_ret
+
+
+@jit
+def _argmin_combine_tie_break_left(value1, index1, value2, index2):
+    return _argmin_combine(value1, index1, value2, index2, True)
+
+
+@jit
+def _argmin_combine_tie_break_fast(value1, index1, value2, index2):
+    return _argmin_combine(value1, index1, value2, index2, False)
+
+
+@jit
+@core._add_reduction_docstr("minimum", return_indices_arg="return_indices",
+                            tie_break_arg="return_indices_tie_break_left")
+def min(input, axis=None, return_indices=False, return_indices_tie_break_left=True):
+    input = core._promote_reduction_input(input)
+    if return_indices:
+        if return_indices_tie_break_left:
+            return core._reduce_with_indices(input, axis, _argmin_combine_tie_break_left)
+        else:
+            return core._reduce_with_indices(input, axis, _argmin_combine_tie_break_fast)
+    else:
+        if core.constexpr(input.dtype.primitive_bitwidth) < 32:
+            if core.constexpr(input.dtype.is_floating()):
+                input = input.to(core.float32)
+            else:
+                assert input.dtype.is_integer_type()
+                input = input.to(core.int32)
+        return core.reduce(input, axis, minimum)
+
+
+@jit
+@core._add_reduction_docstr("minimum index", tie_break_arg="tie_break_left")
+def argmin(input, axis, tie_break_left=True):
+    _, ret = min(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left)
+    return ret
+
+
+@jit
+def _sum_combine(a, b):
+    return a + b
+
+
+# sum
+
+
+@jit
+@core._add_reduction_docstr("sum")
+def sum(input, axis=None):
+    
input = core._promote_reduction_input(input) + return core.reduce(input, axis, _sum_combine) + + +@jit +def _xor_combine(a, b): + return a ^ b + + +# xor sum + + +@core.builtin +@core._add_reduction_docstr("xor sum") +def xor_sum(input, axis=None, _builder=None, _generator=None): + scalar_ty = input.type.scalar + if not scalar_ty.is_int(): + raise ValueError("xor_sum only supported for integers") + + input = core._promote_reduction_input(input, _builder=_builder) + return core.reduce(input, axis, _xor_combine, _builder=_builder, _generator=_generator) + + +# cumsum + + +@jit +@core._add_scan_docstr("cumsum") +def cumsum(input, axis=0): + # todo rename this to a generic function name + input = core._promote_reduction_input(input) + return core.associative_scan(input, axis, _sum_combine) + + +# cumprod + + +@jit +def _prod_combine(a, b): + return a * b + + +@jit +@core._add_scan_docstr("cumprod") +def cumprod(input, axis=0): + # todo rename this to a generic function name + input = core._promote_reduction_input(input) + return core.associative_scan(input, axis, _prod_combine) + + +# sort + + +@jit +def _indicator(n_dims: core.constexpr, idx: core.constexpr, pos: core.constexpr): + core.static_assert(idx < n_dims) + core.static_assert((pos == 0) or (pos == 1)) + y = core.arange(0, 2) + if pos == 0: + y = 1 - y + + for n in core.static_range(0, n_dims): + if n != n_dims - 1 - idx: + y = core.expand_dims(y, n) + return y + + +@jit +def _take_slice(x, n_dims: core.constexpr, idx: core.constexpr, pos: core.constexpr, keep_dim: core.constexpr = True): + y = sum(x * _indicator(n_dims, idx, pos), n_dims - 1 - idx) + if keep_dim: + y = core.expand_dims(y, n_dims - 1 - idx) + + return y + + +@jit +def _compare_and_swap(x, desc_mask, n_dims: core.constexpr, idx: core.constexpr): + l = _take_slice(x, n_dims, idx, 0) + r = _take_slice(x, n_dims, idx, 1) + + x_int = x + l_int = l + r_int = r + if x.dtype.is_floating(): + if core.constexpr(x.dtype.primitive_bitwidth) == 16: + dtype_int = core.int16 + elif core.constexpr(x.dtype.primitive_bitwidth) == 32: + dtype_int = core.int32 + elif core.constexpr(x.dtype.primitive_bitwidth) == 64: + dtype_int = core.int64 + else: + raise ValueError("Unsupported dtype") + x_int = x.to(dtype_int, bitcast=True) + l_int = l.to(dtype_int, bitcast=True) + r_int = r.to(dtype_int, bitcast=True) + desc_mask = desc_mask.to(x_int.dtype) + zero = zeros_like(x_int) + y = x_int ^ core.where((l > r) ^ desc_mask, l_int ^ r_int, zero) + y = y.to(x.dtype, bitcast=True) + return y + + +@jit +def _bitonic_merge(x, n_dims: core.constexpr, active_dims: core.constexpr, order_type: core.constexpr): + ''' + order_type 0 == ascending + order_type 1 == descending + order_type 2 == alternating + ''' + core.static_assert(active_dims <= n_dims) + + if order_type == 2: + desc_mask = _indicator(n_dims, active_dims, 1) + else: + desc_mask = order_type + + for i in core.static_range(active_dims): + x = _compare_and_swap(x, desc_mask, n_dims, active_dims - 1 - i) + + return x + + +def _log2(i: core.constexpr): + log2 = 0 + n = i.value + while n > 1: + n >>= 1 + log2 += 1 + return core.constexpr(log2) + + +def _is_power_of_two(i: core.constexpr): + n = i.value + return core.constexpr((n & (n - 1)) == 0 and n != 0) + + +def _unwrap_if_constexpr(o): + return o.value if isinstance(o, core.constexpr) else o + + +def _get_sort_dim(dim, shape): + dim = _unwrap_if_constexpr(dim) + shape = _unwrap_if_constexpr(shape) + if dim is None: + dim = len(shape) - 1 + assert dim == len(shape) - 1, "Currently only support 
sorting on the last dimension" + return core.constexpr(dim) + + +@jit +def sort(x, dim=None, descending: core.constexpr = 0): + core.static_assert(_is_power_of_two(x.shape[_get_sort_dim(dim, x.shape)])) + core.static_assert(_is_power_of_two(x.numel)) + # reshape the tensor to have all dimensions be 2. + # TODO: We shouldn't have to change the dimensions not sorted. + y = core.reshape(x, [2] * _log2(x.numel)) + for i in core.static_range(1, _log2(x.shape[_get_sort_dim(dim, x.shape)]) + 1): + y = _bitonic_merge(y, _log2(x.numel), i, (descending if + (i == _log2(x.shape[_get_sort_dim(dim, x.shape)])) else 2)) + + x = core.reshape(y, x.shape) + return x diff --git a/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/build_extern.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/build_extern.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddf46fb289c33645f20f84dfd80e761dad37d8e3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/build_extern.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/compile.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/compile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a3c2677a42901debedea8277ee0c50dc54a2698 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/compile.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/disasm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/disasm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa17f74fdf7e6c45b119b404de773fffacaed3eb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/triton/tools/__pycache__/disasm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/triton/tools/build_extern.py b/llmeval-env/lib/python3.10/site-packages/triton/tools/build_extern.py new file mode 100644 index 0000000000000000000000000000000000000000..6f00e8192593930018b55ac0daaaf6db69a379c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/tools/build_extern.py @@ -0,0 +1,376 @@ +import argparse +import subprocess +from abc import ABC, abstractmethod +from typing import Dict, List, Optional + + +class Symbol: + _name: str + _op_name: str + _ret_type: str + _arg_names: List[str] + _arg_types: List[str] + + def __init__( + self, + name: str, + op_name: str, + ret_type: str, + arg_names: List[str], + arg_types: List[str], + ) -> None: + ''' + A symbol is a function declaration. 
+        :param name: name of the symbol
+        :param op_name: name of the operation
+        :param ret_type: return type of the operation
+        :param arg_names: names of the arguments
+        :param arg_types: types of the arguments
+        '''
+        self._name = name
+        self._op_name = op_name
+        self._ret_type = ret_type
+        self._arg_names = list(arg_names)
+        self._arg_types = list(arg_types)
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def op_name(self) -> str:
+        return self._op_name
+
+    @property
+    def ret_type(self) -> str:
+        return self._ret_type
+
+    @property
+    def arg_names(self) -> List[str]:
+        return self._arg_names
+
+    @property
+    def arg_types(self) -> List[str]:
+        return self._arg_types
+
+
+def convert_type(type_str) -> Optional[str]:
+    if type_str == "i32":
+        return "int32"
+    elif type_str == "u32":
+        return "uint32"
+    elif type_str == "i64":
+        return "int64"
+    elif type_str == "u64":
+        return "uint64"
+    elif type_str == "float":
+        return "fp32"
+    elif type_str == "double":
+        return "fp64"
+    else:
+        # ignore other types, such as pointer types
+        return None
+
+
+def to_unsigned(type_str) -> str:
+    if type_str == "int32":
+        return "uint32"
+    elif type_str == "int64":
+        return "uint64"
+    else:
+        return type_str
+
+
+class ExternLibrary(ABC):
+    _name: str
+    _path: str
+    _symbols: Dict[str, Symbol]
+    _format: bool
+    _grouping: bool
+
+    def __init__(
+        self,
+        name: str,
+        path: str,
+        format: bool = True,
+        grouping: bool = True,
+    ) -> None:
+        '''
+        Abstract class for extern library.
+        :param name: name of the library
+        :param path: path of the library
+        :param format: whether to format the generated stub file
+        :param grouping: whether to group related symbols under a shared stub
+        '''
+        self._name = name
+        self._path = path
+        self._symbols = {}
+        self._format = format
+        self._grouping = grouping
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def path(self) -> str:
+        return self._path
+
+    @property
+    def symbols(self) -> Dict[str, Symbol]:
+        return self._symbols
+
+    @property
+    def grouping(self) -> bool:
+        return self._grouping
+
+    @abstractmethod
+    def parse_symbols(self, input_file) -> None:
+        pass
+
+    @abstractmethod
+    def _output_stubs(self) -> str:
+        pass
+
+    def generate_stub_file(self, output_dir) -> None:
+        file_str = self._output_stubs()
+        if file_str is None or len(file_str) == 0:
+            raise Exception("file_str is empty")
+
+        output_file = f"{output_dir}/{self._name}.py"
+        with open(output_file, "w") as f:
+            f.write(file_str)
+        if self._format:
+            subprocess.Popen(["autopep8", "-a", "-r", "-i", output_file], stdout=subprocess.PIPE).communicate()
+            subprocess.Popen(["isort", output_file], stdout=subprocess.PIPE).communicate()
+
+
+class Libdevice(ExternLibrary):
+    _symbol_groups: Dict[str, List[Symbol]]
+
+    def __init__(self, path) -> None:
+        '''
+        Constructor for Libdevice.
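+        Parses the disassembled libdevice IR and groups signature overloads
+        under a shared op name (see `parse_symbols` and `_group_symbols`).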
+        :param path: path of the libdevice library
+        '''
+        super().__init__("libdevice", path)
+        self._symbol_groups = {}
+        self.is_pure = True
+
+    @staticmethod
+    def _extract_symbol(line) -> Optional[Symbol]:
+        # Extract symbols from a line in the following format:
+        # "define [internal] <ret_type> @<func_name>(<arg_type0>, <arg_type1>, ...)"
+        entries = line.split("@")
+        ret_str = entries[0]
+        func_str = entries[1]
+        # Get ret_type, skip internal symbols
+        ret_strs = ret_str.split()
+        if ret_strs[1] == "internal":
+            return None
+        ret_type = convert_type(ret_strs[1])
+        if ret_type is None:
+            return None
+        # Get function name
+        func_strs = func_str.split("(")
+        func_name = func_strs[0].replace("@", "")
+        op_name = func_name.replace("__nv_", "")
+        if 'ieee' in op_name:
+            return None
+        # Get arg_types
+        arg_strs = func_strs[1].split(",")
+        arg_types = []
+        arg_names = []
+        for i, arg_str in enumerate(arg_strs):
+            arg_type = convert_type(arg_str.split()[0])
+            if arg_type is None:
+                return None
+            arg_name = 'arg' + str(i)
+            arg_types.append(arg_type)
+            arg_names.append(arg_name)
+        if op_name == "sad":
+            # Special case for sad, where the last argument is an unsigned int
+            arg_types[-1] = to_unsigned(arg_types[-1])
+        elif op_name.startswith("u"):
+            # LLVM does not differentiate between signed and unsigned integer types,
+            # so we have to convert the types to unsigned
+            ret_type = to_unsigned(ret_type)
+            for i, arg_type in enumerate(arg_types):
+                arg_types[i] = to_unsigned(arg_type)
+        return Symbol(func_name, op_name, ret_type, arg_names, arg_types)
+
+    def _group_symbols(self) -> None:
+        symbol_set = {}
+        for symbol in self._symbols.values():
+            op_name = symbol.op_name
+            symbol_set[op_name] = symbol
+
+        # Group functions together by renaming.
+        renaming = {
+            'llabs': 'abs', 'acosf': 'acos', 'acoshf': 'acosh', 'dadd_rd': 'add_rd', 'fadd_rd': 'add_rd',
+            'dadd_rn': 'add_rn', 'fadd_rn': 'add_rn', 'dadd_ru': 'add_ru', 'fadd_ru': 'add_ru',
+            'dadd_rz': 'add_rz', 'fadd_rz': 'add_rz', 'asinf': 'asin', 'asinhf': 'asinh', 'atanf': 'atan',
+            'atan2f': 'atan2', 'atanhf': 'atanh', 'brevll': 'brev', 'cbrtf': 'cbrt', 'ceilf': 'ceil',
+            'clzll': 'clz', 'copysignf': 'copysign', 'cosf': 'cos', 'coshf': 'cosh', 'cospif': 'cospi',
+            'cyl_bessel_i0f': 'cyl_bessel_i0', 'cyl_bessel_i1f': 'cyl_bessel_i1', 'fdiv_rd': 'div_rd',
+            'ddiv_rd': 'div_rd', 'fdiv_rn': 'div_rn', 'ddiv_rn': 'div_rn', 'fdiv_ru': 'div_ru',
+            'ddiv_ru': 'div_ru', 'fdiv_rz': 'div_rz', 'ddiv_rz': 'div_rz', 'erff': 'erf', 'erfcf': 'erfc',
+            'erfcinvf': 'erfcinv', 'erfcxf': 'erfcx', 'erfinvf': 'erfinv', 'expf': 'exp', 'exp10f': 'exp10',
+            'exp2f': 'exp2', 'expm1f': 'expm1', 'fabsf': 'abs', 'fabs': 'abs', 'fast_fdividef': 'fast_dividef',
+            'fdimf': 'fdim', 'ffsll': 'ffs', 'floorf': 'floor', 'fmaf': 'fma', 'fmaf_rd': 'fma_rd',
+            'fmaf_rn': 'fma_rn', 'fmaf_ru': 'fma_ru', 'fmaf_rz': 'fma_rz', 'fmodf': 'fmod', 'uhadd': 'hadd',
+            'hypotf': 'hypot', 'ilogbf': 'ilogb', 'isinff': 'isinf', 'isinfd': 'isinf', 'isnanf': 'isnan',
+            'isnand': 'isnan', 'j0f': 'j0', 'j1f': 'j1', 'jnf': 'jn', 'ldexpf': 'ldexp', 'lgammaf': 'lgamma',
+            'llrintf': 'llrint', 'llroundf': 'llround', 'logf': 'log', 'log10f': 'log10', 'log1pf': 'log1p',
+            'log2f': 'log2', 'logbf': 'logb', 'umax': 'max', 'llmax': 'max', 'ullmax': 'max', 'fmaxf': 'max',
+            'fmax': 'max', 'umin': 'min', 'llmin': 'min', 'ullmin': 'min', 'fminf': 'min', 'fmin': 'min',
+            'dmul_rd': 'mul_rd', 'fmul_rd': 'mul_rd', 'dmul_rn': 'mul_rn', 'fmul_rn': 'mul_rn',
+            'dmul_ru': 'mul_ru', 'fmul_ru': 'mul_ru', 'dmul_rz': 'mul_rz', 'fmul_rz': 'mul_rz',
+            'umul24': 'mul24', 'umulhi': 'mulhi',
+            'mul64hi': 'mulhi', 'umul64hi': 'mulhi', 'nearbyintf': 'nearbyint', 'nextafterf': 'nextafter',
+            'norm3df': 'norm3d', 'norm4df': 'norm4d', 'normcdff': 'normcdf', 'normcdfinvf': 'normcdfinv',
+            'popcll': 'popc', 'powif': 'pow', 'powi': 'pow', 'powf': 'pow', 'rcbrtf': 'rcbrt',
+            'frcp_rd': 'rcp_rd', 'drcp_rd': 'rcp_rd', 'frcp_rn': 'rcp_rn', 'drcp_rn': 'rcp_rn',
+            'frcp_ru': 'rcp_ru', 'drcp_ru': 'rcp_ru', 'frcp_rz': 'rcp_rz', 'drcp_rz': 'rcp_rz',
+            'remainderf': 'remainder', 'urhadd': 'rhadd', 'rhypotf': 'rhypot', 'rintf': 'rint',
+            'rnorm3df': 'rnorm3d', 'rnorm4df': 'rnorm4d', 'roundf': 'round', 'rsqrtf': 'rsqrt',
+            'frsqrt_rn': 'rsqrt_rn', 'usad': 'sad', 'scalbnf': 'scalbn', 'signbitf': 'signbit',
+            'signbitd': 'signbit', 'sinf': 'sin', 'sinhf': 'sinh', 'sinpif': 'sinpi', 'sqrtf': 'sqrt',
+            'fsqrt_rd': 'sqrt_rd', 'dsqrt_rd': 'sqrt_rd', 'fsqrt_rn': 'sqrt_rn', 'dsqrt_rn': 'sqrt_rn',
+            'fsqrt_ru': 'sqrt_ru', 'dsqrt_ru': 'sqrt_ru', 'fsqrt_rz': 'sqrt_rz', 'dsqrt_rz': 'sqrt_rz',
+            'fsub_rd': 'sub_rd', 'dsub_rd': 'sub_rd', 'fsub_rn': 'sub_rn', 'dsub_rn': 'sub_rn',
+            'fsub_ru': 'sub_ru', 'dsub_ru': 'sub_ru', 'fsub_rz': 'sub_rz', 'dsub_rz': 'sub_rz',
+            'tanf': 'tan', 'tanhf': 'tanh', 'tgammaf': 'tgamma', 'truncf': 'trunc', 'y0f': 'y0',
+            'y1f': 'y1', 'ynf': 'yn'
+        }
+
+        for symbol in self._symbols.values():
+            op_name = symbol.op_name
+            if op_name in renaming:
+                op_name = renaming[op_name]
+                symbol._op_name = op_name
+            if op_name in self._symbol_groups:
+                self._symbol_groups[op_name].append(symbol)
+            else:
+                self._symbol_groups[op_name] = [symbol]
+
+    def parse_symbols(self, input_file) -> None:
+        if len(self.symbols) > 0:
+            return
+        output = subprocess.check_output(["grep", "define", input_file]).decode().splitlines()
+        for line in output:
+            symbol = self._extract_symbol(line)
+            if symbol is None:
+                continue
+            self._symbols[symbol.name] = symbol
+
+        self._group_symbols()
+
+    def _output_stubs(self) -> str:
+        # Generate python functions in the following format:
+        # @core.extern
+        # def <op_name>(<arg_names>, _builder=None):
+        #     arg_type_symbol_dict = {(<arg_types>,): ("<symbol_name>", <ret_type>)}
+        #     return core.extern_elementwise("libdevice", <libdevice_path>, [<arg_names>], <arg_type_symbol_dict>, _builder)
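+        # For instance (illustrative, showing only the fp32/fp64 overloads of the
+        # `pow` group parsed from libdevice), the emitted stub looks roughly like:
+        #   @core.extern
+        #   def pow(arg0, arg1, _builder=None):
+        #       return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
+        #           {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_powf", core.dtype("fp32")),
+        #            (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_pow", core.dtype("fp64")),
+        #           }, is_pure=True, _builder=_builder)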
+        import_str = "from . import core\n"
+        import_str += "import os\n"
+        import_str += "import functools\n"
+
+        header_str = ""
+        header_str += "@functools.lru_cache()\n"
+        header_str += "def libdevice_path():\n"
+        header_str += "    import torch\n"
+        header_str += "    third_party_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"third_party\")\n"
+        header_str += "    if torch.version.hip is None:\n"
+        header_str += "        default = os.path.join(third_party_dir, \"cuda\", \"lib\", \"libdevice.10.bc\")\n"
+        header_str += "    else:\n"
+        header_str += "        default = ''\n"
+        header_str += "    return os.getenv(\"TRITON_LIBDEVICE_PATH\", default)\n"
+        func_str = ""
+        for symbols in self._symbol_groups.values():
+            func_str += "@core.extern\n"
+            func_name_str = f"def {symbols[0].op_name}("
+            for arg_name in symbols[0].arg_names:
+                func_name_str += f"{arg_name}, "
+            func_name_str += "_builder=None):\n"
+
+            return_str = f"\treturn core.extern_elementwise(\"{self._name}\", libdevice_path(), ["
+            for arg_name in symbols[0].arg_names:
+                return_str += f"{arg_name}, "
+            return_str += "], \n"
+
+            arg_type_symbol_dict_str = "{"
+            for symbol in symbols:
+                arg_type_symbol_dict_str += "("
+                for arg_type in symbol.arg_types:
+                    arg_type_symbol_dict_str += f'core.dtype("{arg_type}"),'
+                ret_type = f'core.dtype("{symbol.ret_type}")'
+                arg_type_symbol_dict_str += "): (\"" + symbol.name + "\", " + ret_type + "),\n"
+            arg_type_symbol_dict_str += "}"
+
+            return_str += arg_type_symbol_dict_str
+            return_str += f", is_pure={self.is_pure}"
+            return_str += ", _builder=_builder)\n"
+
+            func_str += func_name_str + return_str + "\n"
+        file_str = import_str + header_str + func_str
+
+        return file_str
+
+
+class LLVMDisassembler:
+    _path: str
+    _ll_file: str
+
+    def __init__(self, path) -> None:
+        '''
+        Wrapper that invokes llvm-dis to disassemble a given library file.
+        :param path: path to the llvm-dis binary
+        '''
+        self._path = path
+        self._ll_file = "/tmp/extern_lib.ll"
+
+    def disasm(self, lib_path: str) -> None:
+        subprocess.Popen([self._path, lib_path, "-o", self.ll_file], stdout=subprocess.PIPE).communicate()
+
+    @property
+    def ll_file(self) -> str:
+        return self._ll_file
+
+    @property
+    def path(self) -> str:
+        return self._path
+
+
+extern_libs = ["libdevice"]
+
+
+def build(
+    llvm_dis_path: str,
+    lib_path: str,
+    lib_name: str,
+    output_dir: str,
+) -> None:
+    '''
+    Interface function to build the library file.
+    :param llvm_dis_path: path to the llvm-dis binary
+    :param lib_path: path to the external library file
+    :param lib_name: name of the library
+    :param output_dir: path to the output directory
+    '''
+    if lib_name == "libdevice":
+        extern_lib = Libdevice(lib_path)
+    else:
+        raise Exception(f"Unknown extern library: {lib_name}")
+
+    llvm_disassembler = LLVMDisassembler(llvm_dis_path)
+    llvm_disassembler.disasm(lib_path)
+
+    extern_lib.parse_symbols(llvm_disassembler.ll_file)
+    extern_lib.generate_stub_file(output_dir)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--llvm-dis", dest="llvm_dis_path", help="Path to llvm-dis", default="llvm-dis")
+    parser.add_argument("--lib-path", dest="lib_path", help="Path to the extern library")
+    parser.add_argument("--lib-name", dest="lib_name", help="Name of the extern library")
+    parser.add_argument("--output", dest="output_dir", help="Path to the output directory", default="/tmp/")
+    args = parser.parse_args()
+
+    build(args.llvm_dis_path, args.lib_path, args.lib_name, args.output_dir)
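+
+# A minimal sketch of a typical invocation (paths are illustrative assumptions,
+# e.g. a stock CUDA install; adjust to your system):
+#
+#   python build_extern.py \
+#       --llvm-dis /usr/bin/llvm-dis \
+#       --lib-path /usr/local/cuda/nvvm/libdevice/libdevice.10.bc \
+#       --lib-name libdevice \
+#       --output /tmp/
+#
+# This disassembles the bitcode, parses each `define` line into a Symbol,
+# groups overloads under a shared op name (e.g. `__nv_fmaxf`/`__nv_fmax` -> `max`),
+# and writes the generated stubs to /tmp/libdevice.py.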