"""HuggingFace `datasets` loading script for per-language code-commit data.

Each builder config corresponds to one programming language. The data for a
config is downloaded as a zip archive from the `bigcode/commits_ft` dataset
repository and read from the `<lang>/data.json` file inside it, which holds
one JSON object per line.
"""

from glob import glob  # NOTE(review): unused in this file — kept, may be used elsewhere
import urllib.parse
import pandas as pd  # NOTE(review): unused in this file — kept, may be used elsewhere
import json
import os

import datasets

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Code Commits for Instruction Tuning},
author={BigCode},
year={2023}
}
"""

_DESCRIPTION = """\
Code Commits for Instruction Tuning
"""

_HOMEPAGE = ""

_LICENSE = ""

# Every language covered by the dataset; each entry becomes one builder config.
languages = [
    'svg', 'ocaml', 'scheme', 'modelica', 'xpages', 'qmake', 'ada', 'pike',
    'pod', 'bro', 'edn', 'html+django', 'groff', 'pascal', 'nsis',
    'applescript', 'ceylon', 'haml', 'augeas', 'toml', 'c++', 'pony', 'lean',
    'supercollider', 'moonscript', 'gas', 'zig', 'vcl', 'tex', 'html+erb',
    'robotframework', 'unrealscript', 'go', 'racket', 'apl', 'lolcode', 'nix',
    'nu', 'zephir', 'textile', 'inno-setup', 'hy', 'turtle', 'systemverilog',
    'mako', 'sparql', 'gnuplot', 'yang', 'unity3d-asset', 'qml', 'cmake',
    'gdscript', 'xquery', 'python', 'css', 'metal', 'flux', 'clojure', 'less',
    'lfe', 'cartocss', 'asp', 'elixir', 'smt', 'processing', 'awk', 'antlr',
    'chapel', 'igor-pro', 'literate-agda', 'literate-coffeescript',
    'dockerfile', 'dart', 'makefile', 'org', 'shell', 'objective-c++',
    'linker-script', 'markdown', 'haxe', 'mtml', 'assembly',
    'jupyter-notebook', 'cuda', 'scss', 'graphviz-dot', 'perl', 'typescript',
    'smalltalk', 'c#', 'dylan', 'pawn', 'stylus', 'vue', 'rust', 'freemarker',
    'viml', 'cucumber', 'maple', 'purescript', 'io', 'pure-data',
    'api-blueprint', 'erlang', 'batchfile', 'llvm', 'protocol-buffer', 'r',
    'logtalk', 'lsl', 'liquid', 'eagle', 'literate-haskell', 'xproc',
    'cython', 'logos', 'mathematica', 'openscad', 'java', 'bluespec',
    'desktop', 'diff', 'jsoniq', 'elm', 'html', 'abap', 'kotlin', 'crystal',
    'gentoo-ebuild', 'aspectj', 'renpy', 'sqf', 'webidl', 'smarty', 'nimrod',
    'xtend', 'ragel-in-ruby-host', 'haskell', 'factor', 'php',
    'coldfusion-cfc', 'visual-basic', 'ooc', 'netlinx', 'xs', 'lua', 'nesc',
    'xml', 'latte', 'ninja', 'html+eex', 'solidity', 'thrift', 'glsl',
    'html+php', 'red', 'powershell', 'arduino', 'opencl', 'jasmin',
    'harbour', 'urweb', 'dns-zone', 'clean', 'vhdl', 'slash', 'tcl',
    'pov-ray-sdl', 'inform-7', 'mask', 'ini', 'perl6', 'json', 'brainfuck',
    'lilypond', 'purebasic', 'xbase', 'autohotkey', 'saltstack',
    'module-management-system', 'asciidoc', 'handlebars', 'nginx', 'scala',
    'http', 'hcl', 'vala', 'swift', 'graphql', 'csv', 'parrot-assembly',
    'sas', 'postscript', 'mirah', 'ioke', 'tcsh', 'dm', 'mupad', 'json5',
    'livescript', 'emberscript', 'piglatin', 'boo', 'jflex', 'uno', 'agda',
    'volt', 'gettext-catalog', 'rebol', 'm4', 'fancy', 'yaml', 'fortran',
    'g-code', 'monkey', 'nit', 'standard-ml', 'digital-command-language',
    'krl', 'julia', 'oz', 'sourcepawn', 'groovy', 'coffeescript', 'idris',
    'creole', 'text', 'wisp', 'unknown', 'raml', 'slim', 'xslt', 'ats',
    'javascript', 'parrot-internal-representation', 'csound', 'yacc', 'f#',
    'twig', 'coldfusion', 'apacheconf', 'bitbake', 'arc', 'jade', 'rhtml',
    'eiffel', 'blitzmax', 'sass', 'scaml', 'propeller-spin', 'hlsl', 'forth',
    'rdoc', 'rouge', 'c', 'fish', 'jsonld', 'scilab', 'capn-proto',
    'restructuredtext', 'squirrel', 'common-lisp', 'mediawiki',
    'c2hs-haskell', 'jsx', 'isabelle', 'java-server-pages', 'ecl', 'ruby',
    'actionscript', 'pan', 'sage', 'emacs-lisp', 'genshi',
    'groovy-server-pages', 'ston', 'stata', 'sql',
]

# Per-language download URL. The language name is percent-encoded because some
# names contain URL-reserved characters (e.g. 'c++', 'c#', 'f#').
base_url = "https://huggingface.co/datasets/bigcode/commits_ft/resolve/main/data/{lang}.zip"
_URLS = {lang: base_url.format(lang=urllib.parse.quote(lang)) for lang in languages}

VERSION = datasets.Version("1.1.0")

# Single source of truth for the record schema: every field is a plain string.
# Used both to declare Features in `_info` and to build examples in
# `_generate_examples`, so the two can never drift apart.
_STRING_FIELDS = (
    'commit',
    'old_file',
    'new_file',
    'old_contents',
    'new_contents',
    'subject',
    'message',
    'lang',
    'license',
    'repos',
)


class CommitIT(datasets.GeneratorBasedBuilder):
    """Dataset builder exposing one config per programming language."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=VERSION,
            description="This part of dataset covers {lang} language".format(lang=lang),
        )
        for lang in languages
    ]

    DEFAULT_CONFIG_NAME = "python"

    def _info(self):
        """Return dataset metadata; every record field is a string Value."""
        features = datasets.Features(
            {field: datasets.Value("string") for field in _STRING_FIELDS}
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract this config's archive; expose one train split."""
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # The extracted zip contains a `<lang>/data.json`
                    # JSON-lines file.
                    "filepath": os.path.join(data_dir, f"{self.config.name}/data.json"),
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from a JSON-lines file.

        `split` is unused (there is only a train split) but is part of the
        gen_kwargs contract established in `_split_generators`.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                # Yields examples as (key, example) tuples; a missing field in
                # a row raises KeyError, surfacing malformed data immediately.
                yield key, {field: data[field] for field in _STRING_FIELDS}