python_code (string, length 0-992k) | repo_name (string, length 8-46) | file_path (string, length 5-162) |
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import torch
from torch import nn, Tensor
from torchmultimodal.modules.layers.text_embedding import BERTTextEmbeddings
from torchmultimodal.modules.layers.transformer import (
TransformerEncoder,
TransformerOutput,
)
from torchmultimodal.utils.attention import get_extended_attention_mask
class BERTTextEncoder(nn.Module):
"""
General text transformer encoder with embeddings, following BERT.
Can be constructed with any user-provided embeddings and encoder.
Based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py#L870
Attributes:
embeddings (nn.Module): Module that projects text token ids into embeddings.
See :py:class: `torchmultimodal.modules.layers.text_embedding.BERTTextEmbeddings` for interface.
encoder (nn.Module): Module for transformer encoder. See :py:class:
`torchmultimodal.modules.layers.transformer.TransformerEncoder` for interface.
layernorm (nn.Module, optional): Module for layernorm to be applied after encoder. Defaults to ``None``.
pooler (nn.Module, optional): Module for pooler to be applied after layernorm. Defaults to ``None``.
weight_init_fn (Callable, optional): function for custom weight initialization of both the transformer
encoder and embeddings. See :py:func: `torchmultimodal.models.flava.transformer.init_transformer_weights`
as an example. Defaults to ``None``.
Args:
input_ids (Tensor, optional): Tensor of input vocab token ids of shape [batch, seq_len].
attention_mask (Tensor, optional): Tensor indicating which tokens to attend to, of shape [batch, seq_len].
token_type_ids (Tensor, optional): Tensor of input token type ids of shape [batch, seq_len]. In BERT,
used to indicate whether a word is in sentence A or B for next sentence prediction.
position_ids (Tensor, optional): Tensor of input position ids of shape [batch, seq_len].
inputs_embeds (Tensor, optional): Tensor of input embeddings of shape [batch, seq_len, hidden_size],
used if the embeddings are calculated elsewhere instead of from input_ids.
Raises:
ValueError: if input_ids and inputs_embeds are both ``None``.
"""
def __init__(
self,
embeddings: nn.Module,
encoder: nn.Module,
layernorm: Optional[nn.Module] = None,
pooler: Optional[nn.Module] = None,
weight_init_fn: Optional[Callable] = None,
) -> None:
super().__init__()
self.embeddings = embeddings
self.encoder = encoder
# TODO: could be upstreamed to TransformerEncoder?
self.layernorm = layernorm
self.pooler = pooler
if weight_init_fn:
self.apply(weight_init_fn)
def forward(
self,
input_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
return_attn_weights: bool = False,
return_hidden_states: bool = False,
) -> TransformerOutput:
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
else:
raise ValueError("input_ids or inputs_embeds must not be None")
# only mask out padding tokens if no mask is specified
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if hasattr(self.embeddings, "pad_token_id"):
attention_mask[input_ids == self.embeddings.pad_token_id] = 0
# massage attention mask to correct shape for transformer
attention_mask = get_extended_attention_mask(attention_mask)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_output = self.encoder(
embedding_output,
attention_mask=attention_mask,
return_attn_weights=return_attn_weights,
return_hidden_states=return_hidden_states,
)
last_hidden_state = encoder_output.last_hidden_state
pooled_output = encoder_output.pooler_output
if self.layernorm:
last_hidden_state = self.layernorm(last_hidden_state)
if self.pooler:
pooled_output = self.pooler(last_hidden_state)
return TransformerOutput(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_output.hidden_states,
attentions=encoder_output.attentions,
)
def bert_text_encoder(
# transformer encoder params
hidden_size: int = 768,
num_hidden_layers: int = 6,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
dropout: float = 0.1,
transform_act_fn: Callable[..., nn.Module] = nn.GELU,
layer_norm_eps: float = 1e-12,
norm_first: bool = False,
# text embedding params
vocab_size: int = 30522,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
pad_token_id: int = 0,
offset_pos_ids: bool = False,
# layernorm and pooler
layernorm: Optional[nn.Module] = None,
pooler: Optional[nn.Module] = None,
weight_init_fn: Optional[Callable] = None,
) -> BERTTextEncoder:
"""
Returns a BERTTextEncoder with default params identical to HuggingFace's ``bert-base-uncased``.
Ref: https://huggingface.co/bert-base-uncased/resolve/main/config.json. See :py:class:
`torchmultimodal.modules.layers.text_embedding.BERTTextEmbeddings` and :py:class:
`torchmultimodal.modules.layers.transformer.TransformerEncoder` for details on parameters.
"""
embeddings = BERTTextEmbeddings(
hidden_size=hidden_size,
vocab_size=vocab_size,
pad_token_id=pad_token_id,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
layer_norm_eps=layer_norm_eps,
dropout=dropout,
offset_pos_ids=offset_pos_ids,
)
encoder = TransformerEncoder(
n_layer=num_hidden_layers,
d_model=hidden_size,
n_head=num_attention_heads,
dim_feedforward=intermediate_size,
dropout=dropout,
activation=transform_act_fn,
layer_norm_eps=layer_norm_eps,
norm_first=norm_first,
)
return BERTTextEncoder(
embeddings=embeddings,
encoder=encoder,
layernorm=layernorm,
pooler=pooler,
weight_init_fn=weight_init_fn,
)
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/encoders/bert_text_encoder.py |
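A minimal usage sketch for the builder above. The import path and the shape of the returned `TransformerOutput` are assumptions inferred from the file shown here, not a verified API reference.

```python
import torch
# assumed import path, mirroring the file location above
from torchmultimodal.modules.encoders.bert_text_encoder import bert_text_encoder

model = bert_text_encoder(num_hidden_layers=2)        # small config for a quick smoke test
input_ids = torch.randint(0, 30522, (2, 16))          # [batch, seq_len]
attention_mask = torch.ones(2, 16, dtype=torch.long)
out = model(input_ids=input_ids, attention_mask=attention_mask)
print(out.last_hidden_state.shape)                    # expected: torch.Size([2, 16, 768])
```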
import os
import pandas as pd
from tqdm import tqdm
BASE_URL="https://archive.org/download/stackexchange/"
table = pd.read_html(BASE_URL)[0]
sources = [x.replace(" (View Contents)", "") for x in table['Name'].tolist()]
sources = [x for x in sources if x.endswith(".7z")]
for source in tqdm(sources):
# if ".meta." not in source:
print(f"source: {source}")
os.system("wget "+BASE_URL+source+" -O "+"./data/"+source)
os.system("7z x ./data/"+source+" -o./data/"+source[:-3])
os.system(f"mv ./data/{source[:-3]}/Posts.xml ./data/{source[:-3]}.xml")
os.system(f"rm -rf ./data/{source[:-3]}")
os.system(f"rm ./data/{source}")
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/stack_exchange/download.py |
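The loop above silently ignores failures because `os.system` does not raise on non-zero exit codes. Below is a hedged sketch of the same download/extract step using `subprocess.run`, assuming `wget` and `7z` are on PATH just as the original script does; it is not part of the original pipeline.

```python
import subprocess

BASE_URL = "https://archive.org/download/stackexchange/"

def fetch_and_extract(source: str) -> None:
    # same steps as the loop above, but any failing command raises CalledProcessError
    subprocess.run(["wget", BASE_URL + source, "-O", f"./data/{source}"], check=True)
    subprocess.run(["7z", "x", f"./data/{source}", f"-o./data/{source[:-3]}"], check=True)
    subprocess.run(["mv", f"./data/{source[:-3]}/Posts.xml", f"./data/{source[:-3]}.xml"], check=True)
    subprocess.run(["rm", "-rf", f"./data/{source[:-3]}"], check=True)
    subprocess.run(["rm", f"./data/{source}"], check=True)
```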
EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/stack_exchange/__init__.py |
|
import os
import json
LEMMA_DATA_DIR_SE_OUT = os.environ.get("LEMMA_DATA_DIR_SE_OUT", "./data/")
if __name__ == "__main__":
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT,"token_counts", "tokens.json"), "r") as f:
counts = json.load(f)
'''
print a table of the counts
'''
print("|Idx|Site|Token Count|")
print("|---|---|---|")
for idx, (site, count) in enumerate(counts.items()):
print(f"|{idx}|{site}|{count}|")
print(f"|{len(counts.values())}|Total|{sum(counts.values())}|") | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/stack_exchange/print_stats.py |
import os
import json
import tiktoken
from multiprocessing import Pool
from transformers import AutoTokenizer
# enc = tiktoken.get_encoding("r50k_base")
enc = AutoTokenizer.from_pretrained(
"EleutherAI/pythia-6.9b-deduped",
# "gpt2"
)
def get_token_count(qa_pair):
# return len(enc.encode(qa_pair['text']))
return len(enc.tokenize(qa_pair['text']))
LEMMA_DATA_DIR_SE_OUT = os.environ.get("LEMMA_DATA_DIR_SE_OUT", "./stackexchange/")
# if x is a file, not a dir
sites = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))]
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts"), exist_ok=True)
token_counts = {}
for site in sites:
print(f"[INFO] Processing {site}...")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, site), "r") as f:
qa_pairs = [json.loads(x) for x in f.readlines()]
print(f"[INFO] Got {len(qa_pairs)} QA pairs for {site}.")
# token count
token_count = 0
with Pool(24) as p:
token_count = sum(p.map(get_token_count, qa_pairs))
token_counts[site] = token_count
print(f"[INFO] Got {token_count} tokens for {site}.")
# write to file
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts", "tokens.json"), "w") as f:
json.dump(token_counts, f) | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/stack_exchange/token_count.py |
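The commented-out lines above switch between tiktoken and a HuggingFace tokenizer; the two count tokens differently. A quick standalone comparison sketch, assuming both `tiktoken` and `transformers` are installed (the exact counts depend on each tokenizer's vocabulary):

```python
import tiktoken
from transformers import AutoTokenizer

text = "Q: How do I count tokens?\nA: Tokenize the text and take the length."
bpe = tiktoken.get_encoding("r50k_base")        # the encoding the commented-out path used
hf_tok = AutoTokenizer.from_pretrained("gpt2")  # GPT-2 tokenizer, matching the other commented-out alternative

print(len(bpe.encode(text)), len(hf_tok.tokenize(text)))
```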
import os
import json
import sys
import xml.etree.ElementTree as ET
from tqdm import tqdm
sys.path.append("./")
from src.stack_exchange.count import get_sites_count
LEMMA_DATA_DIR_SE = os.environ.get("LEMMA_DATA_DIR_SE", "./data/")
if os.path.exists(os.path.join(LEMMA_DATA_DIR_SE, "counts.json")):
with open(os.path.join(LEMMA_DATA_DIR_SE, "counts.json"), "r") as fp:
counts = json.load(fp)
else:
print("[INFO] Getting counts for sites...")
counts = get_sites_count(LEMMA_DATA_DIR_SE)
# write this to a file
with open(os.path.join(LEMMA_DATA_DIR_SE, "counts.json"), "w") as f:
json.dump(counts, f)
# take first 28
sites = list(counts.keys())[:28]
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE, "parents"), exist_ok=True)
def process_site(site):
parents = {}
qa_pairs = []
print(f"[INFO] Processing {site}...")
# first get the parents dump
if os.path.exists(os.path.join(LEMMA_DATA_DIR_SE, "parents", site)):
with open(os.path.join(LEMMA_DATA_DIR_SE, "parents", site), "r") as f:
parents = json.load(f)
else:
with open(os.path.join(LEMMA_DATA_DIR_SE, site), "r") as f:
for i, line in enumerate(tqdm(f, total=counts[site])):
# first 2 lines are the XML header and the last line is the closing tag
# e.g., counts = 2 => the file has 5 lines, and indices 2 and 3 hold the data rows
if i>1 and i<=counts[site]+1:
root = ET.fromstring(line)
if "ParentId" in root.attrib:
# this is an answer
if root.attrib["ParentId"] not in parents:
parents[root.attrib["ParentId"]] = []
parents[root.attrib["ParentId"]].append({
"id": root.attrib["Id"],
"text": root.attrib["Body"],
"score": root.attrib["Score"]
})
# write parents to file
with open(os.path.join(LEMMA_DATA_DIR_SE, "parents", site), "w") as f:
json.dump(parents, f)
print(f"[INFO] Got {len(parents)} questions for {site}.")
# parents now maps each question id to its answers
# next, walk the file again to pair them with the question texts
with open(os.path.join(LEMMA_DATA_DIR_SE, site), "r") as f:
for i, line in enumerate(tqdm(f, total=counts[site])):
if i>1 and i<=counts[site]+1:
root = ET.fromstring(line)
if "ParentId" not in root.attrib:
post_id = root.attrib["Id"]
if post_id in parents:
# this is a question
qa_pairs.append({
"question": {
"id": post_id,
"text": f"{root.attrib['Title']} {root.attrib['Body']}",
"score": root.attrib["Score"]
},
"answers": parents[post_id]
})
else:
if "Title" in root.attrib:
# if there's a title, treat it as a valid (unanswered) question
body = root.attrib["Body"] if "Body" in root.attrib else ""
score = root.attrib["Score"] if "Score" in root.attrib else 0
qa_pairs.append({
"question": {
"id": post_id,
"text": f"{root.attrib['Title']} {body}",
"score": score
},
})
# write qa_pairs to file
print(f"[INFO] Writing {site} to file...")
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE, "qa_pairs"), exist_ok=True)
with open(os.path.join(LEMMA_DATA_DIR_SE, "qa_pairs", site.removesuffix(".xml")+".jsonl"), "w") as f:
for qa_pair in qa_pairs:
f.write(json.dumps(qa_pair)+"\n")
for each in sites:
process_site(each) | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/stack_exchange/filter.py |
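For reference, each data line in a Stack Exchange Posts.xml dump is a single `<row .../>` element whose attributes (Id, ParentId, Body, Score, Title, ...) are what the parser above reads. A tiny standalone illustration with a made-up row:

```python
import xml.etree.ElementTree as ET

row = '<row Id="42" ParentId="7" Body="&lt;p&gt;Use a dict.&lt;/p&gt;" Score="3" />'
root = ET.fromstring(row)
# answers carry a ParentId and get grouped under that question id
print(root.attrib["ParentId"], root.attrib["Score"])   # 7 3
```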
import re
import os
import sys
import json
import fasttext
from bs4 import BeautifulSoup
from multiprocessing import Pool
sys.path.append("./")
site_name = ""
CLEANR = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
def cleanhtml(raw_html):
raw_html = raw_html.replace("<li>", "\n*")
raw_html = raw_html.replace("</li>", "")
raw_html = raw_html.replace("<ol>", "\n*")
raw_html = raw_html.replace("</ol>", "")
soup = BeautifulSoup(raw_html, "lxml")
return soup.get_text()
class LanguageIdentification:
def __init__(self):
pretrained_lang_model = "data/lid.176.bin"
self.model = fasttext.load_model(pretrained_lang_model)
def predict_lang(self, text):
text = text.replace("\n", " ")
predictions = self.model.predict(text, k=1) # returns the single top matching language
return predictions[0][0].replace("__label__", "")
lang_id = LanguageIdentification()
LEMMA_DATA_DIR_SE = os.environ.get("LEMMA_DATA_DIR_SE", "./data/")
LEMMA_DATA_DIR_SE_OUT = os.environ.get("LEMMA_DATA_DIR_SE_OUT", "./data/")
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE_OUT), exist_ok=True)
def process_qa_pair(pair):
# sort answers by score
if "answers" in pair:
pair["answers"] = sorted(pair["answers"], key=lambda x: x["score"], reverse=True)
answers = "\nA: ".join([ cleanhtml(x["text"]) for x in pair["answers"]])
text = f"Q: { cleanhtml(pair['question']['text'])}\nA: {answers}"
else:
text = f"Q: { cleanhtml(pair['question']['text'])}"
return {
"text": text,
"meta": {
"language": lang_id.predict_lang(text),
"url": f"https://{site_name}/questions/{pair['question']['id']}",
"timestamp": "2023-03-29",
"source": "stackexchange",
"question_score": pair["question"]["score"],
}
}
# load qa_pairs
sites = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE, "qa_pairs"))]
# if needed: sort sites so that stackoverflow is processed first, to gauge memory pressure early
# if this OOMs, split stackoverflow into multiple files
# that does not hurt the completeness of the data, as each line is self-contained
for site in sites:
print(f"Processing {site}")
results = []
site_name = site.removesuffix(".jsonl")
if "stackoverflow_part" in site_name:
site_name = "stackoverflow.com"
# load qa_pairs
with open(os.path.join(LEMMA_DATA_DIR_SE, "qa_pairs", site), "r") as f:
qa_pairs = [json.loads(x) for x in f.readlines()]
# process html to text
with Pool(24) as p:
results = p.map(process_qa_pair, qa_pairs)
print(f"Writing {len(results)} results to {os.path.join(LEMMA_DATA_DIR_SE_OUT, site)}")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, site), "w") as f:
for result in results:
f.write(json.dumps(result) + "\n") | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/stack_exchange/post_processing.py |
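A small standalone check of the HTML-to-text step above. The function is restated here so the snippet runs without importing the script (which has top-level side effects such as loading the fasttext model); it requires beautifulsoup4 and lxml.

```python
from bs4 import BeautifulSoup

# same cleaning logic as cleanhtml() in the script above, restated for a standalone run
def cleanhtml(raw_html: str) -> str:
    raw_html = raw_html.replace("<li>", "\n*").replace("</li>", "")
    raw_html = raw_html.replace("<ol>", "\n*").replace("</ol>", "")
    return BeautifulSoup(raw_html, "lxml").get_text()

sample = "<p>Use <code>git status</code>:</p><ol><li>check the diff</li><li>commit</li></ol>"
print(cleanhtml(sample))   # plain text; each <li> becomes a '*'-prefixed line
```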
import os
import json
from tqdm import tqdm
import xml.etree.ElementTree as ET
LEMMA_DATA_DIR_SE = os.environ.get("LEMMA_DATA_DIR_SE", "./data/stack_exchange/")
def get_sites_count(path=LEMMA_DATA_DIR_SE):
sites = os.listdir(path)
sites = [x for x in sites if x.endswith(".xml")]
counts = {}
for site in tqdm(sites):
if site == ".DS_Store":
continue
# read the file
with open(os.path.join(path, site), "r") as f:
# read # lines
count = sum(1 for line in f)
counts[site] = count-3 # subtract the 2 header lines and the closing tag
# sort the counts
counts = {k: v for k, v in sorted(counts.items(), key=lambda item: item[1], reverse=True)}
return counts
if __name__ == "__main__":
counts = get_sites_count()
'''
print a table of the counts
'''
print("|Idx|Site|Count|")
print("|---|---|---|")
# take the first 28 sites
for idx, (site, count) in enumerate(counts.items()):
if idx < 28:
print(f"|{idx}|{site}|{count}|")
# write to file
with open(os.path.join(LEMMA_DATA_DIR_SE, "counts.json"), "w") as f:
json.dump(counts, f) | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/stack_exchange/count.py |
import argparse
from datasets import load_dataset
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default=None,
help="Path to the wikipedia data directory.")
args = parser.parse_args()
LANGUAGES = [
"bg", "ca", "cs", "da", "de", "en", "es", "fr", "hr", "hu",
"it", "nl", "pl", "pt", "ro", "ru", "sl", "sr", "sv", "uk"
]
DUMP_DATE = "20230320"
def get_data(lan, date, data_dir: pathlib.Path):
wiki_dataset = load_dataset(
"wikipedia", language=lan, date=date, beam_runner="DirectRunner"
)
for split, dataset in wiki_dataset.items():
tgt_fp = data_dir / f"wiki_{lan}_{date}_{split}.jsonl"
dataset.to_json(tgt_fp)
print("Finished Downloading %s %s. There are total %d pages." % (
lan, date, len(dataset["id"])))
if __name__ == "__main__":
if args.data_dir is None:
raise ValueError("missing arg --data_dir.")
for lang in LANGUAGES:
get_data(lang, DUMP_DATE, data_dir=pathlib.Path(args.data_dir))
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/wiki/download.py |
EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/wiki/__init__.py |
|
import os
import json
from multiprocessing import Pool
from transformers import AutoTokenizer
print("start loading!")
enc = AutoTokenizer.from_pretrained(
"EleutherAI/pythia-6.9b-deduped",
)
print("end loading!")
def get_token_count(qa_pair):
return len(enc.tokenize(qa_pair['text']))
LEMMA_DATA_DIR_SE_OUT = "./data/wikipedia/"
sites = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))]
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts"), exist_ok=True)
token_counts = {}
for site in sites:
print(f"[INFO] Processing {site}...")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, site), "r") as f:
qa_pairs = [json.loads(x) for x in f.readlines()]
print(f"[INFO] Got {len(qa_pairs)} wikipedia pages for {site}.")
token_count = 0
with Pool(100) as p:
token_count = sum(p.map(get_token_count, qa_pairs))
token_counts[site] = token_count
print(f"[INFO] Got {token_count} tokens for {site}.")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts", site), "w") as f:
json.dump(token_counts, f) | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/wiki/token_count.py |
import os
import json
LEMMA_DATA_DIR_SE_OUT = "./data/wikipedia/"
LEMMA_DATA_SAVE_DIR = "./data/wikipedia/wiki-full.jsonl"
files = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))]
files.sort()
with open(LEMMA_DATA_SAVE_DIR, "w") as fw:
for file in files:
lan = file.split("_")[1]
date = file.split("_")[2]
print("Now proceeding %s"%file, lan, date)
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, file), "r") as f:
lines = f.readlines()
for line in lines:
now = json.loads(line)
new = {"text": now["text"], "meta": {"title": now["title"], "url": now["url"], "language": lan, "timestamp": date}}
fw.write(json.dumps(new) + "\n")
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/wiki/convert_format.py |
import argparse
import hashlib
import gzip
import json
import re
import uuid
from datetime import datetime
from typing import Dict, Union
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default=None)
parser.add_argument('--target_dir', type=str,
default="./data/github/processed")
args = parser.parse_args()
# Regex to strip repeated copyright comment blocks
CPAT = re.compile("copyright", re.IGNORECASE)
PAT = re.compile("/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/")
def get_timestamp() -> str:
return datetime.now().isoformat()
def clean_copyright_comments(content: str):
r = PAT.search(content)
if r:
# found one, now see if it contains "copyright", if so strip it
span = r.span()
sub = content[span[0]:span[1]]
if CPAT.search(sub):
# cut it
content = content[: span[0]] + content[span[1]:]
return content
lines = content.split('\n')
skip = 0
# Greedily strip a leading line-comment block; most such blocks
# are copyright headers
for k in range(len(lines)):
if (
lines[k].startswith("//") or
lines[k].startswith("#") or
lines[k].startswith("--") or
not lines[k]
):
skip = skip + 1
else:
break
if skip:
# we skipped, consume it
content = "\n".join(lines[skip:])
return content
def get_filecontent_stats(content: str) -> Dict[str, Union[int, str]]:
# split content into lines and get line lengths
line_lengths = list(map(len, content.splitlines()))
if len(line_lengths) == 0:
return {
"line_count": 0,
"max_line_length": 0,
"avg_line_length": 0,
"alnum_prop": 0,
}
# get max line length
max_length = max(line_lengths)
# get average line length
avg_length = len(content) / len(line_lengths)
# get proportion of alphanumeric characters
alnum_count = sum(map(lambda char: 1 if char.isalnum() else 0, content))
alnum_prop = alnum_count / len(content)
return {
"line_count": len(line_lengths),
"max_line_length": max_length,
"avg_line_length": avg_length,
"alnum_prop": alnum_prop,
}
def preprocess_source(source_fp: pathlib.Path, hash_table: dict):
chunk_stats = []
cleaned_records = []
with gzip.open(source_fp, mode="rt", encoding="utf-8") as in_file:
while True:
jstr = in_file.readline()
if not jstr:
break
result = json.loads(jstr)
# skip public key certificates
if result['path'].endswith(".crt"):
continue
if result['path'] == "LICENSE":
continue
# compute hash of content
digest = hashlib.md5(result['content'].encode('utf8')).hexdigest()
# skip if we've seen this before
if digest in hash_table:
continue
# add to hash table
hash_table[digest] = 1
# look for C style multi line comment blocks
try:
content = clean_copyright_comments(result['content'])
except Exception as e:
print(f"[{get_timestamp()}][ERROR] "
f"fp={source_fp}; "
f"Error cleaning copyright comments: {e}")
continue
# get file content stats (line count, max line length, avg line
# length)
try:
file_stats = get_filecontent_stats(content)
except Exception as e:
print(f"[{get_timestamp()}][ERROR] "
f"fp={source_fp}; "
f"Error getting file stats: {e}")
continue
# add hash to file stats for later deduplication
file_stats["content_hash"] = digest
file_stats["path"] = result.get('path', "")
chunk_stats.append(file_stats)
# bring result into the right format
record = {
"text": content,
"meta": {
"content_hash": digest,
"timestamp": "",
"source": "github",
"line_count": file_stats["line_count"],
"max_line_length": file_stats["max_line_length"],
"avg_line_length": file_stats["avg_line_length"],
"alnum_prop": file_stats["alnum_prop"],
**{
k: v for k, v in result.items() if k != "content"
}
}
}
cleaned_records.append(record)
return chunk_stats, cleaned_records
def main():
flush_every = 20
run_id = uuid.uuid4().hex
run_fp = pathlib.Path(args.target_dir) / f"run_{run_id}.jsonl"
stats_fp = pathlib.Path(args.target_dir) / f"stats_{run_id}.jsonl"
print(f"[{get_timestamp()}][INFO] Writing records to {run_fp}")
print(f"[{get_timestamp()}][INFO] Writing stats to {stats_fp}")
stats_file = open(stats_fp, "w")
records_file = open(run_fp, "w")
# process list of *.gz files in input_file
with open(args.input, "r") as input_file:
files_to_process = input_file.readlines()
total_files_to_process = len(files_to_process)
hash_table = {}
for file_num, fp in enumerate(files_to_process, start=1):
fp = fp.strip()
if not fp:
print(f"[{get_timestamp()}][WARNING]"
f"[{file_num}/{total_files_to_process}] "
f"Skipping empty line {fp}")
continue
if not fp.endswith(".gz"):
print(f"[{get_timestamp()}][WARNING]"
f"[{file_num}/{total_files_to_process}] "
f"Skipping {fp}")
continue
source_fp = pathlib.Path(fp)
print(f"[{get_timestamp()}][INFO]"
f"[{file_num}/{total_files_to_process}] "
f"Processing {fp}")
# get file stats and clean records
chunk_stats, cleaned_records = preprocess_source(
source_fp, hash_table
)
# write out stats
for stats in chunk_stats:
stats_file.write(json.dumps(stats) + "\n")
# write out cleaned records
for record in cleaned_records:
records_file.write(json.dumps(record) + "\n")
if file_num % flush_every == 0:
# make sure data is written to disk
print(f"[{get_timestamp()}][INFO] Flushing ...")
stats_file.flush()
records_file.flush()
stats_file.close()
records_file.close()
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/github/github_clean_dedup_local.py |
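The deduplication inside preprocess_source above is a plain exact-match filter: an MD5 digest of each file's content is the key into a shared hash table, and any later file with the same digest is dropped. A minimal standalone sketch of that idea:

```python
import hashlib

def dedup_exact(contents):
    # keep the first occurrence of each distinct content string, as in preprocess_source above
    seen, kept = {}, []
    for text in contents:
        digest = hashlib.md5(text.encode("utf8")).hexdigest()
        if digest in seen:
            continue
        seen[digest] = 1
        kept.append(text)
    return kept

print(len(dedup_exact(["print(1)\n", "print(2)\n", "print(1)\n"])))   # 2
```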
EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/github/__init__.py |
|
import argparse
from datetime import datetime
import json
import multiprocessing as mp
import os
import gzip
from transformers import AutoTokenizer
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--data_file', type=str, default=None)
parser.add_argument('--target_dir', type=str, default=None)
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-6.9b-deduped")
extensions_whitelist = (".asm", ".bat", ".cmd", ".c", ".h", ".cs", ".cpp",
".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H",
".cmake", ".css", ".dockerfile", ".f90", ".f", ".f03",
".f08", ".f77", ".f95", ".for", ".fpp", ".go", ".hs",
".html", ".java", ".js", ".jl", ".lua", ".md",
".markdown", ".php", ".php3", ".php4", ".php5",
".phps", ".phpt", ".pl", ".pm", ".pod", ".perl",
".ps1", ".psd1", ".psm1", ".py", ".rb", ".rs", ".sql",
".scala", ".sh", ".bash", ".command", ".zsh", ".ts",
".tsx", ".tex", ".vb", "Dockerfile", "Makefile",
".xml", ".rst", ".m", ".smali")
def get_token_count(text):
token_count = len(tokenizer.tokenize(text))
return token_count
def get_timestamp() -> str:
return datetime.now().isoformat()
def discard_record(record):
""" return True if we discard the record """
text = record["text"]
metadata = record["meta"]
# discard empty records
if len(text) == 0:
return True
# discard all records that are not whitelisted
if not metadata["path"].endswith(extensions_whitelist):
return True
# discard files whose maximum line length is greater than 1000
if metadata["max_line_length"] > 1000:
return True
# discard files whose average line length is greater than 100
if metadata["avg_line_length"] > 100:
return True
# discard files whose proportion of alphanumeric characters is less than
# 0.25
if metadata["alnum_prop"] < 0.25:
return True
num_tokens = get_token_count(text)
num_alpha = len([c for c in text if c.isalpha()])
if num_alpha / num_tokens < 1.5:
return True
return False
def filter_line(line):
try:
record = json.loads(line)
except json.decoder.JSONDecodeError:
return None
if discard_record(record):
return None
return line
def process_lines_batch(lines_batch, out_file, num_cpus):
if len(lines_batch) == 0:
return
with mp.Pool(processes=num_cpus - 1) as pool:
filtered_lines = pool.map(filter_line, lines_batch)
for line in filtered_lines:
if line is not None:
out_file.write(line)
out_file.flush()
def main():
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", mp.cpu_count()))
batch_size = num_cpus * 5_000
input_fp = pathlib.Path(args.data_file)
target_dir = pathlib.Path(args.target_dir)
output_fp = target_dir / input_fp.name.replace("deduped_", "filtered_")
output_fp = output_fp.with_suffix(".jsonl.gz")
print(f"[{get_timestamp()}][INFO] Processing {input_fp}")
print(f"[{get_timestamp()}][INFO] Writing to {output_fp}")
out_file = gzip.open(output_fp, "wt", encoding="utf-8")
try:
with open(input_fp, "r") as in_file:
while True:
lines_batch = []
# accumulate batch
while True:
line = in_file.readline()
if not line:
raise StopIteration
lines_batch.append(line)
if len(lines_batch) == batch_size:
break
process_lines_batch(lines_batch, out_file, num_cpus)
except StopIteration:
process_lines_batch(lines_batch, out_file, num_cpus)
out_file.close()
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/github/github_run_filter.py |
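The quality heuristics in discard_record above, applied to a toy metadata record. This standalone sketch mirrors only the path, line-length, and alphanumeric-proportion checks; the tokenizer-based alpha-to-token ratio is omitted so the snippet runs without downloading a model. The example record and the abbreviated whitelist are illustrative, not taken from the dataset.

```python
meta = {
    "path": "src/generated/bundle.min.js",
    "max_line_length": 20000,   # minified files typically blow past the 1000-character cutoff
    "avg_line_length": 5000,
    "alnum_prop": 0.62,
}
discard = (
    not meta["path"].endswith((".py", ".js", ".c", ".h"))   # abbreviated whitelist
    or meta["max_line_length"] > 1000
    or meta["avg_line_length"] > 100
    or meta["alnum_prop"] < 0.25
)
print(discard)   # True: the line-length limits are exceeded
```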
import argparse
import os
from transformers import AutoTokenizer
import json
import multiprocessing as mp
import pathlib
from datetime import datetime
parser = argparse.ArgumentParser()
parser.add_argument('--data_file', type=str, default=None)
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-6.9b-deduped")
FRACTION = 0.1
def get_timestamp() -> str:
return datetime.now().isoformat()
def get_token_count(text):
return len(tokenizer.tokenize(text))
def main():
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", mp.cpu_count()))
data_fp = pathlib.Path(args.data_file)
# get total number of records in file
print(f"[{get_timestamp()}][INFO] Counting records in {data_fp} ...")
with open(data_fp, "r") as f:
num_records = sum(1 for _ in f)
print(f"[{get_timestamp()}][INFO] Found {num_records} records.")
print(f"[{get_timestamp()}][INFO] Loading data...")
with open(data_fp, "r") as f:
# get a batch of records
records = []
for _ in range(int(num_records * FRACTION)):
line = f.readline()
if not line:
break
try:
record = json.loads(line)
except json.decoder.JSONDecodeError:
continue
records.append(record["text"])
print(f"[{get_timestamp()}][INFO] Start token count...")
# count tokens in records
with mp.Pool(num_cpus) as pool:
token_counts = pool.map(get_token_count, records)
total_token_count = sum(token_counts)
result = {
"total_token_count": total_token_count,
"sampling_fraction": FRACTION,
"total_count_estimate": total_token_count / FRACTION
}
out_fp = data_fp.parent / \
f"{data_fp.stem.replace('deduped', 'token_count')}.json"
with open(out_fp, mode="w") as out:
out.write(json.dumps(result))
print(json.dumps(result, indent=4))
print(f"[{get_timestamp()}][INFO] Result written to {out_fp}.")
print(f"[{get_timestamp()}][INFO] Done.")
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/github/github_token_count.py |
import argparse
import json
from datetime import datetime
from typing import Dict
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--first_step_dir', type=str, default=None)
parser.add_argument('--target_dir', type=str, default=None)
args = parser.parse_args()
def get_timestamp() -> str:
return datetime.now().isoformat()
def process_stats_file(source_fp: pathlib.Path, hash_table: Dict[str, str]):
deduped_stats = []
deduped_hashes = []
with open(source_fp, mode="r") as in_file:
while True:
jstr = in_file.readline()
if not jstr:
break
record_stats = json.loads(jstr)
content_hash = record_stats["content_hash"]
if content_hash in hash_table:
# skip this record since it's a duplicate
continue
hash_table[content_hash] = content_hash
deduped_stats.append(record_stats)
deduped_hashes.append(content_hash)
return hash_table, deduped_stats, deduped_hashes
def main():
first_step_dir = pathlib.Path(args.first_step_dir)
deduped_stats_fp = pathlib.Path(args.target_dir) / "stats_deduped.jsonl"
print(f"[{get_timestamp()}][INFO] Deduplicating "
f"records from {first_step_dir}")
# get list of stats files
stats_filepaths = list(first_step_dir.glob("stats_*.jsonl"))
total_files_to_process = len(stats_filepaths)
deduped_stats_file = open(deduped_stats_fp, "w")
hash_set = {}
for file_num, fp in enumerate(stats_filepaths, start=1):
print(f"[{get_timestamp()}][INFO]"
f"[{file_num}/{total_files_to_process}] "
f"Processing {fp}")
hash_set, deduped_stats, deduped_hashes = process_stats_file(
fp, hash_set
)
# write out stats
for stats in deduped_stats:
deduped_stats_file.write(json.dumps(stats) + "\n")
# write out jsonl to hashes
out_fn = fp.name.replace("stats_", "hashes_")
with open(pathlib.Path(args.target_dir) / out_fn, "w") as f:
f.write(json.dumps({"hashes": deduped_hashes}) + "\n")
print(f"[{get_timestamp()}][INFO] Flushing ...")
deduped_stats_file.flush()
deduped_stats_file.close()
print(f"[{get_timestamp()}][INFO] "
f"Total number of unique records: {len(hash_set)}")
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/github/github_global_dedup.py |
import argparse
import json
from datetime import datetime
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument(
'--first_step_dir', type=str,
default="./data/github/processed_v3"
)
parser.add_argument(
'--input', type=str,
default="data/github/processed_v3/run_ce60fbbc14684ed8b659054801e419c8.jsonl"
)
parser.add_argument(
'--target_dir', type=str,
default="./data/github/processed_v3_deduped"
)
args = parser.parse_args()
def get_timestamp() -> str:
return datetime.now().isoformat()
def main():
input_fp = pathlib.Path(args.input)
target_dir = pathlib.Path(args.target_dir)
output_fp = target_dir / input_fp.name.replace("run_", "deduped_")
# load hashes into memory
hashes_fp = target_dir / input_fp.name.replace("run_", "hashes_")
with open(hashes_fp) as hf:
globally_unique_hashes = hf.readlines()[0]
globally_unique_hashes = set(json.loads(globally_unique_hashes)["hashes"])
output_file = open(output_fp, "w")
print(f"[{get_timestamp()}][INFO]"
f" Processing {input_fp}")
print(f"[{get_timestamp()}][INFO]"
f" Writing to {output_fp}")
print(f"[{get_timestamp()}][INFO]"
f" Using hashes from {hashes_fp}")
nrecs = 0
with open(input_fp, "r") as in_file:
while True:
jstr = in_file.readline()
if not jstr:
break
record = json.loads(jstr)
content_hash = record["meta"]["content_hash"]
if content_hash not in globally_unique_hashes:
continue
# write to output file
output_file.write(json.dumps(record) + "\n")
nrecs += 1
output_file.close()
print(f"[{get_timestamp()}][INFO]"
f" Processed {nrecs} records")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/github/github_merge_dedup.py |
from datasets import load_dataset
book_dataset = load_dataset("the_pile_books3")
for split, dataset in book_dataset.items():
dataset.to_json(f"./data/book/books3-{split}.jsonl")
pg19_dataset = load_dataset("pg19")
for split, dataset in pg19_dataset.items():
dataset.to_json(f"./data/book/pg19-{split}.jsonl") | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/book/download.py |
EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/book/__init__.py |
|
# Copyright 2023 Ontocord.ai, Together Computer, ETH Zürich, Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Process, Queue
import pickle
import tarfile
import os
import re
from multiprocessing import Pool
from simhash import Simhash
import json
from datetime import datetime
width = 6
hash_k = 5
max_hash_len = 0
def get_features(s):
s = s.lower()
s = re.sub(r'[^\w]+', '', s)
return [s[i:i + width] for i in range(max(len(s) - width + 1, 1))]
def pg19_index(num):
hashes = []
members = []
print("Starting pg19_%0.3d"%(num), len(hashes))
with open("./data/book/split/pg19_%0.3d"%(num), "r") as f:
lines = f.readlines()
for idx, i in enumerate(lines):
if idx % 200 == 0:
print("This is pg19_%0.3d"%(num), idx)
member = json.loads(i)
try:
if max_hash_len == 0:
hashes.append((str(idx + num * 2000), Simhash(get_features(member['text']))))
else:
hashes.append((str(idx + num * 2000), Simhash(get_features(member['text'][:max_hash_len]))))
members.append(member)
except:
continue
print("Finishing pg19_%0.3d"%(num), len(hashes), len(members))
return (hashes, members)
def book_index(num):
hashes = []
members = []
print("Starting book_%0.3d"%(num), len(hashes))
with open("./data/book/split/books3_%0.3d"%(num), "r") as f:
lines = f.readlines()
for idx, i in enumerate(lines):
if idx % 200 == 0:
print("This is book_%0.3d"%(num), idx)
member = json.loads(i)
try:
if max_hash_len == 0:
hashes.append((str(idx + num * 2000), Simhash(get_features(member['text']))))
else:
hashes.append((str(idx + num * 2000), Simhash(get_features(member['text'][:max_hash_len]))))
members.append(member)
except:
continue
print("Finishing book_%0.3d"%(num), len(hashes), len(members))
return (hashes, members)
def get_pg19(njobs):
with Pool(njobs) as p:
hashes_members = p.map(pg19_index, [i for i in range(15)])
return hashes_members
def get_book(njobs):
with Pool(njobs) as p:
hashes_members = p.map(book_index, [i for i in range(99)])
return hashes_members
def split_list(list, n):
length = len(list)
return [list[i*length // n: (i+1)*length // n] for i in range(n)]
def find_match(args):
i, index = args
value_dict = {}
for item in i:
flag = 1
try:
now_list = index.get_near_dups(item[1])
for x in now_list:
if int(x) >= int(item[0]):
continue
flag = 0
break
value_dict[item[0]] = flag
except:
value_dict[item[0]] = flag
return value_dict
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-w', type=int, default=6, help='the window size')
parser.add_argument('-k', type=int, default=5, help='find K nearest region')
parser.add_argument('-l', type=int, default=0, help='the max length of the text for hashing, 0 means no limit')
parser.add_argument('-n', type=int, default=100, help='the number of processes to run')
args = parser.parse_args()
width = args.w
hash_k = args.k
max_hash_len = args.l
n_jobs = args.n
outfile = "./data/book/book.jsonl"
hashes_members = get_pg19(n_jobs)
hashes_members.extend(get_book(n_jobs))
print("Finish getting hashes and members!")
import itertools
hashes = list(itertools.chain(*[item[0] for item in hashes_members]))
members = list(itertools.chain(*[item[1] for item in hashes_members]))
from simhash import SimhashIndex
index = SimhashIndex(hashes, k=hash_k)
print("Finish building index!")
n_hashes = split_list(hashes, n_jobs)
with Pool(n_jobs) as p:
temp_dict = p.map(find_match, [(i, index) for i in n_hashes])
value_dict = {}
for dict in temp_dict:
for i in dict:
value_dict[i] = dict[i]
print("Finish finding matches!")
mem_hashes = list(zip(members, hashes))
with open(outfile, 'w') as f:
for mem, a_hash in mem_hashes:
if value_dict[a_hash[0]] == 1:
meta = {}
for feature in mem:
if feature != "text":
meta[feature] = mem[feature]
new = {"meta": meta, "text": mem["text"]}
f.write(json.dumps(new) + '\n')
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/book/dedup.py |
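The near-duplicate detection above hashes 6-character shingles of each book into a Simhash fingerprint and queries a SimhashIndex for fingerprints within Hamming distance k. A tiny self-contained demonstration with made-up documents, requiring the `simhash` package:

```python
import re
from simhash import Simhash, SimhashIndex

def get_features(s: str, width: int = 6):
    # same 6-gram shingling as in the script above
    s = re.sub(r'[^\w]+', '', s.lower())
    return [s[i:i + width] for i in range(max(len(s) - width + 1, 1))]

docs = {
    "1": "The quick brown fox jumps over the lazy dog and runs away into the forest.",
    "2": "The QUICK brown fox jumps over the lazy dog, and runs away into the forest!",  # same modulo case/punctuation
    "3": "Simhash indexes make near-duplicate lookup cheap for large corpora.",
}
index = SimhashIndex([(key, Simhash(get_features(text))) for key, text in docs.items()], k=5)
# doc 2 is identical to doc 1 after lowercasing and punctuation stripping, so both ids are returned
print(index.get_near_dups(Simhash(get_features(docs["2"]))))
```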
import os
import json
from multiprocessing import Pool
from transformers import AutoTokenizer
enc = AutoTokenizer.from_pretrained(
"EleutherAI/pythia-6.9b-deduped",
)
def get_token_count(qa_pair):
return len(enc.tokenize(qa_pair['text']))
LEMMA_DATA_DIR_SE_OUT = "./data/book/"
sites = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))]
sites.sort()
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts"), exist_ok=True)
token_counts = {}
for site in sites:
print(f"[INFO] Processing {site}...")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, site), "r") as f:
qa_pairs = [json.loads(x) for x in f.readlines()]
print(f"[INFO] Got {len(qa_pairs)} books for {site}.")
token_count = 0
with Pool(100) as p:
token_count = sum(p.map(get_token_count, qa_pairs))
token_counts[site] = token_count
print(f"[INFO] Got {token_count} tokens for {site}.")
summ = 0
for i in token_counts:
print(f"{i}: {token_counts[i]}")
summ += token_counts[i]
print("Total: ", summ)
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts", site), "w") as f:
json.dump(token_counts, f)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/book/token_count.py |
import argparse
from datetime import datetime
import json
import gzip
import os
import pathlib
import joblib
from joblib import Parallel, delayed
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default="./data/c4/en")
parser.add_argument('--output_dir', type=str, default="./data/c4/processed_en")
parser.add_argument('--max_files', type=int, default=-1)
args = parser.parse_args()
def get_timestamp() -> str:
return datetime.now().isoformat()
def process_record(record):
return {
"text": record["text"],
"meta": {
"timestamp": record["timestamp"],
"url": record["url"],
"language": "en",
"source": "c4"
}
}
def process_file(fp):
print(f"[{get_timestamp()}][INFO] start processing {fp}...")
out_dir = pathlib.Path(args.output_dir)
out_fp = out_dir / fp.with_suffix("").name.replace("json", "jsonl")
with gzip.open(fp, "r") as in_f:
records = [json.loads(line) for line in in_f.readlines()]
with open(out_fp, "w") as out_f:
for record in records:
record = process_record(record)
if record is not None:
out_f.write(json.dumps(record) + "\n")
print(f"[{get_timestamp()}][INFO] done processing {fp}...")
def main():
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", joblib.cpu_count()))
print(f"Using {num_cpus} processes")
out_dir = pathlib.Path(args.output_dir)
if not out_dir.exists():
out_dir.mkdir(parents=True)
records_files = list(pathlib.Path(args.data_dir).glob("*.json.gz"))
if args.max_files > 0:
records_files = records_files[:args.max_files]
Parallel(n_jobs=num_cpus)(
delayed(process_file)(fp) for fp in records_files
)
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/c4/c4_reformat.py |
EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/c4/__init__.py |
|
import argparse
import boto3
from botocore.exceptions import ClientError
import configparser
import itertools
import numpy as np
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--aws_config', type=str, help='aws config file')
parser.add_argument('--target_dir', type=str, default="./data/arxiv")
parser.add_argument('--workers', type=int, default=1)
parser.add_argument('--input', type=str,
help='input file from which to read keys. '
'This is only used when running on slurm.')
parser.add_argument('--local', action='store_true')
parser.add_argument('--setup', action='store_true',
help='if set, we partition the keys into chunks.')
parser.add_argument('--max_files', type=int, default=-1,
help='max files to download, useful for testing')
args = parser.parse_args()
class ArxivDownloader:
def __init__(self, config_file: str):
# import configs from config file
configs = configparser.SafeConfigParser()
configs.read(config_file)
# Create S3 resource & set configs
self.s3resource = boto3.resource(
's3', # the AWS resource we want to use
aws_access_key_id=configs['DEFAULT']['ACCESS_KEY'],
aws_secret_access_key=configs['DEFAULT']['SECRET_KEY'],
region_name='us-east-1' # same region arxiv bucket is in
)
def run(self, input_file: str, tgt_dir: pathlib.Path, max_files=-1):
(tgt_dir / 'src').mkdir(exist_ok=True, parents=True)
with open(input_file, 'r') as f:
file_keys = f.readlines()
files_downloaded = 0
for key in file_keys:
self.__download_file(tgt_dir=tgt_dir, key=key.strip())
files_downloaded += 1
if files_downloaded >= max_files > 0:
break
def __download_file(self, key, tgt_dir: pathlib.Path):
print('\nDownloading s3://arxiv/{} to {}...'.format(key, pathlib.Path(tgt_dir, key)))
try:
self.s3resource.meta.client.download_file(
Bucket='arxiv',
Key=key,
Filename=pathlib.Path(tgt_dir, key),
ExtraArgs={'RequestPayer': 'requester'})
except ClientError as e:
if e.response['Error']['Code'] == "404":
print('ERROR: ' + key + " does not exist in arxiv bucket")
else:
try:
code = e.response['Error']['Code']
msg = e.response['Error']['Message']
print(f"UNKNOWN ERROR: code={code}; msg={msg}")
except Exception as e:
print("UNKNOWN ERROR for key ", key, e)
def partition_keys(
partitions_dir: pathlib.Path, config_file: str, workers: int
):
r"""Partitions the keys of the arxiv bucket into chunks for parallel
download.
@param partitions_dir: the directory to save the partition files to (will be
created if it doesn't exist)
@param config_file: the path to the config file containing the aws
credentials
@param workers: the number of workers to partition the keys into
"""
partitions_dir = pathlib.Path(partitions_dir).absolute()
partitions_dir.mkdir(parents=True, exist_ok=True)
# Securely import configs from private config file
configs = configparser.SafeConfigParser()
configs.read(config_file)
# Create S3 resource & set configs
print('Connecting to Amazon S3...')
s3resource = boto3.resource(
's3', # the AWS resource we want to use
aws_access_key_id=configs['DEFAULT']['ACCESS_KEY'],
aws_secret_access_key=configs['DEFAULT']['SECRET_KEY'],
region_name='us-east-1' # same region arxiv bucket is in
)
# Create a reusable Paginator
paginator = s3resource.meta.client.get_paginator('list_objects_v2')
# Create a PageIterator from the Paginator
page_iterator = paginator.paginate(
Bucket='arxiv',
RequestPayer='requester',
Prefix='src/'
)
# partition keys into chunks
file_parts = np.array_split(list(
itertools.chain(
*[
[
file['Key'] for file in page['Contents']
if file['Key'].endswith(".tar")
]
for page in page_iterator
]
)),
indices_or_sections=workers
)
# save chunks to disk as text files
for i, part in enumerate(file_parts):
part_fp = partitions_dir / f"part_{i}.txt"
with open(part_fp, "w") as f:
f.write("\n".join(part))
print(f"Created partition {part_fp}.")
def run_download(
input_file: str,
target_dir: pathlib.Path,
max_files: int,
aws_config: str
):
# create downloader
arxiv_downloader = ArxivDownloader(config_file=aws_config)
# run download
arxiv_downloader.run(
input_file=input_file,
tgt_dir=target_dir,
max_files=max_files
)
def main():
if not args.local and not args.setup:
# here we only download the files; this requires that setup has already
# been run
run_download(input_file=args.input,
target_dir=pathlib.Path(args.target_dir),
max_files=args.max_files,
aws_config=args.aws_config)
return
# create directories
target_dir = pathlib.Path(args.target_dir)
partitions_dir = target_dir / 'partitions'
if args.setup:
# here we only partition the keys into chunks; no download yet
partition_keys(partitions_dir=partitions_dir,
config_file=args.aws_config,
workers=args.workers)
return
if args.local:
partition_keys(partitions_dir=partitions_dir,
config_file=args.aws_config,
workers=args.workers)
run_download(input_file=str(partitions_dir / 'part_0.txt'),
target_dir=pathlib.Path(args.target_dir),
max_files=args.max_files,
aws_config=args.aws_config)
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/arxiv/run_download.py |
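How the key list gets partitioned for the workers above: numpy's array_split simply cuts the list into near-equal chunks, and each chunk is written to one text file per worker. A tiny illustration with made-up key names (the real ones are the "src/*.tar" objects listed from the arxiv S3 bucket):

```python
import numpy as np

keys = [f"src/arXiv_src_{i:04d}.tar" for i in range(7)]   # hypothetical key names
for worker_id, part in enumerate(np.array_split(keys, 3)):
    print(worker_id, list(part))   # chunk sizes 3, 2, 2
```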
import concurrent.futures
from datetime import datetime
import fasttext
import json
import pathlib
import tarfile
from typing import List, Tuple, Dict, Union
import gzip
import tempfile
import uuid
import re
from utils import predict_lang, get_timestamp, format_arxiv_id
# suppress fasttext warning
fasttext.FastText.eprint = lambda x: None
# constants
ARXIV_URL = "https://arxiv.org/abs/"
FT_MODEL_PATH = "models/lid.176.bin"
class ArxivCleaner:
r""" Class for cleaning raw arxiv data. """
def __init__(
self,
data_dir: pathlib.Path,
work_dir: pathlib.Path,
target_dir: pathlib.Path,
worker_id: str = None
):
self._data_dir = data_dir
self._work_dir = work_dir
self._target_dir = target_dir
self._worker_id = worker_id if worker_id else str(uuid.uuid4())
# make sure dirs exist
for d in [self._work_dir, self._target_dir]:
if not d.exists():
d.mkdir(parents=True)
def run_parallel(
self, max_files: int = -1, workers: int = None,
tar_fp_list: List[str] = None
):
r""" function to run the cleaning process in parallel. This function
will iterate over all arxiv projects and clean the tex files. The
cleaned tex files are then written to a jsonl file.
@param max_files: maximum number of files to process, defaults to -1
which means all files are processed. This is useful for testing.
@param workers: number of workers to use, defaults to None which means
that all cores are used.
@param tar_fp_list: list of tars to process. Defaults to None which
means that all files in data_dir are processed.
"""
out_file = self._target_dir / f"arxiv_{self._worker_id}.jsonl"
with open(out_file, "w") as f:
with concurrent.futures.ProcessPoolExecutor(workers) as executor:
for record, arxiv_id in executor.map(
create_record_single_arg,
self.arxiv_iterator(
max_files=max_files, tar_fp_list=tar_fp_list
)
):
if record is None:
print(f"[{get_timestamp()}][ERROR] "
f"failed to process {arxiv_id}")
continue
if len(record["text"]) == 0:
print(f"[{get_timestamp()}][WARNING] "
f"empty text for {arxiv_id}")
continue
f.write(json.dumps(record) + "\n")
print(f"[{get_timestamp()}][INFO] "
f"processed {arxiv_id}")
executor.shutdown(wait=True)
def run(self, max_files: int = -1, out_fname: str = "arxiv.jsonl"):
r""" function to run the cleaning process. This function will iterate
over all arxiv projects and clean the tex files. The cleaned tex files
are then written to a jsonl file.
@param max_files: maximum number of files to process, defaults to -1
which means all files are processed. This is useful for testing.
@param out_fname: name of the output file, defaults to "arxiv.jsonl"
"""
with open(self._target_dir / out_fname, "w") as f:
for tex_files, yymm, arxiv_id, timestamp in self.arxiv_iterator(
max_files=max_files
):
record, arxiv_id = create_record(
tex_files=tex_files,
yymm=yymm,
arxiv_id=arxiv_id,
timestamp=timestamp
)
if record is None:
print(f"[{get_timestamp()}][ERROR] "
f"failed to process {arxiv_id}")
continue
if len(record["text"]) == 0:
print(f"[{get_timestamp()}][WARNING] "
f"empty text for {arxiv_id}")
continue
f.write(json.dumps(record) + "\n")
print(f"[{get_timestamp()}][INFO] "
f"processed {arxiv_id}")
def arxiv_iterator(
self, max_files: int = -1, tar_fp_list: List[str] = None
):
r""" iterator over arxiv shards. Each shard contains tex projects or
files that are compressed using gzip. This function will extract the
tex files and yield them together with yymm, the raw arxiv id and the
timestamp of the project.
@param max_files: maximum number of files to process, defaults to -1
which means all files are processed.
@param tar_fp_list: optional list of tar files to process, defaults to
None. In this case all tar files in data_dir are processed.
@return: iterator over tex files, yymm, arxiv id and timestamp.
"""
if tar_fp_list is None:
def _tar_fp_iterator():
for _tar_fp in self._data_dir.glob("*.tar"):
yield _tar_fp
else:
def _tar_fp_iterator():
for _tar_fp in tar_fp_list:
yield _tar_fp
failed = 0
processed = 0
for tar_fp in _tar_fp_iterator():
print(f"[{get_timestamp()}][INFO] start processing {tar_fp}")
with tempfile.TemporaryDirectory(dir=self._work_dir) as tmpdir:
with tarfile.open(tar_fp) as tf:
tf.extractall(members=tf.getmembers(), path=tmpdir)
for proj_dir_or_file in pathlib.Path(tmpdir).rglob("*.gz"):
# get arxiv id and month from the filename
yymm = proj_dir_or_file.parent.stem
arxiv_id = proj_dir_or_file.stem
# load the tex source files (we also get the timestamp
# here)
data = _tex_proj_loader(proj_dir_or_file)
if data is None:
failed += 1
continue
tex_files, timestamp = data
processed += 1
if processed > max_files > 0:
break
yield tex_files, yymm, arxiv_id, timestamp
else:
continue
break
print(f"[{get_timestamp()}][INFO] # Failed loading : {failed}")
print(f"[{get_timestamp()}][INFO] done.")
def create_record_single_arg(args):
r""" convenience function to create a record from a single argument. """
return create_record(*args)
def create_record(
tex_files: List[str],
yymm: str,
arxiv_id: str,
timestamp: float
) -> Tuple[Union[Dict[str, Union[str, Dict[str, str]]], str, None], str]:
r""" function to create a record from the tex files, yymm, arxiv id and
timestamp. The function also detects the language of the tex files using a
fasttext model.
@param tex_files: list of tex file contents as strings
@param yymm: yymm of the arxiv project
@param arxiv_id: raw arxiv id
@param timestamp: timestamp of the arxiv project
@return: dictionary containing the cleaned tex text and metadata
"""
# clean tex files
try:
cleaned_str = clean_tex_files(tex_files)
except Exception as e:
return None, arxiv_id
if len(cleaned_str) == 0:
return {"text": "", "meta": {}}, arxiv_id
# get the arxiv id in the correct format
try:
clean_arxiv_id = format_arxiv_id(arxiv_id)
except Exception as e:
print(f"[WARNING] failed to format arxiv id {arxiv_id}; excpetion={e}")
clean_arxiv_id = arxiv_id
# detect language
ft_model = fasttext.load_model(path=str(FT_MODEL_PATH))
lang, _ = predict_lang(text=cleaned_str, lang_model=ft_model, k=1)
try:
lang = lang[0]
except IndexError:
lang = "unknown"
if timestamp is not None:
timestamp = datetime.fromtimestamp(timestamp).isoformat()
return (
{
"text": cleaned_str,
"meta": {
"timestamp": timestamp,
"yymm": yymm,
"arxiv_id": clean_arxiv_id,
"language": lang,
"url": f"{ARXIV_URL}{clean_arxiv_id}",
"source": "arxiv"
}
},
clean_arxiv_id
)
def _tex_proj_loader(
file_or_dir_path: pathlib.Path
) -> Union[Tuple[List[str], float], None]:
r""" function to load the tex files from a tar file or a gzip file. The
function will return a tuple containing a list of tex files and the
timestamp of the project.
@param file_or_dir_path: path to the tar file or the gzip file
@return: tuple containing a list of tex files and the timestamp of the
project
"""
files_and_content = []
timestamp = file_or_dir_path.lstat().st_mtime
try:
# first, try opening it as a tar archive (a project with multiple files)
with tarfile.open(file_or_dir_path) as sub_tf:
for member in sub_tf.getmembers():
if member.name.endswith(".tex"):
file_content = sub_tf.extractfile(member).read()
try:
file_content = file_content.decode("utf-8")
except UnicodeDecodeError:
print(f"[{get_timestamp()}][ERROR] "
f"UnicodeDecodeError: {file_or_dir_path}")
return None
files_and_content.append(file_content)
except tarfile.ReadError:
# otherwise we try opening it as a gzip file
try:
with gzip.open(file_or_dir_path, "rb") as gz:
file_content = gz.read()
except Exception as e:
# all fails, we skip this file
print(f"[ERROR] {e}: {file_or_dir_path}")
return None
try:
file_content = file_content.decode("utf-8")
except UnicodeDecodeError:
print(f"[{get_timestamp()}][ERROR] "
f"UnicodeDecodeError: {file_or_dir_path}")
return None
files_and_content.append(file_content)
except Exception as e:
print(f"[ERROR] {e}: {file_or_dir_path}")
return None
return files_and_content, timestamp
def clean_tex_files(tex_files: List[str]) -> str:
r""" function takes a list of tex files and returns a cleaned version of
the tex project. The cleaned version is a concatenation of the tex files
with the following modifications:
- if multiple latex files, then concatenate them
- remove all comments (i.e. all lines starting with %)
- remove everything before the first \section header
- remove everything after the first occurrence of either \appendix or
\bibliography
- inline-expand definitions and macros
@param tex_files: list of file_content strings
@return: cleaned tex project as a string, empty string if no tex files are
provided
"""
if len(tex_files) == 0:
return ""
# build dictionaries that contain the definitions of all macros in all tex
# files. This is later used to expand all macros used in the text with
# their definitions, so that consistency among different authors is
# ensured.
non_arg_macros = {}
for file_content in tex_files:
non_arg_macros.update(_build_non_arg_macros_dict(file_content))
# TODO: macros that take arguments are not supported yet
arg_macros = {}
# join multiple latex files with a newline character
cleaned_latex_file_str = "\n".join(
_clean_tex_file(
file_content=file_content,
arg_macros=arg_macros,
non_arg_macros=non_arg_macros
)
for file_content in tex_files
)
return cleaned_latex_file_str
def _clean_tex_file(
file_content: str, arg_macros: Dict, non_arg_macros: Dict
) -> str:
r""" function takes a tex file as input and returns a cleaned version. The
cleaned version is a concatenation of the tex files with the
following modifications:
- remove all comments (i.e. all lines starting with %)
- remove everything before the first section-like header
- remove everything after the first occurrence of either \appendix or
\bibliography
- inline-expand definitions and macros
@param file_content: the content of the tex file as a string.
@return: cleaned tex file as a string
"""
# find the first occurrence of a \section-like header and replace everything
# before it with an empty string. This matches the following pattern:
# \<section-type>[optional-args]{name}
pattern = r"^(.*?)("
pattern += r"\\\bchapter\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bpart\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bsubsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bsubsubsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bparagraph\b\*?(?:\[(.*?)\])?\{(.*?)\}"
pattern += r"\\\bsubparagraph\b\*?(?:\[(.*?)\])?\{(.*?)\}"
pattern += r")"
# if no section-like header is found, then we return an empty string
if not re.search(pattern, file_content, flags=re.DOTALL):
return ""
# replace everything with the second group of the match (i.e. everything
# after and including the section header)
file_content = re.sub(
pattern=pattern,
repl=r"\2",
string=file_content,
flags=re.DOTALL # make sure that the dot matches also newlines
)
# remove all line comments
file_content = re.sub(
pattern=r"(?m)^%.*\n?",
repl=r"",
string=file_content,
flags=re.MULTILINE
)
    # remove all inline comments within a line
file_content = re.sub(
# pattern matches a "%" that is not preceded by a backslash (=comment)
pattern=r"[^\\]%.+$",
repl=r"",
string=file_content,
flags=re.MULTILINE
)
    # find the first occurrence of either \appendix or
# replace everything after it with an empty string
pattern = r"("
pattern += r"\\appendix|"
pattern += r"\\begin\{references\}|"
pattern += r"\\begin\{REFERENCES\}|"
pattern += r"\\begin\{thebibliography\}|"
pattern += r"\\bibliography\{.*\}"
pattern += r").*$"
file_content = re.sub(
pattern=pattern,
repl=r'',
string=file_content,
flags=re.DOTALL # make sure that the dot matches also newlines
)
# inline-expand all non-arg macros
for macro_name, macro_value in non_arg_macros.items():
file_content = re.sub(
            # group the macro name and require a non-alphanumeric character
            # after it, so the macro is not matched inside a longer word
pattern=r"(" + macro_name + r")" + r"([^a-zA-Z0-9])",
# replace the macro with its value and add back the character that
# was matched after the macro
repl=macro_value + r"\2",
string=file_content
)
# inline-expand all macros that use args
# TODO: inline-expand macros with args
for macro_name, macro_value in arg_macros.items():
pass
return file_content
def _build_non_arg_macros_dict(file_content: str) -> Dict[str, str]:
r""" function takes the content of a tex file and returns a dictionary
that contains the definitions of all macros that do not use arguments.
The dictionary is of the form {macro_name: macro_value}.
@param file_content: the content of the tex file as a string.
@return: dict
"""
# regex for extracting \newcommand macros without arguments
non_arg_nc_reg = re.compile(
# this regex matches the following:
# \newcommand{\macro_name}{macro_value}
# \newcommand*{\macro_name}{macro_value}
# where macro_name is only allowed to contain letters and numbers;
# macro_value can contain any character.
pattern=r'\\\bnewcommand\b\*?\{(\\[a-zA-Z0-9]+?)\}\{(.*?)\}$',
flags=re.MULTILINE
)
# regex for extracting \def macros without arguments
non_arg_def_reg = re.compile(
# this regex matches the following:
# \def\macro_name{macro_value}
# where macro_name is only allowed to contain letters and numbers;
# macro_value can contain any character.
pattern=r'\\def\s*(\\[a-zA-Z0-9]+?)\s*\{(.*?)\}$',
flags=re.MULTILINE
)
# Extract all user-defined LaTeX macros from the preamble
macros = {}
for reg in [non_arg_nc_reg, non_arg_def_reg]:
for match in reg.finditer(file_content):
# convert the macro name and value to a raw string that can be
# used in re.sub
macro_name = match \
.group(1).encode("unicode-escape").decode("utf-8")
macro_val = match \
.group(2).encode("unicode-escape").decode("utf-8")
macros[macro_name] = macro_val
return macros
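# Hedged usage sketch (not part of the original module): it shows how the macro
# dictionary built above feeds the regex-based expansion in `_clean_tex_file`.
# The tex content below is made up for illustration.
def _example_macro_expansion() -> str:
    content = (
        "\\newcommand{\\model}{GPT}\n"
        "\\section{Introduction}\n"
        "We evaluate \\model on several tasks.\n"
    )
    macros = _build_non_arg_macros_dict(content)
    # `macros` maps the escaped macro name to its value (here: "GPT")
    return _clean_tex_file(
        file_content=content, arg_macros={}, non_arg_macros=macros
    )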
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/arxiv/arxiv_cleaner.py |
from datetime import datetime
import fasttext
import re
from typing import List, Tuple
def get_timestamp() -> str:
return datetime.now().isoformat()
def predict_lang(
text: str, lang_model: fasttext.FastText._FastText, k=5
) -> Tuple[List[str], List[float]]:
r""" Predict top-k languages of text.
@param text: text to predict language of
@param lang_model: language model
@param k: number of predictions to return, defaults to 5
@return: list of predicted languages and list of corresponding
confidence scores
"""
# preprocess text
text = text.lower().replace("\n", " ").replace("\t", " ")
tags, confs = lang_model.predict(text, k=k)
# convert confs to float
confs = [float(conf) for conf in confs]
    # strip the __label__ prefix from the predicted language tags
tags = [tag.replace("__label__", "") for tag in tags]
return tags, confs
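# Hedged sketch (not part of the original module): loading a fastText language
# identification model and calling `predict_lang`; the model path is a
# placeholder and must point to an existing .bin file.
def _example_predict_lang() -> Tuple[List[str], List[float]]:
    model = fasttext.load_model("models/lid.176.bin")
    return predict_lang("Dies ist ein kurzer deutscher Satz.", model, k=3)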
def format_arxiv_id(arxiv_id: str) -> str:
r""" this function brings the raw arxiv-id into a format compliant with the
specification from arxiv. This is used to create the url to the arxiv
abstract page.
- Format prior to March 2007:
<archive>/YYMMNNN where N is a 3-digit number
- Format after March 2007: <archive>/YYMM.NNNNN where N is a 5 (or 6)-digit
number
References: https://info.arxiv.org/help/arxiv_identifier.html
@param arxiv_id: raw arxiv id which can be in one of the following formats:
- <archive><YY><MM><NNN>
- <YY><MM><NNNNN|NNNNNN>
@return: formatted arxiv id
"""
match = re.search(r'^([a-zA-Z-]*)([\d\.]+)$', arxiv_id)
if match is None:
raise ValueError(f"Invalid arxiv id: {arxiv_id}")
if match.group(1) == "":
return match.group(2)
return f"{match.group(1)}/{match.group(2)}"
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/arxiv/utils.py |
import argparse
import os
from collections import defaultdict
from datetime import datetime
from transformers import AutoTokenizer
import json
import multiprocessing as mp
import pathlib
import pandas as pd
from tabulate import tabulate
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default="./data/arxiv/processed")
parser.add_argument('--max_files', type=int, default=-1,
help="max lines to process; this is useful for testing")
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/pythia-6.9b-deduped",
)
def get_token_count(text):
return len(tokenizer.tokenize(text))
def process_record(record):
token_count = get_token_count(text=record["text"])
year = record["meta"]["yymm"][:2]
return token_count, year
def get_timestamp() -> str:
return datetime.now().isoformat()
def print_stats(token_count_data):
df = pd.DataFrame.from_dict(
token_count_data, orient="index"
)
df = df.reset_index()
df.columns = ["year", "count"]
df = df.set_index("year")
df["count"] = df["count"].astype(int)
df["count"] = df["count"] / 1e12
df = df.sort_values(by="count", ascending=False)
df.loc['Total'] = df.sum(numeric_only=True)
print(tabulate(
df, headers=["year", "count (T)"], tablefmt="github", floatfmt=".4f"
))
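# Hedged usage sketch (not part of the original script): `print_stats` expects
# a mapping from the two-digit year prefix to raw token counts; the numbers
# below are made up.
def _example_print_stats() -> None:
    print_stats({"20": 1_200_000_000, "21": 2_400_000_000})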
def main():
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", mp.cpu_count()))
print(f"Using {num_cpus} workers")
files_processed = 0
token_count_data = defaultdict(int)
for filenum, fp in enumerate(pathlib.Path(args.data_dir).glob("*.jsonl")):
with open(fp, "r") as f:
records = [json.loads(rec) for rec in f.readlines()]
with mp.Pool(processes=num_cpus - 2) as pool:
results = pool.map(process_record, records)
for counts, year in results:
token_count_data[year] += int(counts)
total_tokens = sum(token_count_data.values())
print(f"[{get_timestamp()}][INFO] "
f"processed {filenum} files; "
f"total tokens: {total_tokens}")
        files_processed += 1
        if files_processed > args.max_files > 0:
            print(f"[{get_timestamp()}][INFO] "
                  f"reached max files")
            break
print(json.dumps(token_count_data, indent=4))
print(f"Total tokens: {sum(token_count_data.values())}")
print("\n" + "=" * 80 + "\n")
print_stats(token_count_data)
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/arxiv/token_count.py |
import argparse
import os
import uuid
import numpy as np
import pathlib
import tempfile
from typing import List
import joblib
from arxiv_cleaner import ArxivCleaner
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default="./data/arxiv/src")
parser.add_argument('--target_dir', type=str, default="./data/arxiv/processed")
parser.add_argument('--workers', type=int, default=1)
parser.add_argument('--input', type=str, default=None,
help='input file from which to read keys. '
'This is only used when running on slurm.')
parser.add_argument('--local', action='store_true')
parser.add_argument('--setup', action='store_true',
                    help='if set, we partition the tar files into chunks.')
parser.add_argument('--max_files', type=int, default=-1,
help='max files to download, useful for testing')
args = parser.parse_args()
WORK_DIR = os.getenv('WORK_DIR', pathlib.Path(__file__).parent / "work")
WORK_DIR = pathlib.Path(WORK_DIR)
if not WORK_DIR.exists():
WORK_DIR.mkdir()
print(f"Created work directory {WORK_DIR}")
def run_clean(
data_dir: pathlib.Path,
target_dir: pathlib.Path,
input_file: pathlib.Path = None,
max_files: int = -1,
):
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", joblib.cpu_count()))
print(f"Using {num_cpus} processes")
worker_id = os.getenv('SLURM_ARRAY_TASK_ID', None)
if worker_id is None:
worker_id = str(uuid.uuid4())
# create temporary work directory
work_dir = pathlib.Path(
tempfile.mkdtemp(dir=WORK_DIR, prefix=worker_id + "_")
)
if input_file is not None:
# we are running on slurm
assert input_file.exists()
with open(input_file, 'r') as f:
tar_fp_list = f.read().splitlines()
else:
tar_fp_list = None
# create cleaner
arxiv_cleaner = ArxivCleaner(
data_dir=data_dir, work_dir=work_dir, target_dir=target_dir,
worker_id=worker_id
)
arxiv_cleaner.run_parallel(
max_files=max_files, tar_fp_list=tar_fp_list
)
def partition_tar_files(
data_dir: pathlib.Path, workers: int
) -> List[List[str]]:
return np.array_split(
list(str(fp) for fp in data_dir.glob('*.tar')),
indices_or_sections=workers
)
def main():
# create target directory where we store the processed data
target_dir = pathlib.Path(args.target_dir)
if not target_dir.exists():
target_dir.mkdir()
data_dir = pathlib.Path(args.data_dir)
assert data_dir.exists()
if not args.local and not args.setup:
        # here we only clean the files; this requires that setup has already
        # been run
run_clean(
data_dir=data_dir,
target_dir=target_dir,
input_file=pathlib.Path(args.input),
max_files=args.max_files
)
return
if args.setup:
parts = partition_tar_files(data_dir=data_dir, workers=args.workers)
if not (target_dir / "partitions").exists():
(target_dir / "partitions").mkdir()
for i, part in enumerate(parts):
with open(
target_dir / "partitions" / f'tars_part_{i}.txt', 'w'
) as f:
f.write('\n'.join(part))
return
# run locally; here we don't partition the tar files as slurm is not used
if args.local:
run_clean(
data_dir=pathlib.Path(args.data_dir),
target_dir=pathlib.Path(args.target_dir),
input_file=None,
max_files=args.max_files
)
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/arxiv/run_clean.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from setuptools import setup # type: ignore
setup(
name="cc_net",
version="1.0.0",
packages=["cc_net"],
# metadata to display on PyPI
author="Guillaume Wenzek",
author_email="[email protected]",
description="Tools to download and clean Common Crawl",
keywords="common crawl dataset",
url="https://github.com/facebookresearch/cc_net",
license="CC-BY-NC-4.0",
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
project_urls={
"Bug Tracker": "https://github.com/facebookresearch/cc_net/issues",
"Source Code": "https://github.com/facebookresearch/cc_net",
},
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.7",
install_requires=[
"beautifulsoup4>=4.7.1",
"pandas>=0.23.4",
"requests>=2.22.0",
"fasttext>=0.9.1",
"sentencepiece>=0.1.82",
"kenlm @ git+https://github.com/kpu/kenlm.git@master",
"func_argparse>=1.1.1",
"psutil>=5.6.3",
"sacremoses",
"submitit>=1.0.0",
"typing_extensions",
],
extras_require={
"dev": ["mypy==0.790", "pytest", "black==19.3b0", "isort==5.6.4"],
# To use scripts inside cc_net/tools
"tools": ["lxml", "sentence_splitter"],
# Memory-efficient hashset.
# This fork only compiles the kind of dict used by cc_net.
# Full version is at https://github.com/atom-moyer/getpy
"getpy": ["getpy @ git+https://github.com/gwenzek/[email protected]"],
},
package_data={"cc_net": ["data/*"]},
)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Main script to download a CC dump, remove duplicates, split by language and
filter the documents.
The pipeline parameters are described in the `Config` class.
"""
import hashlib
import json
import time
import warnings
from argparse import ArgumentParser
from collections import defaultdict
from itertools import repeat
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple
import func_argparse
# Local scripts
from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file
from cc_net import regroup as regroup_module
from cc_net import split_by_lang
from cc_net.execution import Executor
# Constant
FILE_DIR = Path(__file__).parent
CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv"
DEFAULT_PIPELINE = [
"dedup",
"lid",
"keep_lang",
"sp",
"lm",
"pp_bucket",
"drop",
"split_by_lang",
]
class Config(NamedTuple):
"""
Mine Common Crawl with the given settings.
config_name
dump: CC dump id
output_dir: working directory
    mined_dir: name of the destination folder, full path will be {output_dir}/{mined_dir}/{dump_id}
    execution: choose how to parallelize the execution
    num_shards: number of shards to split the dump
    min_shard: start at shard `min_shard` if specified
    num_segments_per_shard: allows downloading a small portion of CC (e.g. for tests)
    min_len: remove documents shorter than this (in chars)
    hash_in_mem: number of shard hashes to use for dedup
    lang_whitelist: only treat those languages
    lang_blacklist: ignore those languages
    lang_threshold: remove docs whose top language score is lower than this
    keep_bucket: keep only those perplexity buckets, chosen from (head, middle, tail, all)
lm_dir: folder containing LMs
lm_languages: only use LMs for the following languages
cutoff: cutoff file to use for split in head/middle/tail
mine_num_processes: number of processes to use for mining
    target_size: size of final files produced during the `regroup` stage
    cleanup_after_regroup: delete intermediary files after regroup
    task_parallelism: max number of tasks to run in parallel
    pipeline: restricts the mining pipeline to the given steps. Order is important!
experiments: (HACK) enable specific experiments in the code
"""
config_name: str = "base"
dump: str = "2017-51"
output_dir: Path = Path("data")
mined_dir: str = "mined"
execution: str = "auto"
num_shards: int = 1600
min_shard: int = -1
num_segments_per_shard: int = -1
metadata: Optional[str] = None
min_len: int = 300
hash_in_mem: int = 50
lang_whitelist: Sequence[str] = []
lang_blacklist: Sequence[str] = []
lang_threshold: float = 0.5
keep_bucket: Sequence[str] = []
lm_dir: Path = Path("data/lm_sp")
cutoff: Path = CUTOFF_CSV
lm_languages: Optional[Sequence[str]] = None
mine_num_processes: int = 16
target_size: str = "4G"
cleanup_after_regroup: bool = False
task_parallelism: int = -1
pipeline: Sequence[str] = DEFAULT_PIPELINE
experiments: Sequence[str] = []
cache_dir: Optional[Path] = None
def get_executor(
self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1
) -> Executor:
name = "_".join((name, self.config_name, *self.experiments))
return execution.get_executor(
name,
self.output_dir / "logs",
self.execution,
timeout_hour=timeout_hour,
mem_gb=mem_gb,
cpus=cpus,
task_parallelism=self.task_parallelism,
)
def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader:
dump_cache: Optional[Path] = None
if self.cache_dir:
self.cache_dir.mkdir(exist_ok=True)
dump_cache = self.cache_dir / self.dump
dump_cache.mkdir(exist_ok=True)
return process_wet_file.CCShardReader(
self.dump,
shard=shard,
num_shards=self.num_shards,
num_segments_per_shard=self.num_segments_per_shard,
min_len=self.min_len,
cache_dir=dump_cache,
)
@classmethod
def from_json(cls, json_file: Path) -> "Config":
raw_lines = json_file.read_text().splitlines()
raw_lines = [l for l in raw_lines if not l.strip().startswith("//")]
json_config = json.loads("".join(raw_lines))
path_keys = ["cache_dir", "lm_dir", "output_dir"]
for key in path_keys:
if key in json_config:
json_config[key] = Path(json_config[key])
return Config(**json_config)
@property
def will_split(self) -> bool:
return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline
def get_lm_languages(self) -> Sequence[str]:
if self.lm_languages is not None:
return self.lm_languages
if self.lang_whitelist:
return self.lang_whitelist
languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")]
if self.lang_blacklist:
languages = [l for l in languages if l not in self.lang_blacklist]
return languages
def get_mined_dir(self, regroup: bool = False) -> Path:
if self.will_split and not regroup:
return self.output_dir / f"{self.mined_dir}_split" / self.dump
return self.output_dir / self.mined_dir / self.dump
BASE_CONFIG = Config()
BYLANG_CONFIG = Config(
config_name="by_lang",
mined_dir="mined_by_lang",
pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"],
)
REPRODUCE_CONFIG = Config(
config_name="reproduce",
dump="2019-09",
mined_dir="reproduce",
pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"],
metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0",
# Optional filtering:
    # It won't change the execution speed much, but it decreases the disk requirement.
# Restrict languages
lang_whitelist=["fr"],
# Restrict perplexity buckets
# Top languages have been split in perplexity buckets according
# to a Wikipedia trained LM.
# The buckets from low perplexity (good) to high (bad) are:
# ["head", "middle", "tail"]
# Languages without a LM have only one bucket "all".
    # It won't change the execution speed much, but it decreases the disk requirement.
keep_bucket=["head", "all"],
mine_num_processes=1,
)
TEST_CONFIG = BASE_CONFIG._replace(
config_name="test",
dump="2019-09",
output_dir=Path("test_data"),
execution="local",
num_shards=4,
num_segments_per_shard=1,
hash_in_mem=2,
mine_num_processes=2,
lang_whitelist=["de", "it", "fr"],
target_size="32M",
cleanup_after_regroup=False,
cache_dir=Path("test_data/wet_cache"),
)
PREDEF_CONFIGS = {
"base": BASE_CONFIG,
"by_lang": BYLANG_CONFIG,
"test": TEST_CONFIG,
"test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"),
"debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0),
"reproduce": REPRODUCE_CONFIG,
"augment": BASE_CONFIG._replace(
config_name="augment", dump="2019-13", lang_blacklist=["en"]
),
}
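# Hedged sketch (not in the original file): configs are plain NamedTuples, so a
# run can also be configured programmatically with `_replace`, mirroring what a
# --config json file does. The dump id and settings below are placeholders.
def _example_custom_config() -> Config:
    return BASE_CONFIG._replace(
        config_name="example",
        dump="2019-09",
        num_shards=8,
        lang_whitelist=["en"],
        pipeline=["dedup", "lid", "keep_lang", "split_by_lang"],
    )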
def tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def finalize(tmp_output: Path, output: Path) -> None:
if not tmp_output.exists():
warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.")
return
tmp_index = tmp_output.parent / (tmp_output.name + ".index")
tmp_output.rename(output)
if tmp_index.exists():
tmp_index.rename(output.parent / (output.name + ".index"))
def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]:
if n < 0:
n = len(iterable[0])
columns: tuple = tuple([] for _ in range(n))
for row in iterable:
        assert len(row) == n, f"Found tuple of len({len(row)}), expected {n}: {row}"
for i in range(n):
columns[i].append(row[i])
return columns
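# Hedged sketch (not in the original file): `_transpose` turns a list of
# (shard, output) pairs into parallel argument lists, which is how it is used
# with the executors below. The values are made up.
def _example_transpose() -> Tuple[List, ...]:
    pairs = [(0, Path("0000.bin")), (1, Path("0001.bin"))]
    return _transpose(pairs)  # -> ([0, 1], [Path("0000.bin"), Path("0001.bin")])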
def hashes(conf: Config) -> List[Path]:
"""Computes hashes for each shard."""
hashes_dir = conf.output_dir / "hashes" / conf.dump
outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)]
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if not missing_outputs:
return outputs
hashes_dir.mkdir(parents=True, exist_ok=True)
# With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for
# overhead due to how the dynamic allocation works.
ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2)
ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs))
# Wait a bit so that files appears on the disk.
time.sleep(20)
assert all(o.exists() for o in outputs)
return outputs
def _hashes_shard(conf: Config, shard: int, output: Path):
tmp_output = tmp(output)
jsonql.run_pipes(
dedup.HashesCollector(field="raw_content", output=tmp_output),
inputs=conf.get_cc_shard(shard),
)
finalize(tmp_output, output)
return f"Hashed {output}"
HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400]
def mine(conf: Config) -> List[Path]:
"""Remove dups, run LID and LMs, and split by lang and quality."""
mined_dir = conf.get_mined_dir()
if conf.min_shard == -1:
shard_range = list(range(conf.num_shards))
else:
shard_range = list(range(conf.min_shard, conf.num_shards))
if conf.will_split:
        # Give directories when splitting
outputs = [mined_dir / f"{shard:04d}" for shard in shard_range]
else:
# Files otherwise
outputs = [
mined_dir / f"{shard:04d}.json.gz" for shard in shard_range
]
if "mini_again" in conf.experiments:
mined_dir = conf.output_dir / "mini_again" / conf.dump
outputs = [mined_dir / f"{shard:04d}" for shard in shard_range]
# TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs
mem_gb = 60 + 1 * conf.hash_in_mem
timeout_hour = 5
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
outputs = [
conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz"
for h in HASHES_IN_MEM
]
mem_gb = int(max(HASHES_IN_MEM) * 1.2)
timeout_hour = 8
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if "mini_again" in conf.experiments:
missing_outputs = [
(shard, o)
for shard, o in enumerate(outputs)
if shard in [5, 139] and not o.exists()
]
if not missing_outputs:
return outputs
mined_dir.mkdir(parents=True, exist_ok=True)
ex = conf.get_executor(
f"mine_{conf.dump}",
mem_gb=mem_gb,
timeout_hour=timeout_hour,
cpus=conf.mine_num_processes + 1,
)
    # Compute hashes first.
if "dedup" in conf.pipeline:
hashes_groups = list(jsonql.grouper(hashes(conf), conf.hash_in_mem))
hashes_files: Iterable[List[Path]] = [
hashes_groups[shard // conf.hash_in_mem] for shard, o in missing_outputs
]
else:
hashes_files = repeat([])
ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs))
assert all(o.exists() for o in outputs)
return outputs
def _get_segment(tmp_output: Path, doc: dict) -> str:
segment: str = doc["cc_segment"].split("/")[-1]
return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz"))
def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str:
print(conf.pipeline)
assert conf.pipeline
tmp_output = tmp(output)
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
hashes_in_mem = shard
hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]]
shard = 0
cc_shard = conf.get_cc_shard(shard)
steps: Dict[str, Optional[jsonql.Transformer]] = {}
lang_id = Path("bin") / "lid.bin"
steps["lid_before_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5
)
steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes)
steps["lid"] = split_by_lang.Classifier(
model=lang_id,
field="raw_content",
out_field="language",
top=1,
threshold=conf.lang_threshold,
)
steps["lid_after_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5
)
if conf.lang_blacklist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") not in set(conf.lang_blacklist)]
)
elif conf.lang_whitelist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") in set(conf.lang_whitelist)]
)
else:
steps["keep_lang"] = None
tok_field = "tokenized"
steps["sp"] = perplexity.MultiSentencePiece(
{l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()},
field="raw_content",
output_field=tok_field,
normalize=True,
)
steps["lm"] = perplexity.DocLM(
{l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()},
field=tok_field,
output_field="perplexity",
normalize=False, # Normalization is done before SentencePiece
# load_method=kenlm.LoadMethod.PARALLEL_READ,
)
steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV)
steps["drop"] = perplexity.DropKeys(tok_field)
steps["keep_bucket"] = None
if conf.keep_bucket:
steps["keep_bucket"] = jsonql.where(
[lambda doc: doc.get("bucket", "all") in conf.keep_bucket]
)
if "fetch_metadata" in conf.pipeline:
# TODO: better default
assert conf.metadata is not None
steps["fetch_metadata"] = minify.MetadataFetcher(
f"{conf.metadata}/{conf.dump}/"
)
steps["minify"] = minify.Minifier()
pattern = str(tmp_output / "{language}_{bucket}.json.gz")
steps["split_by_lang"] = jsonql.split(pattern=str(pattern), mkdir=True)
steps["split_by_segment"] = jsonql.split(
split_fn=lambda doc: _get_segment(tmp_output, doc), mkdir=True
)
pipeline = filter(None, (steps[s] for s in conf.pipeline))
jsonql.run_pipes(
*pipeline,
inputs=cc_shard,
processes=conf.mine_num_processes,
chunksize=100,
# The splitter takes care of writing to files.
output=tmp_output if not conf.will_split else None,
)
finalize(tmp_output, output)
return f"Mined {output}"
def regroup(conf: Config, all_dirs: List[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs
all_files = [f for d in all_dirs for f in d.glob("*.json.gz")]
if not all_files:
print(f"No .json.gz file found in {all_dirs[0]}")
splits: Dict[str, List[Path]] = defaultdict(list)
for f in all_files:
split = f.name.split(".")[0]
splits[split].append(f)
print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.")
inputs: List[List[Path]] = []
outputs: List[Path] = []
target_size = jsonql.parse_size(conf.target_size)
for split, files in splits.items():
cuts = list(regroup_module.determine_groups(files, target_size=target_size))
if not cuts:
continue
pattern = f"{split}_????.json.gz"
existing_outputs = sorted(regroup_dir.glob(pattern))
if not conf.cleanup_after_regroup:
# We still have all the inputs so it is safe to overwrite existing outputs.
assert len(existing_outputs) <= len(cuts)
existing_outputs = []
if len(existing_outputs) > 0 and len(cuts) == 1:
# append to existing file if size allows it.
new_size = (
sum(f.stat().st_size for f in cuts[0])
+ existing_outputs[-1].stat().st_size
)
if new_size < target_size:
print(f"Will append {cuts[0]} to {existing_outputs[-1]}")
cuts[0].insert(0, existing_outputs.pop(-1))
n_existing = len(existing_outputs)
for i, cut in enumerate(cuts):
# avoid overwriting existing files.
j = i + n_existing
output = regroup_dir / f"{split}_{j:04}.json.gz"
inputs.append(cut)
outputs.append(output)
print(
str(regroup_dir / pattern),
"->",
len(cuts),
f"shards ({n_existing} already there).",
)
ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2)
ex(_regroup, repeat(conf), inputs, outputs)
return regroup_dir
def _regroup(conf: Config, inputs: List[Path], output: Path) -> str:
output.parent.mkdir(parents=True, exist_ok=True)
regroup_module.fast_reshard(
inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup
)
return f"Regrouped {output}"
def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
# check that mining is over.
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs, "Received no dirs to move"
assert all(
d.is_dir() for d in all_dirs
), f"move_segments was expecting dirs received files: {all_dirs[:10]}..."
regroup_dir.parent.mkdir(exist_ok=True)
regroup_dir.mkdir(exist_ok=True)
ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2)
def _move_segments(subdir: Path, regroup_dir: Path) -> str:
n = 0
for f in subdir.iterdir():
if not f.is_file() or f.is_symlink():
continue
n += f.name.endswith(".json.gz")
new_name = regroup_dir / f.name
target = new_name.resolve()
assert f.resolve() != target
            # this makes the job idempotent.
f.rename(new_name)
f.symlink_to(target)
if n == 0:
return ""
return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}"
ex(_move_segments, all_dirs, repeat(regroup_dir))
print(f"Results are in {regroup_dir}")
return regroup_dir
def _validate_test(conf: Config, output_dir: Path, generate: bool = False):
stats: Dict[str, dict] = {}
for file in sorted(output_dir.glob("*.json.gz")):
fname = "/".join((file.parent.name, file.name))
        # The order of documents is not guaranteed inside a shard, so sort for a stable checksum.
lines = sorted(jsonql.open_read(file))
content = "\n".join(lines)
size = len(content)
checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest()
# first_document = json.loads(lines[0])
stats[fname] = {"size": size, "checksum": checksum}
def dump(x):
return json.dumps(x, indent=2, ensure_ascii=False)
print("*** Stats ***")
stats_raw = dump(stats)
stats_file = FILE_DIR / "data" / "test_stats.json"
if generate:
print("Saving stats to", stats_file)
stats_file.write_text(stats_raw)
return
expected_stats: Dict[str, dict] = {}
if stats_file.exists():
expected_stats = json.loads(stats_file.read_text())
if expected_stats == stats:
print("Everything looks good !")
return
stats_file.with_suffix(".actual.json").write_text(stats_raw)
print("*** Expected Stats ***")
print(dump(expected_stats))
print("*** Diff ***")
for fname in sorted(expected_stats.keys()):
print(fname)
        assert fname in stats, "missing file " + fname
if expected_stats[fname]["size"] != stats[fname]["size"]:
print(
" - Expected size",
expected_stats[fname]["size"],
", size",
stats[fname]["size"],
)
if expected_stats[fname]["checksum"] != stats[fname]["checksum"]:
print(
" - Expected checksum",
expected_stats[fname]["checksum"],
", checksum",
stats[fname]["checksum"],
)
def get_main_parser() -> ArgumentParser:
# Generates the 'main' parser by patching a 'Config' parser
p = func_argparse.func_argparser(Config)
    # Override default values to None, so we know what was set by the user.
# Note that it will keep the original default values in the help message.
p.set_defaults(**{f: None for f in Config._fields})
p.add_argument("--config", type=str, default="base")
p.set_defaults(__command=main)
return p
def main(config: str = "base", **config_as_dict: Any) -> None:
# Use the given 'config' as default value.
config_base = config
if config_base in PREDEF_CONFIGS:
conf = PREDEF_CONFIGS[config_base]
elif Path(config_base).exists():
conf = Config.from_json(Path(config_base))
else:
raise ValueError(
f"Invalid value {config_base} for --config. "
f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file."
)
conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None})
print(f"Will run cc_net.mine.main with the following config:", conf)
all_files = mine(conf)
if conf.will_split:
assert all_files
assert all(d.is_dir() for d in all_files)
all_dirs = all_files
if "split_by_lang" in conf.pipeline:
# Only try regrouping if we split the shards.
regroup(conf, all_dirs)
elif "split_by_segment" in conf.pipeline:
# If we split by segment then regrouping is trivial, since segments appear in only one shard.
move_segments(conf, all_dirs)
if conf.config_name == "test":
_validate_test(conf, conf.get_mined_dir(regroup=True))
if __name__ == "__main__":
func_argparse.parse_and_call(get_main_parser())
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/mine.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Creates mono-lingual corpus from Wikipedia.
"""
import functools
import re
import subprocess
import urllib.request
from pathlib import Path
from typing import Dict
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql, text_normalizer
CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
def tmp(file: Path) -> Path:
return file.parent / ("tmp." + file.name)
def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
"""Will dump the tokenized opening text of the given Wikipedia.
Args:
- file: File containing the Wikipedia dump.
- output: Output file.
- n_docs: How many docs to parse
- tokenize: whether to tokenize the text
- lang: Language code used to chose the tokenizer
"""
assert file.exists()
    jsonql.run_pipes(
functools.partial(extract_opening_text, n_docs=n_docs),
file=file,
output=tmp(output) if output else None,
)
if output:
tmp(output).replace(output)
def extract_opening_text(source, n_docs: int = 10_000):
i = 0
for doc in jsonql.read_jsons(source):
if not doc:
continue
text = doc.get("opening_text")
if not text:
continue
yield text_normalizer.normalize(text)
i += 1
if i >= n_docs:
break
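# Hedged sketch (not in the original script): `extract_opening_text` accepts
# anything `jsonql.read_jsons` can consume, including an in-memory iterator of
# json lines; the documents below are made up.
def _example_extract_opening_text() -> list:
    docs = iter(
        ['{"opening_text": "Hello, Wikipedia!"}', '{"title": "no opening"}']
    )
    return list(extract_opening_text(docs, n_docs=10))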
def dl(lang: str, output_dir: Path, date: str = None):
"""Download the cirrus extract for the given lang.
See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.
Args:
- lang: The Wikipedia code for the language.
- output_dir: Output directory. File will be `{lang}.json.gz`
- date: Date of a specific Cirrus dump.
"""
urls = get_cirrus_urls(date)
assert (
lang in urls
), f"--lang {lang} not found. Available languages are: {urls.keys()}"
assert output_dir, "--output_dir folder needed."
output_dir.mkdir(exist_ok=True)
output = output_dir / (lang + ".json.gz")
print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
wget(urls[lang], output)
def get_cirrus_urls(date: str = None) -> Dict[str, str]:
if date is None:
cirrus_page = BeautifulSoup(
urllib.request.urlopen(CIRRUS_URL), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
dumps.remove("..")
dumps.remove("current")
# We take the oldest dump since the most recent might be incomplete.
        # The page only links to the N latest dumps so the dump won't be too old.
date = min(dumps)
cirrus_url = "/".join((CIRRUS_URL, date))
print("Will use the Wikipedia dump from:", date, cirrus_url)
cirrus_page = BeautifulSoup(
urllib.request.urlopen(cirrus_url), features="html.parser"
)
urls = {}
for link in cirrus_page.findAll("a"):
match = CIRRUS_DUMP_RE.match(link.get("href"))
if not match:
continue
urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
assert urls, f"No valid download urls found at {cirrus_url}"
return urls
def wget(url: str, output: Path):
subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
tmp(output).replace(output)
assert (
output.stat().st_size > 10_000
), f"File {output} downloaded from {url} looks too small"
if __name__ == "__main__":
func_argparse.main(dl, opening)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/get_wiki_cirrus.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Manipulate files containing one json per line.
"""
import argparse
import collections
import contextlib
import functools
import glob
import gzip
import importlib
import inspect
import io
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import tempfile
import time
import typing as tp
import warnings
import zlib
from pathlib import Path
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import numpy as np
import psutil # type: ignore
import requests
from typing_extensions import Protocol
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M",
)
NEWLINE = " N3WL1N3 "
FilterFn = Callable[[dict], bool]
FileDescriptor = Union[Path, List[Path], str]
WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
def io_parser():
"""Parser shared by all commands to get input/output files."""
parser = argparse.ArgumentParser(add_help=False)
file_help = """File to read from. Can be specified several times for several files.
Be careful that bash will expand glob patterns **before** sending the args
to python. To use globs put it inside single quotes:
jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
[Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
[Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
"""
parser.add_argument("-f", "--file", type=Path, action="append", help=file_help)
parser.add_argument("-o", "--output", type=Path, default="-")
parser.add_argument("--processes", type=int, default=1)
return parser
def get_parser():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them"
)
subparsers = parser.add_subparsers()
def add_subparser(function, arguments):
doc = function.__doc__.split("\n")[0]
p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
p.set_defaults(command=function)
for k, v in arguments.items():
p.add_argument(k, **v)
add_subparser(
select,
{
"columns": dict(nargs="+", help="Extract the value of the given fields"),
"--skip_empty": dict(
action="store_true", help="Skip lines without the requested fields"
),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE,
help="Replace newlines found in the text by the given string",
),
},
)
add_subparser(
where,
{
"clauses": dict(nargs="+", help=""),
"--requires": dict(
action="append", help="Python module required by the clauses code."
),
},
)
add_subparser(
merge,
{
"columns": dict(nargs="+", help=""),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE, help="Replace the given string by actual newlines"
),
},
)
add_subparser(
describe,
{
"columns": dict(nargs="*", help=""),
"--bins": dict(
default="auto", help="Number of bins for computing the histograms"
),
"--cumulative": dict(
action="store_true", help="Compute cumulative histograms"
),
"--weights": dict(type=str, help="Column used to weight histograms"),
},
)
add_subparser(split, {"--pattern": dict(type=str)})
add_subparser(shard, {})
return parser
def _split_array(array, sep):
last = 0
for i, x in enumerate(array):
if x != sep:
continue
yield array[last:i]
last = i + 1
if last != len(array):
yield array[last:]
def main(raw_args):
parser = get_parser()
pipeline = []
file = "-"
output = "-"
processes = 1
for args_group in _split_array(raw_args, "--"):
args = vars(parser.parse_args(args_group))
command = args.pop("command")
file = args.pop("file") or file
output = args.pop("output") or output
processes = args.pop("processes") or processes
pipeline.append(as_pipe(command, args))
if not pipeline:
parser.print_help()
return
run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes)
class Transformer:
"""
Wrapper around functions transforming documents.
This allows `run_pipes` to automatically parallelize the pipeline.
Provides:
* Automatic logging. Logging can be changed with the `summary` method.
    Logging frequency is set with _log_freq (in seconds) or the $JSONQL_LOG_FREQ env variable.
* Automatic parallelization without pickling. The transformers are shared
across processes, and the object is usually not pickled.
* Basic pickling / unpickling in case it's still needed.
By default will only pickle the arguments passed to the constructor.
    * Delayed initialization. Internal state which is not picklable should be set
inside the `_prepare` function.
"""
parallelisable: bool = True
expect_json: bool = False
warn_when_pickling: bool = False
ready: bool = False
def __init_subclass__(cls, expect_json: bool = None):
"""Detects if the subclass expects json as input."""
spec = inspect.getfullargspec(cls.do)
if expect_json is None:
expect_json = spec.annotations.get(spec.args[1], None) == dict
cls.expect_json = expect_json
def __new__(cls, *args, **kwargs):
"""Creates the transformer and save the arguments passed to the constructor."""
t = super().__new__(cls)
Transformer.__init__(t, args, kwargs)
return t
def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
"""
Init the transformer counters.
If state_args/state_kwargs are set they will override whatever was
originally passed to the subclass constructor.
"""
if state_args is not None:
self.__args = state_args
if state_kwargs is not None:
self.__kwargs = state_kwargs
self.start_time = time.time()
self.__last_log = self.start_time
self.processed = 0
        # Log every 5 min unless specified otherwise.
self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
self.__cls = type(self)
self._logger = logging.getLogger(self.__cls.__name__)
def __call__(self, x):
assert self.ready, f"{self} is not ready."
if x is None:
return
y = self.do(x)
self.processed += 1
if time.time() - self.__last_log > self._log_freq:
self.log_summary()
return y
def do(self, x):
raise NotImplementedError(f"'do' not implemented in {type(self)}")
def summary(self) -> List[str]:
return [self.speed_summary()]
def speed_summary(self) -> str:
delay = time.time() - self.start_time
h = delay / 3600
s = self.processed / delay
return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."
def log(self, message):
self._logger.info(message)
def log_summary(self) -> None:
if not self.ready:
self.log("Not ready.")
return
summ = self.summary() or []
for line in summ:
self.log(line)
self.__last_log = time.time()
def map(self, source: Iterable) -> Iterator:
if self.ready:
for x in source:
yield self(x)
# since we have been prepared by caller,
# caller is also responsible for calling `close`.
return
else:
with self:
for x in source:
yield self(x)
def __getstate__(self) -> Tuple[tuple, dict, bool]:
return (self.__args, self.__kwargs, self.expect_json)
def __setstate__(self, state: Tuple[tuple, dict, bool]):
if self.warn_when_pickling:
warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
(args, kwargs, expect_json) = state
        # When unpickling `__new__` isn't called so we have to do it ourselves.
Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
type(self).__init__(self, *args, **kwargs)
assert self.expect_json == expect_json
# __setstate__ is called by multiprocessing right before calling
# the object so we need to initialize everything.
self.__enter__()
def _prepare(self) -> None:
pass
def __enter__(self) -> "Transformer":
# In multiprocessing __enter__ is always called twice, so we are idempotent.
# Because we call __enter__ when deserializing this transformer and
# also when the parent transformer is deserialized.
self.start_time = time.time()
if self.ready:
return self
self._prepare()
self.ready = True
return self
def __exit__(self, *args) -> None:
self.close()
self.log_summary()
def close(self) -> None:
pass
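# Hedged sketch (not in the original module): a minimal Transformer subclass.
# Annotating `do`'s argument as dict makes __init_subclass__ set expect_json,
# so run_pipes prepends a JsonReader when it reads from files (inputs is None).
# The field name is just an example.
class _ExampleFieldLength(Transformer):
    def __init__(self, field: str = "raw_content"):
        super().__init__()
        self.field = field
    def do(self, doc: dict) -> dict:
        doc[self.field + "_length"] = len(doc.get(self.field, ""))
        return doc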
def as_pipe(transformer, kwargs):
if isinstance(transformer, type):
return transformer(**kwargs)
return lambda source: transformer(source, **kwargs)
def compose(fns: List[Transformer]) -> Transformer:
if len(fns) == 1:
return fns[0]
return MultiTransformer(fns)
class MultiTransformer(Transformer):
def __init__(self, transformers: List[Transformer]):
super().__init__()
self.transformers = transformers
def __repr__(self) -> str:
pipeline = " | ".join(type(t).__name__ for t in self.transformers)
return f"<{pipeline}>"
def do(self, x):
for t in self.transformers:
x = t(x)
return x
def _prepare(self):
for t in self.transformers:
t.__enter__()
return self
def __exit__(self, *args):
for t in self.transformers:
t.__exit__(*args)
def summary(self):
return itertools.chain(*(t.summary() for t in self.transformers))
class Mapper(Transformer):
def __init__(self, fn):
super().__init__()
self.fn = fn
def do(self, x):
return self.fn(x)
def run_pipe(
command,
kwargs: dict = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
):
kwargs = kwargs or {}
if isinstance(kwargs, argparse.ArgumentParser):
kwargs = vars(kwargs.parse_args())
file = file or Path(kwargs.pop("file", "-"))
output = output or Path(kwargs.pop("output", "-"))
return run_pipes(as_pipe(command, kwargs), file=file, output=output)
def run_pipes(
*fns: Union[Transformer, Callable[[Iterable], Iterable]],
inputs: Iterable[dict] = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
processes: int = 1,
chunksize: int = 10_000,
):
"""
Run full document processing pipeline.
- fns: list of functions to run over the documents. Can be:
* `Iterable -> Iterable` function
* jsonql.Transformer instance
Using transformers allow the pipeline to process documents in parallel.
- inputs: iterable to read the documents from
- file: if inputs is not given, will read documents from this file.
- output: writable file like.
- processes: number of processes to use. -1 means all CPU available.
- chunksize: chunksize for multiprocessing.Pool.imap_unordered
"""
expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
if expect_json and inputs is None:
fns = (JsonReader(),) + fns
transformers = []
for t in fns:
if not isinstance(t, Transformer):
break
if not t.parallelisable:
break
transformers.append(t)
pipes = fns[len(transformers) :]
log = logging.getLogger(__name__).info
if inputs is None:
data: Iterable = open_read(file)
else:
data = inputs
if processes == -1:
processes = os.cpu_count() or 0
with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
if transformers:
log(f"preparing {transformers}")
transform = stack.enter_context(compose(transformers))
if processes <= 1:
data = transform.map(data)
else:
p = multiprocessing.current_process()
log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
pool = stack.enter_context(
multiprocessing.Pool(
processes=processes,
initializer=_set_global_transformer,
initargs=(transform,),
)
)
data = pool.imap_unordered(
_global_transformer, data, chunksize=chunksize
)
for fn in pipes:
if isinstance(fn, Transformer):
data = fn.map(data)
else:
data = fn(data)
write_jsons(data, output)
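# Hedged usage sketch (not part of the original module): a tiny in-memory
# pipeline mixing a Transformer (Mapper) with a plain iterable-to-iterable
# function; results are written to stdout. The documents are made up.
def _example_run_pipes() -> None:
    docs = [{"text": "a"}, {"text": "bb"}, {"text": "ccc"}]
    add_length = Mapper(lambda doc: {**doc, "length": len(doc["text"])})
    def keep_long(source):
        return (doc for doc in source if doc["length"] >= 2)
    run_pipes(add_length, keep_long, inputs=docs)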
# Allows sharing the transformer across subprocesses.
# Used by `run_pipes`
_GLOBAL_TRANSFORMER: Optional[Transformer] = None
def _set_global_transformer(transformer: Transformer):
global _GLOBAL_TRANSFORMER
p = multiprocessing.current_process()
logging.info(
f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
)
assert transformer.ready, f"{transformer} isn't ready"
_GLOBAL_TRANSFORMER = transformer
def _global_transformer(document: str) -> Optional[dict]:
assert _GLOBAL_TRANSFORMER is not None
return _GLOBAL_TRANSFORMER(document)
def lines(file: ReadableFileLike) -> Iterator[str]:
return (line.strip("\n") for line in open_read(file))
def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
reader = JsonReader(strict=strict)
lines = open_read(file)
for line in lines:
if line is None:
continue
yield reader(line)
reader.log_summary()
def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
eol = os.linesep
with open_write(file) as o:
for res in source:
if res is None:
continue
if isinstance(res, dict):
json.dump(res, o, ensure_ascii=False)
o.write(eol)
continue
if isinstance(res, str):
res = res.rstrip("\n")
print(res, file=o)
class JsonReader(Transformer):
def __init__(self, strict: bool = False):
super().__init__()
self.ready = True
self.strict = strict
self.num_errors = 0
def do(self, line: str) -> Optional[dict]:
if line is None:
return None
if isinstance(line, dict):
return line
line = line.rstrip("\n")
if not line:
return None
try:
return json.loads(line)
except json.decoder.JSONDecodeError as e:
self.log_error(e)
if self.strict:
raise
return None
def log_error(self, e: json.decoder.JSONDecodeError):
self.num_errors += 1
if self.num_errors > 10:
return
MAX_LEN = 80
snippet, snippet_len = e.doc, len(e.doc)
col = e.pos
if snippet_len > MAX_LEN:
if col < MAX_LEN:
start = 0
elif snippet_len - col < MAX_LEN:
start = snippet_len - MAX_LEN
else:
start = col - MAX_LEN // 2
snippet = e.doc[start : start + MAX_LEN]
col = col - start
logging.warning(
"\n".join(
[
f"Invalid json (length={len(e.doc)}) {e}",
snippet,
" " * (col - 1) + "^",
]
)
)
def summary(self):
summ = super().summary()
if self.num_errors > 0:
summ.append(f"Skipped {self.num_errors} invalid json.")
return summ
def compile_column(column, newline):
if callable(column):
return column
if column == "*":
return json.dumps
if re.match(r"[_a-z][_a-z0-9]*", column):
def extract_col(doc):
v = doc.get(column, "")
if isinstance(v, str) and newline != "\n":
v = v.rstrip("\n").replace("\n", newline)
return v
return extract_col
return compile_expr(column)
def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
"""Yields the content of the requested columns."""
column_parsers = [compile_column(c, newline) for c in columns]
for doc in read_jsons(lines):
values = []
empty = True
for parse_col in column_parsers:
v = parse_col(doc)
values.append(str(v) or "")
empty = empty and v is None
if skip_empty and empty:
continue
yield separator.join(values)
def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
if not isinstance(clause, str):
return clause
args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
args_list = list(re.findall(args_re, clause))
if not args_list:
# This is only a warning because you may want to have eg random sampling
# that doesn't depend on the document.
logging.warn(
f"Warning: No variable found in expression: <{clause}>\n"
"Variables should be written inside braces, eg: {language}=='en'"
)
python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
requires = requires or []
modules = {r: importlib.import_module(r) for r in requires}
return eval(f"lambda doc: {python_like}", modules)
class where(Transformer):
"""Filters the data using python code.
Ex: `jsonql where 'len({text}) > 100'`
"""
def __init__(
self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = []
):
super().__init__()
self.raw_clauses = clauses
self.requires = requires
self.n_selected = 0
self.clauses: List[FilterFn] = []
def _prepare(self):
self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]
def do(self, doc: dict) -> Optional[dict]:
assert self.clauses
if not doc or not all((c(doc) for c in self.clauses)):
return None
self.n_selected += 1
return doc
def summary(self):
n_selected, n_docs = self.n_selected, self.processed
selectivity = n_selected / n_docs if n_docs else 0
return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
def merge(lines, columns, separator="\t", newline=NEWLINE):
"""Reads tab separated columns and output a json using the given headers.
Headers are of form {key}[%{type}]
{type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
Default type is string.
A special header "_" means interpret this column as json, and append all other
    columns to it. Must appear only once and in the last position.
Ex:
`echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
`echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
`echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
"""
handle_newlines = lambda s: s.replace(newline, "\n")
type_mapping: Dict[str, Callable] = {
"f": float,
"i": int,
"b": bool,
"s": handle_newlines,
}
type_parsing = [
type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
]
columns = [f.split("%")[0] for f in columns]
doc_index = columns.index("_") if "_" in columns else -1
read_json = JsonReader()
def parse(line):
parts = line.split(separator, len(columns) - 1)
doc: Dict[str, tp.Any] = {}
for i, value in enumerate(parts):
if columns[i] == "_":
doc.update(read_json(parts[doc_index]))
else:
try:
doc[columns[i]] = type_parsing[i](value)
except ValueError:
logging.error(
f"Error when parsing column {i} of line: {line[:100]}..."
)
return doc
for line in lines:
yield json.dumps(parse(line))
class split(Transformer):
"""Split a files in several smaller files based on the value of a field."""
# Not parallelisable since we are writing to files.
parallelisable = False
def __init__(
self,
pattern: Union[Path, str] = None,
split_fn: Callable[[dict], str] = None,
mkdir: bool = False,
):
super().__init__()
assert not (
pattern and split_fn
), "split can't have both a pattern and a split_fn"
if split_fn is not None:
self.split_fn = split_fn
else:
assert pattern, "split need either a pattern or a split_fn"
self.split_fn = self.make_split_fn(str(pattern))
self.mkdir = mkdir
self.o: dict = {}
def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
return lambda doc: pattern.format(**{c: doc[c] for c in candidates})
def do(self, doc):
filename = self.split_fn(doc)
if not filename:
return
o = self.o.get(filename, None)
if o is None:
if self.mkdir:
Path(filename).parent.mkdir(parents=True, exist_ok=True)
self.o[filename] = open_write(filename)
print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)
def summary(self):
summ = super().summary()
summ.append(f"Found {len(self.o)} splits.")
return summ
def close(self):
for file in self.o.values():
file.close()
def histogram(values, bins, weights):
hist, bins = np.histogram(values, bins=bins)
# n_bins = len(hist)
if weights is not None:
# Bins can't be auto-determined if weights is supplied.
# So we first compute the bins without the weights then recompute
# the histogram with the weights.
hist, bins = np.histogram(values, bins=bins, weights=weights)
# cumsum = np.cumsum(hist)
# total = cumsum[-1]
# for i in range(n_bins - 1):
# if cumsum[i] / total > 0.9:
# useful_range = np.linspace(bins[0], bins[i + 1], n_bins)
# new_bins = np.append(useful_range, [bins[-1]])
# return np.histogram(values, bins=new_bins, weights=weights)
return hist, bins
def _parse_bins(bins):
try:
if isinstance(bins, str):
if "," in bins:
bins = [int(b) for b in bins.split(",")]
else:
bins = int(bins)
except ValueError:
pass
return bins
ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
MAX_LABEL_LEN = 100
def bar_chart(hist, bins):
n = sum(hist)
max_h = max(hist)
out = []
for i, h in enumerate(hist):
h_size = 80 * h // max_h
dh_size = 80 * (h - hist[i - 1]) // max_h
if h_size == 0 or dh_size == 0:
continue
bar = "█" * h_size
out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}")
out.append(f"{bins[-1]:8.3f}")
return out
def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
out = []
documents = stats[ALL_DOCUMENTS]
count = stats.get(key, 0)
r = count / documents if documents else 0
out.append(f"Field {key} saw {count} times ({r:5.1%})")
length = stats.get(key + ".length", None)
avg_length = length // count if length else 0
if length is not None:
out[-1] += f", average length is {length // count}"
values = stats.get(key + ".val", None)
if values:
out[-1] += f", histogram is: (bins={bins})"
if weights:
if weights not in stats:
logging.warn(f"Warning: weights column {weights} not found.")
if weights + ".val" not in stats:
logging.warn(
f"Warning: weights column {weights} is not a numeric column."
)
weights = stats.get(weights + ".val")
hist, bins = histogram(values, _parse_bins(bins), weights)
if cumulative:
hist = np.cumsum(hist)
out += bar_chart(hist, bins)
cnt = stats.get(key + ".cnt", None)
if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
out[-1] += ", top 100 labels:"
for label, n in cnt[:100]:
if n < 5:
continue
out.append(f"{label:25}: {n:6} ({n / count:5.1%})")
return out
def describe(source, columns=None, weights=None, **kwargs):
"""Compute some statistics about a dataset.
Stats can be restricted to a subset of columns."""
MAX_HIST_SIZE = 100_000_000
MAX_CNT_SIZE = 1000
stats = {ALL_DOCUMENTS: 0}
needed = columns + [weights] if columns else None
for doc in read_jsons(source):
stats[ALL_DOCUMENTS] += 1
for k, v in doc.items():
if needed and k not in needed:
continue
stats[k] = get_or_set(stats, k, 0) + 1
if isinstance(v, str):
stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
                if len(v) > MAX_LABEL_LEN: # Don't treat too-long strings as labels
continue
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
if v in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[v] += 1
elif type(v) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values.append(v)
elif type(v) is list and len(v) and type(v[0]) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values += v
elif type(v) is dict:
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
for label in v:
if label in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[label] += 1
documents = stats[ALL_DOCUMENTS]
yield f"Stats computed on {documents} documents:"
for k in stats:
if columns and k not in columns:
continue
if "." in k or k == ALL_DOCUMENTS:
continue
for line in display_stats(stats, k, weights=weights, **kwargs):
yield line
def shard(lines):
"""Shard a file in several smaller ones."""
# The creation of the shard is handle in a generic way. Do we need this ?
return lines
# *** Utils ***
def get_or_set(dictionary, key, default):
if key not in dictionary:
dictionary[key] = default
return dictionary[key]
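# Illustrative sketch (added for clarity, not part of the original module):
# `get_or_set` is a `dict.setdefault`-style helper used by `describe` to build
# counters lazily. The `_example_*` helper name is hypothetical.
def _example_get_or_set():
    stats = {}
    assert get_or_set(stats, "seen", 0) == 0
    stats["seen"] = get_or_set(stats, "seen", 0) + 1
    assert stats["seen"] == 1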
class SimpleIO(Protocol):
"""A subset of methods from TextIO."""
def close(self) -> None:
...
def write(self, line: str) -> int:
...
def __enter__(self) -> "SimpleIO":
...
def __exit__(self, exc_type, exc_value, traceback):
...
def open_read(filename: ReadableFileLike) -> Iterable[str]:
"""Open the given file, list of files or files matching the given glob and read lines.
`filename` is None or "-" -> reads from stdin
`filename` is a Path / str -> interprets filename as a glob and open files matching it
`filename` is a list -> opens sequentially all files from the list using `open_read`
`filename` is something else -> returns the object wrapped in a `nullcontext`
This allows to pass already openened files or iterables.
`open_read` will decompress gzip files, given they have ".gz" suffix.
"""
if filename is None:
return sys.stdin
if isinstance(filename, list):
assert isinstance(filename[0], Path)
if len(filename) == 0:
return []
if len(filename) > 1:
return _yield_from(filename)
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
if filename.startswith("http://") or filename.startswith("https://"):
return open_remote_file(filename)
filename = Path(filename)
if not isinstance(filename, Path):
# we might have received an iterable, return it unmodified.
return filename # type: ignore
# Expand glob patterns only when reading
files = [Path(f) for f in sorted(glob.glob(str(filename)))]
if len(files) > 1:
return _yield_from(files)
if len(files) == 1:
filename = files[0]
assert isinstance(filename, Path)
if filename.name.endswith("]"):
return block_reader(filename)
logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'")
if filename.suffix == ".gz":
file: TextIO = gzip.open(filename, "rt") # type: ignore
else:
file = open(filename, "rt")
return _close_when_exhausted(file)
def _close_when_exhausted(file: TextIO) -> Iterable[str]:
with file:
yield from file
def _yield_from(files: list) -> Iterable[str]:
for file in files:
yield from open_read(file)
def open_write(
filename: WritableFileLike, max_size: str = "4G"
) -> tp.ContextManager[TextIO]:
"""Open the given file, list of files or files matching the given glob.
The return value is a ContextManager meant to be used inside a `with` block:
```
with open_write("foo.txt") as o:
...
Write mode:
replaces "?" from filename by numbers ranging from 0 to 9, generatings files of size `max_size`.
If filename ends with ".gz", creates a blocked gzip file with random access.
"""
if filename is None:
return contextlib.nullcontext(sys.stdout)
if isinstance(filename, list):
if len(filename) > 1:
return MultiFile(filename, "w", max_size)
else:
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
filename = Path(filename)
if not isinstance(filename, Path):
assert hasattr(filename, "write"), f"{filename} doesn't have a .write method."
# We return a 'TextIO' even though we only check for `.write` method,
# this works better with eg `print`.
return contextlib.nullcontext(tp.cast(TextIO, filename))
mode = "wt"
if "?" in filename.name:
return sharded_file(filename, mode, max_size)
logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}")
# TODO: should we use another format ?
if filename.suffix == ".gz":
return BlockedGzipWriter(Path(filename), mode, block_size="64M")
return open(filename, "wt")
def parse_size(size):
unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
unit = size[-1].upper()
assert (
unit in unit_map
), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
return int(size[:-1]) * unit_map[unit]
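# Illustrative sketch (added for clarity, not part of the original module):
# `parse_size` converts a human-readable size such as "4G" into bytes.
# The `_example_*` helper name is hypothetical.
def _example_parse_size():
    assert parse_size("512B") == 512
    assert parse_size("64M") == 64 * 1024 ** 2
    assert parse_size("4G") == 4 * 1024 ** 3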
class MultiFile(SimpleIO):
def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
self.name = str(files)
self.mode = mode
self.files = iter(files)
self.max_size = parse_size(max_size)
self.current_handle: Optional[TextIO] = None
self.current_block_size = 0
self._open_next_handle() # Opening 1st handle allows to write directly.
def write(self, content) -> int:
# Avoid splitting newlines to a new file.
# use current_block_size since it's faster than `tell()`
if content != "\n" and self.current_block_size >= self.max_size:
self._open_next_handle()
if self.current_handle is None:
raise Exception("No more files to write to...")
written = self.current_handle.write(content)
self.current_block_size += written
return written
def _open_next_handle(self) -> bool:
self.close()
file = next(self.files, None)
if file is None:
return False
self.current_handle = open_write(file).__enter__()
self.current_block_size = 0
return True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def closed(self):
return self.current_handle is None
def close(self):
if self.current_handle is None:
return
# log("Closing", self.current_handle.name, "with mode", self.current_handle.mode)
self.current_handle.__exit__(None, None, None)
self.current_handle = None
# Not sure it helps since connections are reset anyway.
_session = functools.lru_cache()(requests.Session)
def request_get_content(url: str, n_retry: int = 3) -> bytes:
"""Retrieve the binary content at url.
Retry on connection errors.
"""
t0 = time.time()
logging.info(f"Starting download of {url}")
for i in range(1, n_retry + 1):
try:
r = _session().get(url)
r.raise_for_status()
break
except requests.exceptions.RequestException as e:
# Sleep and try again on error, unless it's a 404.
message = e.args[0] if isinstance(e.args[0], str) else ""
if i == n_retry or "Client Error" in message:
raise e
warnings.warn(
f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})"
)
time.sleep(10 * 2 ** i)
dl_time = time.time() - t0
dl_speed = len(r.content) / dl_time / 1024
logging.info(
f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)"
)
return r.content
def open_remote_file(url: str, cache: Path = None) -> Iterable[str]:
"""Download the files at the given url to memory and opens it as a file.
Assumes that the file is small, and fetch it when this function is called.
"""
if cache and cache.exists():
return open_read(cache)
# TODO: open the remote file in streaming mode.
# The hard part is that we need to write the content on disk at the same time,
# to implement disk caching.
raw_bytes = request_get_content(url)
content = io.BytesIO(raw_bytes)
if url.endswith(".gz"):
f: TextIO = gzip.open(content, mode="rt") # type: ignore
else:
f = io.TextIOWrapper(content)
if cache and not cache.exists():
# The file might have been created while downloading/writing.
tmp_cache = _tmp(cache)
tmp_cache.write_bytes(raw_bytes)
if not cache.exists():
tmp_cache.replace(cache)
else:
tmp_cache.unlink()
return _close_when_exhausted(f)
def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile:
folder, name = file_pattern.parent, file_pattern.name
assert "?" in name, f"Can't expand give file_pattern: {file_pattern}"
n = name.count("?")
assert 0 < n < 8
assert "?" * n in name, f"The '?' need to be adjacents in {file_pattern}"
assert "r" not in mode
files = (folder / name.replace("?" * n, f"%0{n}d" % i) for i in range(10 ** n))
return MultiFile(files, mode, max_size)
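# Illustrative sketch (added for clarity, not part of the original module): with
# a pattern such as "out.???.json.gz", `sharded_file` prepares shard names
# out.000.json.gz, out.001.json.gz, ... that `MultiFile` opens one after another
# whenever the current shard exceeds `max_size`. The `_example_*` helper name is
# hypothetical and only mirrors the name expansion used above.
def _example_shard_names():
    name = "out.???.json.gz"
    n = name.count("?")
    names = [name.replace("?" * n, f"%0{n}d" % i) for i in range(3)]
    assert names == ["out.000.json.gz", "out.001.json.gz", "out.002.json.gz"]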
class SplitFile:
def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"):
assert mode == "r"
size = os.path.getsize(filename)
self.handle = open(filename, mode)
start = chunk * size // n_chunks
self.end: int = (chunk + 1) * size // n_chunks
if start > 0:
self.handle.seek(start - 1)
            # Skip incomplete line. This avoids crashing when reading e.g. the middle
# of a unicode char. `self.handle.buffer` is a binary file reader.
self.handle.buffer.readline() # type: ignore
def __enter__(self):
return self
def __iter__(self):
while True:
line = self.handle.readline()
if not line:
return
yield line
if self.handle.tell() >= self.end:
return
def readlines(self):
return list(self.__iter__())
def close(self):
self.handle.close()
def __exit__(self, *args):
self.close()
def get_block_readers(filename: Path, n_readers, mode="t"):
index_filename = filename.parent / (filename.name + ".index")
if not index_filename.exists():
return [gzip.open(filename, "r" + mode)]
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
start = 0
readers = []
for i in range(n_readers):
end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)]
r = _blocked_gzip_reader(filename, start, end, mode)
readers.append(r)
start = end
return readers
def block_reader(filename: Path) -> Iterable[str]:
root, pattern = str(filename)[:-1].split("[", 1)
assert root.endswith(".gz"), "Can only read block of a .gz file for now."
ii, nn = pattern.strip().split("/")
i, n_readers = int(ii), int(nn)
index_filename = root + ".index"
assert os.path.exists(
index_filename
), f"Index {index_filename} not found for {filename}"
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
    # I'm not sure how to handle the case where there are fewer readers than expected.
# Currently we return empty readers.
start = 0
if i > 0:
start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)]
end = index[min(i * chunk_per_reader, n_chunks - 1)]
return _blocked_gzip_reader(root, start, end, mode="t")
def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]:
handle = gzip.open(filename, "r" + mode)
handle.seek(start)
try:
while handle.tell() < end:
line = handle.readline()
if not line:
break
yield line
finally:
handle.close()
class BlockedGzipWriter(MultiFile):
"""Writes a Gzip files which can be read by block.
Decreasing the block size may hurt compression, but provides more split points.
"""
def __init__(self, filename: Path, mode: str, block_size: str = "256M"):
assert "w" in mode
self.filename = Path(filename)
self.index: List[int] = []
self.zipfile: Optional[gzip.GzipFile] = None
super().__init__([], mode, block_size)
def _open_next_handle(self) -> bool:
"""Here we never actually close/open handles,
we just write the end of block sequence."""
if not self.current_handle:
mode = self.mode + "t"
self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode))
assert isinstance(self.current_handle.buffer, gzip.GzipFile)
self.zipfile = self.current_handle.buffer
return True
# Use Z_FULL_FLUSH to allow random access:
# https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313
self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore
self.index.append(self.current_handle.tell())
self.current_block_size = 0
return True
def flush(self):
assert self.current_handle is not None
self.current_handle.flush()
def close(self):
if self.current_handle is None:
return
self.current_handle.flush()
self.index.append(self.current_handle.tell())
self.current_handle.close()
self.current_handle = None
index = np.array(self.index, dtype=np.uint64)
with open(str(self.filename) + ".index", "wb") as o:
np.save(o, index)
def grouper(iterable, n):
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if group:
yield group
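# Illustrative sketch (added for clarity, not part of the original module):
# `grouper` yields fixed-size chunks, with a possibly smaller trailing chunk.
# The `_example_*` helper name is hypothetical.
def _example_grouper():
    assert list(grouper(range(5), 2)) == [[0, 1], [2, 3], [4]]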
PROCESS = psutil.Process()
def mem_footprint_gb(pid=None):
rss = PROCESS.memory_info().rss
return rss / 1_000_000_000
def _tmp(output: Path) -> Path:
suffix = "".join(output.suffixes)
suffix = ".tmp" + suffix
prefix = output.name[: -len(suffix)]
_, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix)
return Path(tmp_path)
@functools.lru_cache()
def _tmp_dir() -> Path:
job_id = os.environ.get("SLURM_JOB_ID")
if job_id:
return Path("/scratch/slurm_tmpdir") / job_id
checkpoint = Path("/checkpoint") / os.environ.get("USER", "")
if checkpoint.exists():
tmp = checkpoint / "tmp"
tmp.mkdir(exist_ok=True)
return tmp
return Path("/tmp")
if __name__ == "__main__":
multiprocessing.set_start_method("fork")
main(sys.argv[1:])
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/jsonql.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import functools
import itertools
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
import submitit
from typing_extensions import Protocol
class Executor(Protocol):
def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
...
class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
def __init__(self, fn: Callable):
self.fn = fn
self.__name__ = fn.__name__
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def get_executor(
name: str,
log_dir: Path,
execution: str,
timeout_hour: float = 1.0,
mem_gb: int = 1,
cpus: int = 1,
task_parallelism: int = -1,
options: dict = {},
) -> Executor:
execution_mode = execution.split(",")[0]
options.update(
{kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in
execution.split(",")[1:]}
)
if execution_mode == "mp":
warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
execution_mode = "local"
cluster = None if execution_mode == "auto" else execution_mode
# use submitit to detect which executor is available
ex = submitit.AutoExecutor(log_dir, cluster=cluster)
if task_parallelism == -1: # we are on slurm
ex.parameters['slurm_time'] = int(timeout_hour * 60)
else:
ex.parameters['timeout_min'] = int(timeout_hour * 60)
if ex.cluster == "local":
ex.parameters['timeout_min'] = int(timeout_hour * 60)
# LocalExecutor doesn't respect task_parallelism
return functools.partial(custom_map_array, ex, task_parallelism)
if ex.cluster == "debug":
ex.parameters['timeout_min'] = int(timeout_hour * 60)
return debug_executor
# We are on slurm
if task_parallelism == -1:
task_parallelism = 500
ex.update_parameters(
name=name,
slurm_time=int(timeout_hour * 60),
slurm_mem_per_cpu=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
else:
ex.update_parameters(
name=name,
timeout_min=int(timeout_hour * 60),
mem_gb=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
return functools.partial(map_array_and_wait, ex)
def map_array_and_wait(
ex: submitit.AutoExecutor, function: Callable[..., str],
*args: Iterable
):
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
approx_length = _approx_length(*args)
print(f"Submitting {f_name} in a job array ({approx_length} jobs)")
jobs = ex.map_array(function, *args)
if not jobs:
return
failed_jobs = []
done = 0
total = len(jobs)
job_array_id = jobs[0].job_id.split("_")[0]
print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).")
for job in submitit.helpers.as_completed(jobs):
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).",
job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def debug_executor(function: Callable[..., Optional[str]],
*args: Iterable) -> None:
logging.getLogger().setLevel(logging.DEBUG)
approx_length = _approx_length(*args)
for i, x in enumerate(zip(*args)):
try:
message = function(*x)
except Exception:
try:
import ipdb as pdb # type: ignore
except ImportError:
import pdb # type: ignore
import traceback
traceback.print_exc()
print("")
pdb.post_mortem()
sys.exit(1)
if message is not None:
print(message, f"({i + 1} / {approx_length})")
def _approx_length(*args: Iterable):
for a in args:
if isinstance(a, Sized):
return len(a)
return -1
def custom_map_array(
ex: submitit.AutoExecutor,
parallelism: int,
function: Callable[..., Optional[str]],
*args: Iterable,
) -> None:
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
jobs_args = list(zip(*args))
total = len(jobs_args)
if parallelism < 0:
parallelism = os.cpu_count() or 0
assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
print(
f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
enqueued = 0
done = 0
running_jobs: List[submitit.Job] = []
failed_jobs: List[submitit.Job] = []
while done < len(jobs_args):
# Try to queue more job if we have some bandwidth.
if enqueued < total and len(running_jobs) < parallelism:
running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
enqueued += 1
continue
# Else wait for some job to finish
if not running_jobs:
warnings.warn(
f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
)
break
job = get_next_job(running_jobs)
running_jobs.remove(job)
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).",
job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def get_next_job(
jobs: Sequence[submitit.Job], poll_frequency: float = 10
) -> submitit.Job:
"""
    Waits for any of the jobs to finish and returns it.
jobs: list of jobs
poll_frequency: frequency in second at which we check job status
"""
start = time.time()
waiting = False
while True:
for job in jobs:
if job.done():
return job
if not waiting:
job_ids = [j.job_id for j in jobs[:4]]
suffix = "..." if len(jobs) > 4 else ""
print(
f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}"
)
waiting = True
time.sleep(poll_frequency)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/execution.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import warnings
from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
import numpy as np
HASH_TYPE: Type[np.uint64] = np.uint64
GETPY_WARNING = False
class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
"""A dict-like that returns `True` for keys that have been added more than once.
    The API is batched and expects np.array as input. This batching grants better
perf when using the C++ implementation.
"""
dtype: Type[np.uint64] = HASH_TYPE
def __repr__(self):
implementation = type(self).__name__
return f"[{implementation}, len: {len(self)}"
def __len__(self) -> int:
...
def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
...
def __getitem__(self, values) -> np.ndarray:
...
def __setitem__(self, keys, values) -> None:
...
def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
...
def keys(self) -> Iterable[np.uint64]:
...
def __iter__(self) -> Iterator[np.uint64]:
return iter(self.keys())
def add(self, h, contains=None):
"""Add the given keys. First time a key is added the value is set to 0,
then it's set to one."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
if contains is None:
contains = self.__contains__(h)
self.__setitem__(h, contains)
return contains
def merge(self, keys, values):
contains = self.__contains__(keys)
self.__setitem__(keys, contains | values)
def dump(self, filename):
return self.dump_np(filename)
def load(self, filename):
return self.load_np(filename)
def dump_np(self, filename):
kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
with open(filename, "wb") as f:
np.save(f, items)
def load_np(self, filename):
items = np.load(str(filename))
keys = items["k"].copy()
values = items["v"].copy()
self.merge(keys, values)
def dump_np2(self, filename):
keys = np.fromiter(
(k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
)
with open(filename, "wb") as f:
np.save(f, keys)
values = np.fromiter(
(v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
)
with open(str(filename) + ".val", "wb") as f:
np.save(f, values)
def load_np2(self, filename):
keys = np.load(filename)
values = np.load(str(filename) + ".val")
self.merge(keys, values)
class NaiveHashSet(dict, AbstractDedupHashSet):
"""Pure python implementation of AbstractDedupHashSet.
    This implementation is quite fast, since Python dicts are heavily optimized.
"""
def __init__(self, iterable=None):
super().__init__()
global GETPY_WARNING
if GETPY_WARNING:
warnings.warn(
"Module 'getpy' not found. Deduplication will take more RAM."
" Try `pip install cc_net[getpy]"
)
GETPY_WARNING = False
def __contains__(self, values):
"""Returns `True` if the object has been added at list once."""
contains_point = super().__contains__
return np.fromiter(
map(contains_point, values), count=len(values), dtype=np.uint8
)
def __getitem__(self, values):
"""Returns `True` if the object has been added at list twice."""
get_point = super().get
return np.fromiter(
map(lambda x: get_point(x, False), values),
count=len(values),
dtype=np.uint8,
)
def __setitem__(self, keys, values):
assert len(keys) == len(values)
for k, v in zip(keys, values):
dict.__setitem__(self, k, v)
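# Illustrative sketch (added for clarity, not part of the original module): the
# dedup hash sets map a hash to 0 the first time it is added and to 1 once it
# has been seen again, so `h[keys]` answers "was this key added more than once?".
# The `_example_*` helper name is hypothetical.
def _example_dedup_hash_set():
    h = NaiveHashSet()
    keys = np.array([1, 2, 3], dtype=HASH_TYPE)
    first = h.add(keys)   # nothing seen before: all zeros
    second = h.add(keys)  # every key is now a duplicate: all ones
    assert list(first) == [0, 0, 0]
    assert list(second) == [1, 1, 1]
    assert list(h[keys]) == [1, 1, 1]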
try:
import getpy as gp # type: ignore
class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
"""C++ backed implementation of AbstractDedupHashSet.
This implementation is slightly slower than the Python one but uses
3x less RAM.
See https://github.com/atom-moyer/getpy.
"""
def __init__(self):
super().__init__(HASH_TYPE, np.uint8, default_value=False)
def __contains__(self, h):
"""Returns `True` if the object has been added at list once."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
c = gp.Dict.__contains__(self, h)
c.dtype = np.uint8
return c
def dump(self, filename):
return self.dump_gp(filename)
def load(self, filename):
return self.load_gp(filename)
def dump_gp(self, filename):
return gp.Dict.dump(self, str(filename))
def load_gp(self, filename):
"""Override gp.Dict.load, to correctly merge values instead of overwriting."""
other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
other.load(str(filename))
n = len(other)
keys = np.fromiter(
(k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
)
values = np.fromiter(
(v for (k, v) in other.items()), dtype=np.uint8, count=n
)
self.merge(keys, values)
FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
except ImportError:
GETPY_WARNING = True
FlatHashSet = NaiveHashSet
def timeit(message, function, *args):
start = time.time()
function(*args)
end = time.time()
print(message, f"took {end - start:.0f}s")
def compare_load(*filenames):
assert filenames, "No file given"
def load_list():
hashes = []
for f in filenames:
h = FlatHashSet()
h.load(f)
print(f"Loaded {h} from {f}.")
hashes.append(h)
return hashes
def load_all(load, ext):
hashes = FlatHashSet()
for f in filenames:
load(hashes, f + ext)
def dump_all(hashes, dump, ext):
for h, f in zip(hashes, filenames):
dump(h, f + ext)
hashes = load_list()
dump_gp = getattr(FlatHashSet, "dump_gp")
if dump_gp is not None:
timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
timeit(
"Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
)
load_gp = getattr(FlatHashSet, "load_gp")
if load_gp is not None:
timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
# Loading 10 shards:
# [dedup] Dumping using gp.dump took 52s
# [dedup] Dumping using dump_np took 270s
# [dedup] Dumping using dump_np2 took 483s
#
# [dedup] Loading using gp.load took 654s
# [dedup] Loading using load_np took 82s
# [dedup] Loading using load_np2 took 76s
if __name__ == "__main__":
compare_load(*sys.argv[1:])
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/flat_hash_set.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import base64
import hashlib
import itertools
import urllib.parse
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.execution import get_executor
from cc_net.jsonql import mem_footprint_gb
HASH_SIZE = 4
HASH_TYPE = np.uint32
PUBLIC_FIELDS = ["url", "digest"]
COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"]
DATA = Path(__file__).parent.parent / "data"
# This is similar to dedup methods but with use 32 bits hashes.
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def _str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
def get_hashes(lines: Iterable[str]) -> List[bytes]:
h = HASH_SIZE
return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines]
def encode_hashes(hashes: Iterable[bytes]) -> str:
return base64.b64encode(b"".join(hashes)).decode("ascii")
def encode_as_hashes(lines: Iterable[str]) -> str:
return encode_hashes(get_hashes(lines))
def decode_hashes(compact: str) -> List[bytes]:
all_hashes = base64.b64decode(compact)
res = []
assert len(all_hashes) % HASH_SIZE == 0
for i in range(len(all_hashes) // HASH_SIZE):
chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE]
res.append(chunk)
return res
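# Illustrative sketch (added for clarity, not part of the original module): line
# hashes are the first HASH_SIZE bytes of each line's sha1, concatenated and
# base64-encoded; `decode_hashes` reverses `encode_hashes`. The `_example_*`
# helper name is hypothetical.
def _example_hash_roundtrip():
    hashes = get_hashes(["Hello", "World"])
    assert all(len(h) == HASH_SIZE for h in hashes)
    assert decode_hashes(encode_hashes(hashes)) == hashes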
def encode_line_ids(line_ids: Sequence[int]) -> str:
arr = np.array(line_ids, dtype="<u2")
return base64.b64encode(arr.tobytes()).decode("ascii")
def decode_line_ids(compact: str) -> List[int]:
ids_bytes = bytearray(base64.b64decode(compact))
return np.ndarray(len(ids_bytes) // 2, dtype="<i2", buffer=ids_bytes)
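# Illustrative sketch (added for clarity, not part of the original module): kept
# line ids are packed as little-endian 16-bit integers, so they round-trip for
# ids below 2**15. The `_example_*` helper name is hypothetical.
def _example_line_ids_roundtrip():
    ids = [0, 1, 5, 42]
    assert list(decode_line_ids(encode_line_ids(ids))) == ids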
def get_doc_key(digest: str) -> int:
assert digest.startswith("sha1:")
h = base64.b32decode(digest[5:])
return _b2i(h[:HASH_SIZE])
class Minifier(jsonql.Transformer):
ready = True
def __init__(self):
self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS)
def do(self, doc: dict) -> Optional[dict]:
line_ids: List[int] = doc.pop("line_ids")
fields = self.fields
keys = list(doc.keys())
for k in keys:
if k not in fields:
doc.pop(k, None)
p = doc.get("perplexity", 0)
doc["line_ids"] = encode_line_ids(line_ids)
if p:
doc["perplexity"] = round(p, 1)
s = doc.get("language_score", 0)
if s:
doc["language_score"] = round(s, 2)
return doc
class MetadataFetcher(jsonql.Transformer):
"""Reads documents from CC snapshot and join precomputed metadata.
CC snapshots are split in segments. Each segment is 64Mb long.
The metadata must also be stored in segments of the same size and names.
"""
def __init__(self, folder: Union[Path, str]):
self.ready = True
self.metadata: Dict[int, dict] = {}
self._segments: Set[str] = set()
self.read_doc = 0
self.missed_doc = 0
self.missed_par = 0
self.processed_par = 0
if isinstance(folder, str):
# detect path passed as string
if urllib.parse.urlparse(folder).scheme == "":
folder = Path(folder)
assert folder.exists(), f"Metadata folder not found: {folder}"
self.folder = folder
self.segment: str = ""
self.segments_read_twice = 0
def meta_file(self, segment: str) -> str:
file_name = segment.split("/")[-1]
assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet")
if isinstance(self.folder, str):
return urllib.parse.urljoin(
self.folder, file_name.replace(".warc.wet", ".json")
)
meta_file = self.folder / file_name.replace(".warc.wet", ".json")
assert (
meta_file.exists()
), f"Couldn't find metadata file for segment {segment} at {meta_file}"
return str(meta_file)
def fetch_metadata(self, segment: str) -> None:
meta_file = self.meta_file(segment)
k = get_doc_key
self.metadata = {}
collision = 0
for m in jsonql.read_jsons(meta_file):
key = k(m["digest"])
if key in self.metadata:
collision += 1
self.metadata[key] = m
self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}")
if collision > 0:
self._logger.warning(f"Found {collision} collisions !")
self.segment = segment
if segment in self._segments:
self.log("Cache miss")
self.segments_read_twice += 1
self._segments.add(segment)
def do(self, doc: dict) -> Optional[dict]:
if self.segment != doc["cc_segment"]:
self.fetch_metadata(doc["cc_segment"])
digest = doc["digest"]
key = get_doc_key(digest)
if key not in self.metadata:
return None
metadata = self.metadata.pop(key)
return self.clean(metadata, doc)
def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]:
line_ids = decode_line_ids(metadata.pop("line_ids"))
lines = full_doc["raw_content"].split("\n")
cleaned = []
for l in line_ids:
if l >= len(lines) or l < 0:
self.missed_par += 1
continue
cleaned.append(lines[l])
self.processed_par += len(line_ids)
if not cleaned:
self.missed_doc += 1
return None
full_doc["raw_content"] = "\n".join(cleaned)
full_doc["original_nlines"] = full_doc["nlines"]
full_doc["original_length"] = full_doc["length"]
full_doc["nlines"] = len(cleaned)
full_doc["length"] = len(full_doc["raw_content"])
for key, value in metadata.items():
full_doc[key] = value
return full_doc
def summary(self) -> List[str]:
summ = super().summary()
mem = mem_footprint_gb()
len_cache = len(self.metadata)
summ.append(
f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g."
)
if self.missed_doc:
r = self.missed_doc / self.processed
summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !")
if self.missed_par:
r = self.missed_par / self.processed
summ.append(f"! Missed {self.missed_par} paragraphs ({r:.1%}) !")
return summ
def _expand_files(files: List[Path]) -> List[Path]:
if len(files) == 1 and files[0].is_dir():
folder = files[0]
files = sorted(folder.glob("*.json.gz"))
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert files, "No files found"
return files
def minify_file(file: Path, output: Path) -> str:
"""Minify the given file."""
jsonql.run_pipes(Minifier(), file=file, output=output)
return f"Minified {output}"
def minify(
files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1
):
"""Minify all the files in the given folder."""
files = _expand_files(files)
output_dir.mkdir(exist_ok=True)
with open(output_dir / "files.txt", "w") as o:
for f in files:
print(f.name, file=o)
outputs = [output_dir / f.name for f in files]
ex = get_executor(
"minify",
output_dir / "logs",
execution,
timeout_hour=2,
cpus=1,
task_parallelism=parallelism,
)
ex(minify_file, files, outputs)
def fetch_metadata_file(
file: Union[Path, str],
metadata_dir: Union[Path, str],
output: Path,
cache_dir: Path = None,
):
unminifier = MetadataFetcher(metadata_dir)
tmp = output.with_name("tmp." + output.name)
jsonql.run_pipes(unminifier, file=file, output=tmp)
tmp.rename(output)
return f"Fetched metadata for {file}. Results at {output}."
def fetch_metadata(
files: List[str],
metadata_dir: Union[Path, str],
output_dir: Path,
execution: str = "mp",
parallelism: int = -1,
cache_dir: Path = None,
):
if len(files) == 1 and Path(files[0]).is_dir():
folder = Path(files[0])
files = [str(f) for f in sorted(folder.glob("*.json.gz"))]
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert len(files) > 0, "No files given."
output_dir.mkdir(exist_ok=True)
outputs = [output_dir / str(f).split("/")[-1] for f in files]
if cache_dir is None:
cache_dir = output_dir / "wet_cache"
cache_dir.mkdir(exist_ok=True)
if str(cache_dir) == "none":
cache_dir = None
files = [f for f, o in zip(files, outputs) if not o.exists()]
outputs = [o for o in outputs if not o.exists()]
if not files:
return
ex = get_executor(
"unminify",
output_dir / "logs",
execution,
timeout_hour=8,
cpus=1,
task_parallelism=parallelism,
mem_gb=32,
)
ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir))
if __name__ == "__main__":
import func_argparse
func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/minify.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import unicodedata
UNICODE_PUNCT = {
",": ",",
"。": ".",
"、": ",",
"„": '"',
"”": '"',
"“": '"',
"«": '"',
"»": '"',
"1": '"',
"」": '"',
"「": '"',
"《": '"',
"》": '"',
"´": "'",
"∶": ":",
":": ":",
"?": "?",
"!": "!",
"(": "(",
")": ")",
";": ";",
"–": "-",
"—": " - ",
".": ". ",
"~": "~",
"’": "'",
"…": "...",
"━": "-",
"〈": "<",
"〉": ">",
"【": "[",
"】": "]",
"%": "%",
"►": "-",
}
UNICODE_PUNCT_RE = re.compile(f"[{''.join(UNICODE_PUNCT.keys())}]")
def replace_unicode_punct(text: str) -> str:
return "".join((UNICODE_PUNCT.get(c, c) for c in text))
def remove_unicode_punct(text: str) -> str:
"""More aggressive version of replace_unicode_punct but also faster."""
return UNICODE_PUNCT_RE.sub("", text)
def strip_accents(line: str) -> str:
"""Strips accents from a piece of text."""
nfd = unicodedata.normalize("NFD", line)
output = [c for c in nfd if unicodedata.category(c) != "Mn"]
    if len(output) == len(nfd):
return line
return "".join(output)
# Build a regex matching all control characters.
NON_PRINTING_CHARS_RE = re.compile(
f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
DIGIT_RE = re.compile(r"\d")
PUNCT_OR_NON_PRINTING_CHARS_RE = re.compile(
(UNICODE_PUNCT_RE.pattern + NON_PRINTING_CHARS_RE.pattern).replace("][", "")
)
def remove_non_printing_char(text: str) -> str:
return NON_PRINTING_CHARS_RE.sub("", text)
def normalize_spacing_for_tok(text: str, language: str = "en") -> str:
res = (
text.replace("\r", "")
# remove extra spaces
.replace("(", " (")
.replace(")", ") ")
.replace(" +", " ")
)
res = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)\1", res)
res = res.replace("( ", "(").replace(" )", ")")
res = re.sub(r"(\d) \%", r"\1\%", res)
res = res.replace(" :", ":").replace(" ;", ";")
res = res.replace("`", "'").replace("''", ' " ')
res = (
res.replace("„", '"')
.replace("“", '"')
.replace("”", '"')
.replace("–", "-")
.replace("—", " - ")
.replace(" +", " ")
.replace("´", "'")
.replace("([a-z])‘([a-z])", r"\1'\2/")
.replace("([a-z])’([a-z])", r"\1'\2/")
.replace("‘", '"')
.replace("‚", '"')
.replace("’", '"')
.replace("''", '"')
.replace("´´", '"')
.replace("…", "...")
# French quotes
.replace(" « ", ' "')
.replace("« ", '"')
.replace("«", '"')
.replace(" » ", '" ')
.replace(" »", '"')
.replace("»", '"')
# handle pseudo-spaces
.replace(" %", "%")
.replace("nº ", "nº ")
.replace(" :", ":")
.replace(" ºC", " ºC")
.replace(" cm", " cm")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ;", ";")
.replace(", ", ", ")
.replace(" +", " ")
.replace(".", ". ")
)
# English "quotation," followed by comma, style
if language == "en":
res = re.sub(r"\"([,\.]+)", r"\1\"", res)
# Czech is confused
elif language == "cs" or language == "cz":
pass
# German/Spanish/French "quotation", followed by comma, style
else:
res = res.replace(',"', '",')
res = re.sub(
r"(\.+)\"(\s*[^<])", r"\"\1\2", res
) # don't fix period at end of sentence
if (
language == "de"
or language == "es"
or language == "cz"
or language == "cs"
or language == "fr"
):
res = re.sub(r"(\d) (\d)", r"\1,\2", res)
else:
res = re.sub(r"(\d) (\d)", r"\1.\2", res)
return res
def normalize(line: str, accent=True, case=True, numbers=True, punct=1) -> str:
line = line.strip()
if not line:
return line
if case:
line = line.lower()
if accent:
line = strip_accents(line)
if numbers:
line = DIGIT_RE.sub("0", line)
if punct == 1:
line = replace_unicode_punct(line)
elif punct == 2:
line = remove_unicode_punct(line)
line = remove_non_printing_char(line)
return line
def slow_normalize_for_dedup(line: str) -> str:
return normalize(line, accent=False, case=True, numbers=True, punct=2)
def normalize_for_dedup(line: str) -> str:
line = line.strip()
if not line:
return line
# case
line = line.lower()
# numbers
line = DIGIT_RE.sub("0", line)
line = PUNCT_OR_NON_PRINTING_CHARS_RE.sub("", line)
return line
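# Illustrative sketch (added for clarity, not part of the original module):
# `normalize` lowercases, maps digits to "0" and normalizes punctuation, while
# `normalize_for_dedup` strips punctuation entirely before lines are hashed.
# The `_example_*` helper name is hypothetical.
def _example_normalize():
    assert normalize("Hello, World 42!") == "hello, world 00!"
    assert normalize_for_dedup("Hello, World 42!") == "hello world 00!"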
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/text_normalizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import subprocess
from pathlib import Path
from typing import List
import func_argparse
import numpy as np
from cc_net import jsonql
def get_index(file: Path) -> Path:
return file.parent / (file.name + ".index")
def _get_tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Read the given files and concatenate them to the output file.
Can remove original files on completion, or just write dummy content into them to free disk.
"""
if tmp is None:
tmp = _get_tmp(output)
logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}")
jsonql.run_pipes(file=inputs, output=tmp)
tmp.replace(output)
tmp_index = get_index(tmp)
if tmp_index.exists():
tmp_index.replace(get_index(output))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def fast_reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Same as reshard but don't re-compress the output.
This will lead to a bigger output file, especially if the shards are very small.
"""
if tmp is None:
tmp = _get_tmp(output)
with open(tmp, "wb") as o:
subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o)
tmp.replace(output)
indexes_files = [get_index(i) for i in inputs]
existing_indexes = sum(i.exists() for i in indexes_files)
assert (
existing_indexes == len(indexes_files) or existing_indexes == 0
), "some indexes don't exist."
if existing_indexes > 0:
indexes = [np.load(idx) for idx in indexes_files]
for i in range(len(indexes) - 1):
indexes[i + 1] += indexes[i][-1]
with open(str(output) + ".index", "wb") as o:
np.save(o, np.concatenate(indexes))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def determine_groups(
inputs: List[Path], target_size: int = 4 * 1024 ** 3
) -> List[List[Path]]:
if len(inputs) == 0:
return []
sample = inputs[:10]
typical_size = sum(s.stat().st_size for s in sample) / len(sample)
group_size = min(target_size // typical_size, len(inputs))
group_size = max(group_size, 1)
return jsonql.grouper(inputs, group_size)
if __name__ == "__main__":
func_argparse.single_main(reshard)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/regroup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union
import kenlm # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
import sentencepiece # type: ignore
from cc_net import jsonql, text_normalizer
LMDescriptor = Union[Dict[str, Path], Union[Path, str]]
def get_args():
parser = argparse.ArgumentParser(
description="Compute the score of each sentences of a document",
parents=[jsonql.io_parser()],
)
parser.add_argument("--models", type=str)
parser.add_argument("--sentences", action="store_true", default=False)
parser.add_argument(
"--languages", type=str, help="Ignore doc with another language"
)
parser.add_argument("--field", type=str, default=None)
parser.add_argument("--newline", type=str, default="\n")
return vars(parser.parse_args())
def pp(log_score, length):
return 10.0 ** (-log_score / length)
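# Illustrative sketch (added for clarity, not part of the original module): `pp`
# turns a summed log10 score and a length into a perplexity, e.g. an average
# log10 score of -2 per token gives a perplexity of 100. The `_example_*`
# helper name is hypothetical.
def _example_pp():
    assert abs(pp(-200.0, 100) - 100.0) < 1e-6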
class SentencePiece(jsonql.Transformer):
    # SentencePiece models have to be read back from disk.
warning_when_pickling = True
def __init__(
self,
model: Path,
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.model = model
self.field = field
self.output_field = output_field
self.normalize = normalize
self.sp: sentencepiece.SentencePieceProcessor = None
def _prepare(self):
if self.sp is not None:
return
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.load(str(self.model))
return self
def do(self, document: dict) -> dict:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
tokenized = self.sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class MultiSentencePiece(jsonql.Transformer):
warning_when_pickling = True
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.field = field
self.output_field = output_field
self.normalize = normalize
self._prefetch: Sequence[str] = []
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.sp: Dict[str, sentencepiece.SentencePieceProcessor] = {}
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_sp(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_sp(self, lang) -> Optional[sentencepiece.SentencePieceProcessor]:
sp = self.sp.get(lang)
if sp is not None:
return sp
if lang not in self.models:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
sp = sentencepiece.SentencePieceProcessor()
sp.load(str(self.models[lang]))
self.sp[lang] = sp
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return sp
def do(self, document: dict) -> Optional[dict]:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
sp = self.get_sp(document.get("language"))
if sp is None:
return document
tokenized = sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class DocLM(jsonql.Transformer):
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "perplexity",
newline: str = "\n",
normalize: bool = True,
load_method: int = 2,
):
super().__init__()
self.field = field
self.output_field = output_field
self.newline = newline
self.normalize = normalize
self._prefetch: Sequence[str] = []
self.lm_config = kenlm.Config()
# This is the default settings
# POPULATE will mmap the models and populate the pages.
# Maybe that's not the best way when the models are on a network disk.
# TODO: try copying models file, try READ or PARALLEL_READ
self.lm_config.load_method = load_method
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.lm: Dict[str, kenlm.Model] = {}
self.n_lines = 0
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_lm(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_lines(self, document: dict) -> List[str]:
lang = document.get("language")
if not lang:
return []
if lang not in self.models:
return []
content = document.get(self.field)
if not content:
return []
lines = content.split(self.newline)
self.n_lines += len(lines)
return lines
def get_lm(self, lang: Optional[str]) -> Optional[kenlm.Model]:
if lang is None:
return None
lm = self.lm.get(lang)
if lm is not None:
return lm
model = self.models.get(lang)
if model is None:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
lm = kenlm.Model(str(model), self.lm_config)
self.lm[lang] = lm
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return lm
def do(self, document: dict) -> dict:
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return document
doc_log_score, doc_length = 0, 0
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
document[self.output_field] = round(pp(doc_log_score, doc_length), 1)
return document
def summary(self):
delay = time.time() - self.start_time
h = delay / 3600
s = self.n_lines / delay
summ = super().summary()
summ.append(f"Processed {self.n_lines:_} lines in {h:.2}h ({s:.1} lines/s).")
return summ
class SentencesLM(DocLM):
"""Returns the score of each individual paragraph."""
def do(self, document: dict) -> Optional[str]: # type: ignore
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return None
sentences = []
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
sentences.append(f"{pp(log_score, length)}\t{line}")
return "\n".join(sentences)
class PerplexityBucket(jsonql.Transformer):
def __init__(
self, cutoff_csv: Path, percentile_head: int = 30, percentile_tail: int = 60
):
super().__init__()
self.cutoff_csv = cutoff_csv
self.percentile_head = percentile_head
self.percentile_tail = percentile_tail
self.cutoffs: Dict[str, Tuple[float, float]] = {}
def _prepare(self) -> None:
cutoffs = pd.read_csv(self.cutoff_csv, index_col=0)
self.cutoffs = {
l: (cutoffs[l][self.percentile_head], cutoffs[l][self.percentile_tail])
for l in cutoffs.columns
}
def get_bucket(self, doc: dict) -> str:
perplexity = doc.get("perplexity", -1)
lang = doc.get("language")
if lang not in self.cutoffs or perplexity < 0:
return "all"
pp_head, pp_tail = self.cutoffs[lang]
if perplexity < pp_head:
return "head"
if perplexity < pp_tail:
return "middle"
return "tail"
def do(self, doc: dict) -> dict:
doc["bucket"] = self.get_bucket(doc)
return doc
class DropKeys(jsonql.Transformer):
def __init__(self, *keys):
super().__init__()
self.keys = keys
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
for key in self.keys:
document.pop(key, None)
return document
class RemoveSmall(jsonql.Transformer):
def __init__(self, field, min_len):
super().__init__()
self.field = field
self.min_len = min_len
self.removed = 0
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
content = document.get(self.field)
if not content or len(content) < self.min_len:
self.removed += 1
return None
return document
def summary(self):
r, n = self.removed, self.processed
ratio = r / n if n else 0
return [f"Removed {r} small documents out of {n} ({ratio:.1%})"]
def perplexity_to_bin(file: Path, output: Path, models, tok_field: str):
pp_field = "perplexity"
lm = DocLM(models, tok_field, output_field=pp_field)
stats: List[float] = []
max_stats = 1_000_000
batch_size = 100_000
i = 0
batch = []
with open(output, "wb") as o:
for doc in jsonql.read_jsons(file):
i += 1
pp = lm(doc)[pp_field]
if len(stats) < max_stats:
stats.append(pp)
batch.append(pp)
if len(batch) >= batch_size:
np.array(batch, dtype=np.float32).tofile(o)
batch = []
if len(batch) > 0:
np.array(batch, dtype=np.float32).tofile(o)
if __name__ == "__main__":
args = get_args()
output = Path(args["output"])
if output.suffix == ".bin":
perplexity_to_bin(args["file"], output, args["models"], args["field"])
else:
jsonql.run_pipe(DocLM, args)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/perplexity.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
delay = (time.time() - self.start_time) / 3600
speed = self.n_docs / delay
return [
f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
]
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/tokenizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to remove duplicate paragraphs across one or several shards.
"""
import argparse
import gc
import hashlib
import logging
import multiprocessing
import os
import tempfile
import time
from pathlib import Path
from typing import Iterable, List, Optional, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
from cc_net.jsonql import mem_footprint_gb
from cc_net.text_normalizer import normalize_for_dedup
BYTE_ORDER = "little"
HASH_SIZE = HASH_TYPE(0).nbytes
DISABLE_MULTI_PROCESSING = False
FilesOrDir = Union[List[Path], Path]
def get_args():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them",
parents=[jsonql.io_parser()],
)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--output_hashes", type=str)
parser.add_argument("--no_finalize", action="store_false", dest="finalize")
# parser.add_argument("--mem_gb", type=int)
parser.add_argument("--hashes", type=str)
return vars(parser.parse_args())
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
log = logging.getLogger(__name__).info
def run_par(processes):
# This is different from multiprocessing.map since it allows for kwargs.
processes = list(processes)
if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
for f, args, kwargs in processes:
f(*args, **kwargs)
return
log(f"Starting {len(processes)} subprocess")
processes = [
multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes
]
for p in processes:
p.start()
for p in processes:
p.join()
failed = 0
for p in processes:
if p.exitcode != 0:
log(f"Process failed with code {p.exitcode}: {p}")
failed += 1
assert failed == 0, f"{failed} processes failed..."
def split_file(file, n_splits):
for i in range(n_splits):
yield jsonql.SplitFile(file, i, n_splits)
def merge(hashes_1, hashes_2, output):
if isinstance(hashes_1, str):
h1 = FlatHashSet()
h1.load(hashes_1)
else:
h1 = hashes_1
if isinstance(hashes_2, str):
h2 = FlatHashSet()
h2.load(hashes_2)
else:
h2 = hashes_2
h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
dup = h1.__contains__(h2_np)
# Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
# h1 with their value.
h1[h2_np] = dup
if output:
h1.dump(output)
return h1
def merge_shard(hash_files, output):
h = FlatHashSet()
h.load(hash_files[0])
for hash_file in hash_files[1:]:
h = merge(h, hash_file, output=None)
print(f"Merged {hash_file}. We now have {len(h)} hashes.")
h.dump(output)
print(f"Saved {len(h)} hashes to {output}.")
def _dump_sentence_hashes(source: Path, output: Path, field: str):
treated = 0
started = time.time()
with open(output, "wb") as o:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content:
continue
h = compute_hashes(content)
if h is None:
continue
h.tofile(o)
treated += 1
if treated % 100_000 == 0:
delay = time.time() - started
log(
f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
)
def _remove_duplicate_hashes(duplicates, source, output):
batch_size = 100_000
n_lines, n_lines_kept = 0, 0
with open(source, "rb") as f, open(output, "wb") as o:
log(f"Opening {source} with mode rb")
log(f"Opening {output} with mode wb")
while True:
hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
if hashes.size == 0:
break
keep = duplicates[hashes] < 1
kept = keep.sum()
hashes *= keep
hashes.tofile(o)
n_lines += hashes.size
n_lines_kept += kept
removed = n_lines - n_lines_kept
selectivity = n_lines_kept / n_lines if n_lines else 0
log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
def remove_duplicates_sharded(
files: List[Path],
outputs: List[Path],
hashes_dir: FilesOrDir,
field: str,
group_hashes: int = 1,
tmp_dir: Path = None,
min_len: int = 0,
):
"""Remove duplicates in several passes, when all hashes don't fit in RAM.
Note: The current implementation is not doing a 'perfect' deduplication.
    If a hash appears exactly once in each shard of hashes it won't be detected
    as a duplicate. This can be fixed if hashes are fully deduplicated beforehand.
"""
assert len(files) == len(outputs)
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
if len(hashes_files) <= group_hashes:
log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
rm_dups = DuplicatesRemover(field, hashes_files)
rm_dups._prepare()
run_par(
(jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
for f, o in zip(files, outputs)
)
return
log(f"Starting deduplicate_sharded on {files}.")
tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
def tmp_files(i):
return [
Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
for f in files
]
last = tmp_files(0)
run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
hashes = FlatHashSet()
for h in group:
hashes.load(h)
log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
intermediates = tmp_files(i + 1)
# Remove hashes in parallel. Since modern OS have "copy-on-write" and
# `hashes` is read-only, we will only have one version of it in RAM.
run_par(
(_remove_duplicate_hashes, (hashes, f, tmp), {})
for f, tmp in zip(last, intermediates)
)
# Force hashes to be freed, before we start allocating a new one.
del hashes
gc.collect()
for tmp in last:
os.remove(tmp)
last = intermediates
def finalize(source, dedup_hashes, min_len):
n_chars, n_chars_kept = 0, 0
with open(dedup_hashes, "rb") as hashes:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content or len(content) < min_len:
continue
sentences = content.split("\n")
doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
chars, kept_chars = finalize_doc(doc, field, doc_hashes)
n_chars += chars
n_chars_kept += kept_chars
yield doc
selectivity = n_chars_kept / n_chars if n_chars else 0
log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
dedup_hashes = last
run_par(
[
(
jsonql.run_pipe,
(finalize,),
dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
)
for h, f, o in zip(dedup_hashes, files, outputs)
]
)
tmp_directory.cleanup()
def compute_hashes(content) -> Optional[np.ndarray]:
if not content:
return None
lines = content.split("\n")
# save hashes as bytes but reinterpret them as uint64.
hashes = np.fromiter(
(
hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
:HASH_SIZE
]
for l in lines
),
dtype=np.dtype((bytes, HASH_SIZE)),
count=len(lines),
)
return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
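# Quick sketch (not part of the original module): `compute_hashes` yields one
# hash per line, the first HASH_SIZE bytes of the line's SHA-1 digest viewed as
# HASH_TYPE. The sample text below is made up for illustration.
def _example_compute_hashes() -> None:
    h = compute_hashes("Hello\nWorld")
    assert h is not None and h.shape == (2,) and h.dtype == HASH_TYPE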
def finalize_doc(doc, field, hashes=None):
content = doc.get(field)
lines = content.split("\n")
n_chars = len(content)
if "original_nlines" not in doc:
doc["original_nlines"] = doc.get("nlines", len(lines))
if "original_length" not in doc:
doc["original_length"] = doc.get("length", n_chars)
if hashes is None:
hashes = doc.pop(field + "_hash")
# Remove duplicates inside doc
seen: Set[int] = set()
original_line_ids = doc.get("line_ids", range(len(hashes)))
line_ids = []
new_lines = []
for l, line, h in zip(original_line_ids, lines, hashes):
if h not in seen and h != 0:
line_ids.append(l)
new_lines.append(line)
seen.add(h)
doc[field] = "\n".join(new_lines)
doc["nlines"] = len(line_ids)
n_chars_kept = len(doc[field])
doc["length"] = n_chars_kept
doc["line_ids"] = line_ids
return n_chars, n_chars_kept
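# Illustrative sketch, not part of the original cc_net module: shows how
# `finalize_doc` drops lines whose hash was zeroed out as a duplicate.
# The tiny document and helper name below are made up for demonstration.
def _example_finalize_doc() -> None:
    doc = {"raw_content": "Hello\nWorld\nHello"}
    doc_hashes = compute_hashes(doc["raw_content"]).copy()
    doc_hashes[2] = 0  # pretend the third line was flagged as a duplicate
    n_chars, n_chars_kept = finalize_doc(doc, "raw_content", hashes=doc_hashes)
    assert doc["raw_content"] == "Hello\nWorld"
    assert doc["line_ids"] == [0, 1]
    assert (n_chars, n_chars_kept) == (17, 11)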
class HashesCollector(jsonql.Transformer):
"""
    Collect the hashes of all lines found in the `field` of the source documents.
"""
parallelisable = False
def __init__(
self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
):
super().__init__()
self.n_lines = 0
self.field = field
self.output = output
self.hashes = FlatHashSet() if hashes is None else hashes
self.num_hashes_end = 0
self.num_hashes_start = len(self.hashes)
def summary(self) -> List[str]:
summ = super().summary()
h = self.num_hashes_end if self.hashes is None else len(self.hashes)
h = (h - self.num_hashes_start) // 1000
max_mem = mem_footprint_gb()
n = self.n_lines // 1000
summ.append(
f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
)
return summ
def do(self, doc: dict) -> None:
doc_hashes = compute_hashes(doc.get(self.field))
if doc_hashes is None:
return
self.hashes.add(doc_hashes)
self.n_lines += doc_hashes.size
def close(self):
if self.output and self.hashes:
self.hashes.dump(self.output)
self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
# Save the number of hashes.
self.num_hashes_end = len(self.hashes)
# Free up mem even if the transformer is kept somewhere else.
self.hashes = None # type: ignore
class DuplicatesRemover(jsonql.Transformer):
"""DuplicatesRemover"""
# The hashes can't be pickled so they will have to be read back from disk.
warn_when_pickling = True
def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
"""
Remove duplicates
"""
super().__init__()
self.field = field
self.collect = collect
self.hashes_files = hashes_files
self.duplicates: Optional[AbstractDedupHashSet] = None
self.n_lines, self.n_lines_kept = 0, 0
self.n_chars, self.n_chars_kept = 0, 0
def _prepare(self):
if self.duplicates is not None:
return
self.duplicates = FlatHashSet()
start = time.time()
for h in self.hashes_files:
shard_start = time.time()
self.duplicates.load(str(h))
delay = time.time() - shard_start
self.log(
f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
)
delay = time.time() - start
self.log(
f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
)
def do(self, doc: dict) -> Optional[dict]:
content = doc.get(self.field)
if not content:
return None
doc_hashes = compute_hashes(content)
assert self.duplicates is not None
seen = (
self.duplicates.add(doc_hashes)
if self.collect
else self.duplicates[doc_hashes]
)
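        # `seen` is nonzero for lines whose hash is flagged as a duplicate
        # (or, when collecting, was already seen); those lines are dropped below.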
keep = seen < True
kept = keep.sum()
if kept == 0:
return None
doc_hashes = doc_hashes * keep
self.n_lines += keep.size
self.n_lines_kept += kept
chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
self.n_chars += chars
self.n_chars_kept += kept_chars
return doc
def summary(self) -> List[str]:
summ = super().summary()
end_time = time.time()
n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
speed = n_docs / (end_time - self.start_time)
summ.append(
f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
)
selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")
n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
selectivity = n_chars_kept / n_chars if n_chars else 0
summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
return summ
def deduplicate(
file: jsonql.ReadableFileLike, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (but keep the first occurence)."""
dup_remover = DuplicatesRemover(field, [], collect=True)
return dup_remover.map(jsonql.read_jsons(file))
def deduplicate_two_pass(
file: jsonql.FileDescriptor, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (even removing the first occurence).
This is what is done in the paper, and in mine.py
"""
try:
if isinstance(file, Path):
hash_file: Path = file.with_suffix(".bin")
else:
hash_file = jsonql._tmp(Path("hashes.bin"))
jsonql.run_pipes(
jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
)
dup_remover = DuplicatesRemover(field, [hash_file])
return dup_remover.map(jsonql.read_jsons(file))
finally:
if hash_file.exists():
hash_file.unlink()
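# Minimal two-stage usage sketch (an illustration, mirroring what
# `deduplicate_two_pass` does above): first collect line hashes to disk, then
# strip duplicated lines. The three paths are placeholders.
def _example_two_stage(file: Path, hashes_file: Path, output: Path) -> None:
    jsonql.run_pipes(HashesCollector("raw_content", output=hashes_file), file=file)
    jsonql.run_pipes(
        DuplicatesRemover("raw_content", [hashes_file]), file=file, output=output
    )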
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/dedup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import logging
import re
import tempfile
import time
import urllib.request
from pathlib import Path
from typing import ContextManager, Iterable, Iterator, List, Optional, Sequence
from urllib.parse import urlparse
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql
WET_URL_ROOT = "https://data.commoncrawl.org"
logger = logging.getLogger(__name__)
def cc_wet_paths_url(dump_id: str) -> str:
return "/".join([WET_URL_ROOT, "crawl-data", "CC-MAIN-" + dump_id, "wet.paths.gz"])
@functools.lru_cache()
def cc_segments(dump_id: str, cache_dir: Path = None) -> List[str]:
wet_paths = cc_wet_paths_url(dump_id)
cache_dir = cache_dir or jsonql._tmp_dir()
wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz"
f = jsonql.open_remote_file(wet_paths, cache=wet_paths_cache)
return [segment.strip() for segment in f]
def list_dumps() -> List[str]:
home_page = BeautifulSoup(
urllib.request.urlopen("http://index.commoncrawl.org/"), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in home_page.findAll("a")]
dumps = [a[8:] for a in dumps if re.match(r"^CC-MAIN-\d\d\d\d-\d\d$", a)]
return sorted(dumps)
def ls():
for dump in list_dumps():
print(dump, "->", cc_wet_paths_url(dump))
def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]:
"""BEFORE 2020, Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: [url]
WARC-Date: [crawldate: 2019-02-15T19:15:59Z]
WARC-Record-ID: <urn:uuid:8865156e-d5f1-4734-9c68-4b46eaf2bb7e>
WARC-Refers-To: <urn:uuid:340152e2-65cf-4143-b522-8ce4e2d069d7>
WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2
Content-Type: text/plain
Content-Length: 7743
AFTER 2020, Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: http://100greatpiano.com/video/wilhelm-kempff-plays-beethovens-moonlight-sonata/
WARC-Date: 2023-01-26T22:21:08Z
WARC-Record-ID: <urn:uuid:ccafeba8-a08b-47d0-86be-cf0855f4f6d0>
WARC-Refers-To: <urn:uuid:935a6ef4-8708-41f5-a152-412cdf1b48c1>
WARC-Block-Digest: sha1:2WURD74BLDCLPV6INBQEQ6OOJRQDPJBA
WARC-Identified-Content-Language: eng,jpn
Content-Type: text/plain
Content-Length: 886
"""
if not headers or not doc:
return None
try:
warc_type = headers[1].split()[1]
if warc_type != "conversion":
return None
url = headers[2].split()[1]
date = headers[3].split()[1]
digest = headers[6].split()[1]
        # Parse Content-Length: it sits at index 9 when the optional
        # WARC-Identified-Content-Language header is present, at index 8 otherwise.
try:
length = int(headers[9].split()[1])
except IndexError as e:
length = int(headers[8].split()[1])
except Exception as e:
logger.warning("Can't parse header:", e, headers, doc)
return None
# Docs are separated by two empty lines.
last = None
if not doc[-1] and not doc[-2]:
last = -2
title, doc = doc[0], doc[1:last]
return {
"url": url,
"date_download": date,
"digest": digest,
"length": length,
"nlines": len(doc),
"source_domain": urlparse(url).netloc,
"title": title,
"raw_content": "\n".join(doc),
}
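# Illustrative sketch (not part of the original module): feeds `parse_doc` a
# minimal post-2020 header block. The URL, record ids and digest are made up.
def _example_parse_doc() -> Optional[dict]:
    headers = [
        "WARC/1.0",
        "WARC-Type: conversion",
        "WARC-Target-URI: http://example.com/page",
        "WARC-Date: 2023-01-26T22:21:08Z",
        "WARC-Record-ID: <urn:uuid:0>",
        "WARC-Refers-To: <urn:uuid:1>",
        "WARC-Block-Digest: sha1:EXAMPLEDIGEST",
        "WARC-Identified-Content-Language: eng",
        "Content-Type: text/plain",
        "Content-Length: 11",
    ]
    # Documents end with two empty lines; the first line is used as the title.
    doc = ["A title", "Hello world", "", ""]
    return parse_doc(headers, doc)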
def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]:
doc: List[str] = []
headers, read_headers = [], True
for warc in warc_lines:
warc = warc.strip()
if read_headers:
headers.append(warc)
read_headers = warc != ""
continue
if warc == "WARC/1.0":
# We reached the beginning of the new doc.
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
headers, doc, read_headers = [warc], [], True
continue
doc.append(warc)
# Return the last document
if doc:
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
def parse_warc_file(lines: Iterable[str], min_len: int = 1) -> Iterator[dict]:
n_doc = 0
n_ok = 0
for doc in group_by_docs(lines):
n_doc += 1
if not doc or len(doc["raw_content"]) < min_len:
continue
n_ok += 1
yield doc
if n_doc > 0:
logger.info(f"Kept {n_ok:_d} documents over {n_doc:_d} ({n_ok / n_doc:.1%}).")
else:
logger.info(f"Found no documents")
def dl(
dump: str,
shard: int,
num_shards: int,
output: Path = None,
num_segments_per_shard: int = 0,
):
"""Download a shard of the common crawl, and export it to json.
Arguments:
output: filename of the output file
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: manual control of the number of segment per shard.
"""
reader = CCShardReader(dump, shard, num_shards, num_segments_per_shard)
jsonql.run_pipes(inputs=reader, output=output)
logger.info(f"Done. {output} is ready.")
class CCSegmentsReader(Iterable[dict]):
def __init__(
self, segments: Sequence[str], min_len: int = 0, cache_dir: Path = None
):
self._segments = segments
self.min_len = min_len
if cache_dir is not None:
cache_dir = Path(cache_dir)
cache_dir.mkdir(exist_ok=True)
self.cache_dir = cache_dir
self.retrieved_segments = 0
def segment_url(self, segment: str):
return "/".join((WET_URL_ROOT, segment))
@property
def segments(self) -> Sequence[str]:
return self._segments
def open_segment(self, segment: str) -> Iterable[str]:
url = self.segment_url(segment)
file: Optional[Path] = None
if self.cache_dir:
file = self.cache_dir / segment.split("/")[-1]
if not file or not file.exists():
self.retrieved_segments += 1
return jsonql.open_remote_file(url, cache=file)
def __iter__(self) -> Iterator[dict]:
n = len(self.segments)
for i, segment in enumerate(self.segments):
start = time.time()
# TODO: start downloading the next segment in the background
for doc in parse_warc_file(self.open_segment(segment), self.min_len):
doc["cc_segment"] = segment
yield doc
if i + 1 >= n:
continue
end = time.time()
delay = (end - start) / 3600 * (n - 1 - i)
logger.info(
f"Parsed {i + 1} / {n} files. Estimated remaining time: {delay:.1f}h"
)
class CCShardReader(CCSegmentsReader):
def __init__(
self,
dump: str,
shard: int,
num_shards: int = -1,
num_segments_per_shard: int = 40,
min_len: int = 300,
cache_dir: Path = None,
):
"""Downloads a shard of Common Crawl, and yields dict.
Arguments:
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: if set will limit the number of files by shard.
Useful for testing.
"""
super().__init__([], min_len=min_len, cache_dir=cache_dir)
self.dump = dump
self.shard = shard
assert num_shards > 0 or num_segments_per_shard > 0
self.num_shards = num_shards
self.num_segments_per_shard = num_segments_per_shard
@property
def segments(self) -> Sequence[str]:
        # Delaying the initialization postpones looking up the WET files until they are needed.
if self._segments:
return self._segments
segments = cc_segments(self.dump, self.cache_dir)
n = len(segments)
if self.num_shards < 0:
self.num_shards = n // self.num_segments_per_shard
i_min = (self.shard * n) // self.num_shards
i_max = ((self.shard + 1) * n) // self.num_shards
if self.num_segments_per_shard > 0:
i_max = min(i_max, i_min + self.num_segments_per_shard)
self._segments = segments[i_min:i_max]
return self._segments
def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path:
_, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
return Path(tmp_path)
@contextlib.contextmanager
def timer(name: str = "-"):
start = time.time()
yield None
delay = time.time() - start
print(f"{name} took {delay:.1f}s")
def benchmark(tmp_path: Path):
segments = [
"crawl-data/CC-MAIN-2019-09/segments/1550249406966.99/wet/CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
]
seg_file = tmp_path / "CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
with timer("from network"):
list(CCSegmentsReader(segments))
with timer("from network, with caching"):
list(CCSegmentsReader(segments, cache_dir=tmp_path))
assert seg_file.exists()
with timer("from disk"):
        list(CCSegmentsReader(segments, cache_dir=tmp_path))
seg_file.unlink()
if __name__ == "__main__":
func_argparse.main(ls, dl)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/process_wet_file.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import func_argparse
import cc_net.mine
def main():
func_argparse.parse_and_call(cc_net.mine.get_main_parser())
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/__main__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
from pathlib import Path
from typing import Dict, Optional
import fasttext # type: ignore
from cc_net import jsonql
def get_args():
parser = argparse.ArgumentParser(
description="Read a list of json files and split them ",
parents=[jsonql.io_parser()],
)
parser.add_argument("--pattern", type=str)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--threshold", type=float, default=0)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--out_field", type=str, default="language")
parser.add_argument("--top", type=int, default=1)
return vars(parser.parse_args())
def predict(model, text: str, k: int = 1):
labels, scores = model.predict(text, k=k)
labels = [l.replace("__label__", "") for l in labels]
return labels, scores
def avg_predict(model, text):
    # Overall this gives the same results as predict(model, text.replace("\n", ""))
text = text.split("\n")
text_len = sum(len(line) for line in text)
if text_len == 0:
return None, 0
scores = [predict(model, line) for line in text]
scores_by_label: Dict[str, float] = collections.defaultdict(float)
    for (labels, label_scores), line in zip(scores, text):
        scores_by_label[labels[0]] += label_scores[0] * len(line)
label, score = max(scores_by_label.items(), key=lambda kv: kv[1])
return label, score / text_len
class Classifier(jsonql.Transformer):
def __init__(
self,
model: Path,
field: str,
out_field: str,
threshold: float = 0,
top: int = 1,
language: str = None,
rounding: int = 2,
):
super().__init__()
self.model = model
assert model.exists(), f"Model {model} doesn't exist."
self.field = field
self.out_field = out_field
self.threshold = threshold
self.top = top
self.language = language
self.rounding = rounding
# Fasttext model is a C object and can't be pickled
self.fasttext_model: fasttext._FastText = None
self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0
self.cnt: Dict[str, int] = {}
def _prepare(self):
self.log(f"Loading {self.model}")
self.fasttext_model = fasttext.load_model(str(self.model))
def predict(self, text):
return predict(self.fasttext_model, text.replace("\n", ""), k=self.top)
def do(self, doc: dict) -> Optional[dict]:
text = doc.get(self.field, None)
if not text:
return None
if self.language and doc.get("language") != self.language:
self.n_ignored += 1
return doc
self.n_doc += 1
labels, scores = self.predict(text)
scores.round(self.rounding, out=scores)
for l in labels:
self.cnt[l] = self.cnt.get(l, 0) + 1
if self.top == 1:
existing_label = doc.get(self.out_field, None)
if existing_label and labels[0] != existing_label:
self.n_disagreement += 1
if all(s < self.threshold for s in scores):
return None
self.n_accepted += 1
if self.top == 1:
doc[self.out_field] = labels[0]
doc[self.out_field + "_score"] = scores[0]
else:
doc[self.out_field] = {l: s for l, s in zip(labels, scores)}
return doc
def summary(self):
n_doc, n_accepted, n_disagreement, cnt, out_field = (
self.n_doc,
self.n_accepted,
self.n_disagreement,
self.cnt,
self.out_field,
)
summ = super().summary()
if self.threshold > 0:
ratio = n_accepted / n_doc if n_doc else 0
summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})")
summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}")
disagreement = n_disagreement / n_doc if n_doc else 0
if disagreement:
summ.append(f"{out_field} disagreement is at {disagreement:.1%}.")
return summ
def __repr__(self):
return f"Classifier({self.model})"
def classify_and_split(file, output, pattern, **kwargs):
classifier = Classifier(**kwargs)
splitter = jsonql.split(pattern)
jsonql.run_pipes(classifier, splitter, file=file, output=output)
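# Usage sketch (an illustration, not part of the original script): tag each
# document with its detected language. The model path "lid.bin" and the
# file/output paths are placeholders; any fastText language-id model works.
def _example_classify(file: Path, output: Path) -> None:
    classifier = Classifier(
        model=Path("lid.bin"),
        field="raw_content",
        out_field="language",
        threshold=0.5,
    )
    jsonql.run_pipes(classifier, file=file, output=output)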
if __name__ == "__main__":
args = get_args()
pattern = args.get("pattern")
if pattern:
classify_and_split(**args)
else:
args.pop("pattern")
jsonql.run_pipe(Classifier, args)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/split_by_lang.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import gzip
import logging
import multiprocessing
from collections import defaultdict
from pathlib import Path
from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple
import cc_net
from cc_net import jsonql
from cc_net.process_wet_file import CCSegmentsReader
# Set this to a directory to use as cache for intermediary files.
# This helps for debugging.
WET_CACHE = None
# WET_CACHE = Path("wet_cache")
S3_BUCKET = "https://dl.fbaipublicfiles.com/cc100"
VERSION = "1.0.0"
CC_100_SNAPSHOTS = [
"2018-05",
"2018-09",
"2018-13",
"2018-17",
"2018-22",
"2018-26",
"2018-30",
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
]
BIG_LANGUAGES = {
"es_XX",
"fr_XX",
"de_DE",
"ja_XX",
"ru_RU",
"zh_CN",
"en_XX",
"it_IT",
"ar_AR",
"nl_XX",
"pl_PL",
"pt_XX",
"tr_TR",
"zh_TW",
}
class Paragraph(NamedTuple):
lang: str
text: str
lm_score: float
def _dl_shard(snapshot: str, shard: int) -> Iterator[Paragraph]:
"""
Download metadata from a shards.
Sample metadata:
{
"cc_segment": "crawl-data/CC-MAIN-2018-51/segments/1544376823009.19/wet/CC-MAIN-20181209185547-20181209211547-00000.warc.wet.gz",
"digest": "sha1:222LWNHN5FM26XGS7WJSMI6IISTVWBKJ",
"url": "http://personals.gearplay.com/ads/DRJONES.htm",
"line_ids": [10],
"languages": ["en_XX"],
"lm_scores": [-2.658],
}
"""
snapshot = snapshot.replace("-", "_")
name = f"snap_{snapshot}_batch_{shard}.json.gz"
url = "/".join([S3_BUCKET, VERSION, name])
shard_metadata: Dict[str, Dict[str, dict]] = defaultdict(dict)
try:
cache_file: Optional[Path] = None
if WET_CACHE is not None:
cache_file = WET_CACHE / name
metadata_file = jsonql.open_remote_file(url, cache_file)
    except Exception:
logging.warning(f"Couldn't open {url}")
return
for meta in jsonql.read_jsons(metadata_file):
shard_metadata[meta["cc_segment"]][meta["digest"]] = meta
found_pars, missed_pars = 0, 0
for seg, segment_metadata in shard_metadata.items():
for doc in CCSegmentsReader([seg], cache_dir=WET_CACHE):
if doc["digest"] not in segment_metadata:
continue
meta = segment_metadata[doc["digest"]]
full_pars = [doc["title"]] + doc["raw_content"].split("\n")
assert len(meta["line_ids"]) == len(meta["languages"])
assert len(meta["line_ids"]) == len(meta["lm_scores"])
for i, lang, score in zip(
meta["line_ids"], meta["languages"], meta["lm_scores"]
):
if snapshot != "2018-51" and lang in BIG_LANGUAGES:
# Big languages only come from "2018-51" snapshot
continue
if i >= len(full_pars):
                    # This happens because CC100 was created by saving only urls.
                    # Some urls appear in different snapshots with slightly different
                    # versions, but we don't know which one is correct.
                    # Here we read both versions, so some indices may end up
                    # being incorrect.
                    # This impacts ~3% of documents.
missed_pars += 1
continue
yield Paragraph(lang, full_pars[i], score)
found_pars += 1
if missed_pars > 0:
logging.warning(
f"Missed {missed_pars} ({missed_pars / found_pars:%}) paragraphes."
)
def _split_by_par(
paragraphes: Iterator[Paragraph], snapshot: str, shard: int, outdir: Path
) -> int:
outdir.mkdir(exist_ok=True)
outfiles = {}
num_pars = 0
try:
for par in paragraphes:
# MODIFY ME: filter paragraph if needed (languages, score, ...)
if par.lang not in outfiles:
(outdir / par.lang).mkdir(exist_ok=True)
outfile = outdir / par.lang / f"snap_{snapshot}_batch_{shard}.gz"
outfiles[par.lang] = gzip.open(outfile, "wt")
print(par.text, file=outfiles[par.lang])
num_pars += 1
finally:
for o in outfiles.values():
o.close()
logging.info(f"Extracted {num_pars:_d} paragraphs from shard {snapshot}_{shard}")
return num_pars
def dl_shard(snapshot: str, shard: int, outdir: Path) -> int:
return _split_by_par(_dl_shard(snapshot, shard), snapshot, shard, outdir)
@contextlib.contextmanager
def unordered_map(processes: int):
if processes == 0:
yield map
return
with multiprocessing.Pool(processes) as pool:
yield pool.imap_unordered
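# Small sketch (not in the original file) of how `unordered_map` is used:
# with processes=0 it degrades to the serial built-in `map`, which is handy
# for debugging. The worker (`len`) and the inputs below are placeholders.
def _example_unordered_map() -> int:
    with unordered_map(processes=0) as umap:
        return sum(umap(len, ["a", "bb", "ccc"]))  # 1 + 2 + 3 == 6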
def dl_snapshot(snapshot: str, outdir: Path, processes: int = 1) -> None:
_dl_shard = functools.partial(dl_shard, snapshot, outdir=outdir)
with unordered_map(processes) as umap:
num_pars = sum(umap(_dl_shard, range(500)))
logging.info(f"Extracted {num_pars:_d} paragraphs from snapshot {snapshot}.")
def dl(
snapshot: str = None, outdir: Path = Path("data_cc100"), processes: int = 1
) -> None:
"""
Download CC100 corpus.
Will create one text file per language and CC snapshot.
- snapshot: restrict to one snapshot. Useful for parallelization.
- outdir: output directory
- processes: number of processes to use
"""
if snapshot is None:
snapshots = CC_100_SNAPSHOTS
else:
snapshots = snapshot.split(",")
invalids = [s for s in snapshots if s not in CC_100_SNAPSHOTS]
assert not invalids, f"Invalid snapshots {invalids}, chose from {CC_100_SNAPSHOTS}"
for snapshot in snapshots:
dl_snapshot(snapshot, outdir, processes)
if __name__ == "__main__":
import func_argparse
func_argparse.single_main(dl)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/tools/dl_cc_100.py |
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/tools/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This code is used to train a fastText classifier to label document with DMOZ categories.
The data, distributed under the cc-by 3.0 license
(https://web.archive.org/web/20140605215533/http://www.dmoz.org/license.html),
can be downloaded from
https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz.
"""
import urllib.request
from io import StringIO
from pathlib import Path
from typing import Dict, Set
from urllib.parse import urlparse
import func_argparse
from lxml import etree # type: ignore
from cc_net import jsonql
TaggedUrls = Dict[str, Set[str]]
DMOZ_TAGS_URL = "https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz"
def add_tags(url: str, tags: Set[str], url2tags: TaggedUrls):
if url in url2tags:
url2tags[url] &= tags
else:
url2tags[url] = tags
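# Tiny sketch (not part of the original script) of the intersection semantics
# of `add_tags`: a URL seen twice keeps only the tags common to both records.
# The URL and tags below are made up.
def _example_add_tags() -> TaggedUrls:
    url2tags: TaggedUrls = {}
    add_tags("http://example.com", {"Arts", "Music"}, url2tags)
    add_tags("http://example.com", {"Music", "Shopping"}, url2tags)
    assert url2tags["http://example.com"] == {"Music"}
    return url2tags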
def load_tags(filename: Path = None) -> TaggedUrls:
if filename is None:
with StringIO("".join(jsonql.open_remote_file(DMOZ_TAGS_URL))) as dmoz:
tree = etree.parse(dmoz)
else:
tree = etree.parse(str(filename))
root = tree.getroot()
url2tags: Dict[str, Set[str]] = {}
for external_page in root.iterfind("{http://dmoz.org/rdf/}ExternalPage"):
url = external_page.get("about")
domain = urlparse(url).netloc
for topic in external_page.iterfind("{http://dmoz.org/rdf/}topic"):
# print(url, topic.text)
# Tags looks like Top/Arts/Animation/Anime/Collectibles
tags = set(topic.text.split("/")[1:])
add_tags(url, tags, url2tags)
add_tags(domain, tags, url2tags)
return url2tags
def dl(output: Path) -> None:
urllib.request.urlretrieve(DMOZ_TAGS_URL, output)
def make_corpus(file: Path, tags_file: Path = None, output: Path = None) -> None:
"""
Loads a tags file and create a training dataset using the given webpages.
Arguments:
- file: CC shard file
- tags_file: dmoz tagging file, (like the one produced by `dl`)
- output: ""
"""
url2tags = load_tags(tags_file)
with jsonql.open_write(output) as o:
for document in jsonql.read_jsons(file):
if not document:
continue
url = document["url"]
domain = document["source_domain"]
if url in url2tags:
tags = url2tags[url]
elif domain in url2tags:
tags = url2tags[domain]
else:
continue
if len(tags) == 0:
continue
fasttext_tags = ["__label__" + tag for tag in tags]
content = document["tokenized"].replace("\n", " ").lower()
if len(content) > 200:
print(" ".join(fasttext_tags), content, file=o) # type: ignore
if __name__ == "__main__":
func_argparse.single_main(make_corpus)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/tools/make_dmoz_corpus.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to search sentences in CC similar to sentences in another corpus.
"""
import functools
import logging
import math
import subprocess
from collections import Counter
from pathlib import Path
from typing import Iterable, List, Optional, Set, Tuple
import func_argparse
import submitit
from kenlm import Model as KenlmModel # type: ignore
from sentence_splitter import SentenceSplitter # type: ignore
from sentencepiece import SentencePieceProcessor # type: ignore
from cc_net import dedup, jsonql, perplexity, text_normalizer
KENLM = Path("./bin/lmplz")
KENLM_BUILD = Path("./bin/build_binary")
VOCAB_SIZE = 2 ** 16 - 10
PROCESSES = 16
def normalize(corpus: Path, output_dir: Path) -> Path:
normalized = output_dir / (corpus.stem + ".normalized")
if normalized.exists():
return normalized
print("Will normalize", corpus, "to", normalized)
jsonql.run_pipes(
jsonql.Mapper(text_normalizer.normalize),
file=corpus,
output=normalized,
processes=PROCESSES,
)
return normalized
# TODO use classic files directory.
def sp_model(lang: str) -> Path:
return Path(f"/checkpoint/guw/cc_clean/lm_sp/{lang}.sp.model")
def _dataset(dataset: Optional[Path], lang: str) -> Path:
return (
dataset
or Path("/datasets01_101/common_crawl/020919") / f"{lang}_head_*.json.gz"
)
class SentencePiece(jsonql.Transformer):
def __init__(self, model: Path):
super().__init__()
self.model = model
self.sp: SentencePieceProcessor = None # type: ignore
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.model))
def do(self, line: str) -> str:
return " ".join(self.sp.encode_as_pieces(line))
class ExtractSentences(jsonql.Transformer):
def __init__(
self,
sp_model: Path,
lm_model: Path,
field: str = "raw_content",
threshold: float = float("+inf"),
):
super().__init__()
self.sp_model = sp_model
self.lm_model = lm_model
self.field = field
self.threshold = threshold
self.sp: SentencePieceProcessor = None
self.lm: KenlmModel = None
self.splitter: SentenceSplitter = None
self.hashes: Set[int] = set()
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.sp_model))
self.splitter = SentenceSplitter("en")
self.lm = KenlmModel(str(self.lm_model))
def do(self, document: dict) -> Optional[str]:
content: Optional[str] = document.get(self.field)
if not content:
return None
all_sentences = [
s for l in content.split("\n") if l for s in self.splitter.split(text=l)
]
unique_sentences = []
for s in all_sentences:
if not s:
continue
h = dedup.str_hash(s)
if h in self.hashes:
continue
self.hashes.add(h)
unique_sentences.append(s)
scores = []
for sentence in unique_sentences:
normalized = text_normalizer.normalize(sentence)
pieces = self.sp.encode_as_pieces(normalized)
log_score = self.lm.score(" ".join(pieces))
pp = -1
if len(pieces):
pp = perplexity.pp(log_score, len(pieces))
scores.append(pp)
res = filter(
lambda pp_s: self.threshold > pp_s[0] > 0, zip(scores, unique_sentences)
)
return "\n".join(f"{pp}\t{s}" for (pp, s) in res) or None
def tokenize(corpus: Path, output_dir: Path, lang: str) -> Path:
tokenized = output_dir / (corpus.stem + ".tokenized")
if tokenized.exists():
return tokenized
print("Will SentencePiece", corpus, "to", tokenized)
jsonql.run_pipes(
SentencePiece(sp_model(lang)),
file=normalize(corpus, output_dir),
output=tokenized,
processes=PROCESSES,
)
return tokenized
def train_lm(
corpus: Path,
output_dir: Path,
lang: str = "en",
vocab_size: int = VOCAB_SIZE,
ngrams: int = 5,
):
lm_text_file = output_dir / (corpus.stem + ".arpa")
lm_bin_file = output_dir / (corpus.stem + ".arpa.bin")
if lm_bin_file.exists():
return lm_bin_file
assert KENLM.exists(), f"{KENLM} binary to train kenlm model not found."
normalized = normalize(corpus, output_dir)
tokenized = tokenize(normalized, output_dir, lang)
print("Will train LM", lm_text_file, "on", tokenized)
kenlm_cmd = [
str(KENLM),
f"--order={ngrams}",
"--memory=8G",
f"--temp_prefix={jsonql._tmp_dir()}",
f"--text={tokenized}",
f"--arpa={lm_text_file}",
f"--vocab_estimate={vocab_size}",
"--discount_fallback",
]
subprocess.run(kenlm_cmd, check=True)
print("Will create binary model", lm_bin_file, "from", lm_text_file)
subprocess.run([str(KENLM_BUILD), str(lm_text_file), str(lm_bin_file)], check=True)
return lm_bin_file
def uniform_sampling_wrt_perplexity(
paragraphes: Iterable[str],
rounding: float = 100.0,
cut: float = 1000.0,
samples: int = 20,
) -> Iterable[str]:
max_samples = math.floor(cut / rounding * samples)
n = 0
buckets = Counter([0.0])
logging.info(f"Will sample {max_samples} sentences.")
for lines in paragraphes:
for line in lines.split("\n"):
if not line:
continue
pp = float(line[: line.find("\t")])
pp = math.floor(pp / rounding) * rounding
if pp > cut:
continue
if buckets[pp] > samples:
continue
yield line
buckets[pp] += 1
if buckets[pp] > samples:
logging.info(f"Bucket {pp} is full ({samples} samples, {n} total)")
n += 1
if n > max_samples:
return
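# Illustrative sketch (not from the original file): the sampler consumes
# "perplexity<TAB>sentence" lines, buckets them by `rounding`, drops anything
# above `cut` and keeps at most `samples` sentences per bucket. The toy
# scores and sentences below are made up.
def _example_uniform_sampling() -> List[str]:
    scored = "\n".join(
        f"{pp}\tsentence {i}" for i, pp in enumerate([90.0, 95.0, 310.0, 2000.0])
    )
    # Yields the first three lines; the last one is dropped because its
    # perplexity exceeds `cut`.
    return list(
        uniform_sampling_wrt_perplexity([scored], rounding=100.0, cut=1000.0, samples=2)
    )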
def sample(
corpus: Path,
output_dir: Path,
dataset: Path = None,
n: int = 10_000,
lang: str = "en",
) -> Path:
sample_file = output_dir / (corpus.stem + ".pp_sample.tsv")
if sample_file.exists():
return sample_file
dataset = _dataset(dataset, lang)
extractor = ExtractSentences(
sp_model(lang), train_lm(corpus, output_dir), field="raw_content"
)
sampling = functools.partial(
uniform_sampling_wrt_perplexity, rounding=100.0, cut=1000.0, samples=n // 10
)
print(f"Will sample data from {dataset} to {sample_file}")
try:
jsonql.run_pipes(
extractor, sampling, file=dataset, output=sample_file, processes=PROCESSES
)
except Exception:
sample_file.unlink()
raise
subprocess.run(["sort", "-n", "-o", sample_file, sample_file], check=True)
subprocess.run(["head", sample_file], check=True)
return sample_file
def mine(
corpus: Path,
output_dir: Path,
threshold: float,
dataset: Path = None,
lang: str = "en",
) -> List[Path]:
"""Search sentences in CC similar to the one in the given corpus.
Args:
- corpus: corpus to train the LM one. Assumes one sentence per line.
- output_dir: where to store the results
- threshold: maximum perplexity to have
- dataset: glob pattern matching CC shards.
- lang: search in the files of this language
"""
dataset = _dataset(dataset, lang)
files = list(dataset.parent.glob(dataset.name))
outputs = [output_dir / (f.stem + ".tsv") for f in files]
if all(o.exists() for o in outputs):
return outputs
n = len(outputs)
sp = [sp_model(lang)] * n
lm = [train_lm(corpus, output_dir)] * n
thresholds = [threshold] * n
ex = submitit.AutoExecutor(output_dir / "mining_logs")
ex.update_parameters(
name="mine",
cpus_per_task=PROCESSES,
timeout_min=60 * 24 // PROCESSES,
mem_gb=10,
)
jobs = ex.map_array(_mine, files, outputs, sp, lm, thresholds)
print("Submited job array:", jobs[0])
for j in submitit.helpers.as_completed(jobs):
(i, o) = j.result()
print("Mined sentences from", i, "to", o)
return outputs
def _mine(
file: Path, output: Path, sp: Path, lm: Path, threshold: float
) -> Tuple[Path, Path]:
extractor = ExtractSentences(sp, lm, field="raw_content", threshold=threshold)
jsonql.run_pipes(extractor, file=file, output=output, processes=PROCESSES)
return (file, output)
if __name__ == "__main__":
func_argparse.main(sample, mine)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/cc_net/tools/expand_corpus.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
from typing import Iterable, Sequence
from cc_net import dedup, jsonql
from cc_net.dedup import str_hash
from cc_net.flat_hash_set import FlatHashSet
def text(*args: str) -> str:
return "\n".join(args)
def write_docs(file: Path, docs: Iterable[Sequence[str]]):
file.parent.mkdir(exist_ok=True)
with open(file, "w") as f:
for sentences in docs:
doc = dict(text=text(*sentences))
print(json.dumps(doc), file=f)
def as_dict(hash_set):
if not isinstance(hash_set, dict):
hash_set = {k: v for (k, v) in hash_set.items()}
return hash_set
def load_hashes(file):
results = dedup.FlatHashSet()
results.load(file)
return as_dict(results)
LENGTHS = ["original_length", "length"]
def assert_documents_equal(expected, actual, ignoring={}):
expected = [{k: doc[k] for k in doc if k not in ignoring} for doc in expected]
    actual = [{k: doc[k] for k in doc if k not in ignoring} for doc in actual]
assert expected == actual
def test_simple_dedup(tmp_path: Path) -> None:
write_docs(
tmp_path / "docs.json",
[
["_Hello", "_World", "I'm so original"],
["_world", "I'm originaler", "_Hello"],
],
)
results = list(dedup.deduplicate(tmp_path / "docs.json", field="text"))
expected = [
# First document is untouched
dict(
text=text("_Hello", "_World", "I'm so original"),
original_nlines=3,
nlines=3,
line_ids=[0, 1, 2],
),
# Second documents loses several lines
dict(text="I'm originaler", original_nlines=3, nlines=1, line_ids=[1]),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_with_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
collector = dedup.HashesCollector(field="text", output=hashes)
list(collector.map(documents))
results = load_hashes(hashes)
expected = {
str_hash(l): l.startswith("_")
for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
}
assert expected == results
def test_dedup_with_np_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
with dedup.HashesCollector(field="text", output=hashes) as d:
list(d.map(documents))
results = FlatHashSet()
results.load_np(hashes)
expected = set(
str_hash(l) for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
)
assert expected == set(results.keys())
def test_dedup_from_hashes(tmp_path: Path):
documents = [
dict(text=text("_Hello", "World", "I'm so original")),
dict(text=text("Good morning", "World", "I'm originaler")),
]
seen = ["_hello", "i'm originaler", "world"]
hashes = [str_hash(h) for h in seen]
h = dedup.FlatHashSet()
h.add(hashes)
# Note: 'world' appears only once and won't be treated as a duplicate.
h.add(hashes[:-1])
h.dump(tmp_path / "hashes.bin")
results = list(
dedup.DuplicatesRemover("text", [tmp_path / "hashes.bin"]).map(documents)
)
expected = [
dict(
text=text("World", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[1, 2],
),
dict(
text=text("Good morning", "World"),
original_nlines=3,
nlines=2,
line_ids=[0, 1],
),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_fast(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
parts = [data / "part_0.json", data / "part_1.json"]
res = tmp_path / "res"
res.mkdir()
h = tmp_path / "hashes.bin"
field = "text"
jsonql.run_pipes(dedup.HashesCollector(field, output=h), file=parts)
for part in parts:
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(
text=text("Good morning", "I'm originaler"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
words = [w for part in [part_0, part_1] for doc in part for w in doc]
expected = {str_hash(s.lower()): s.startswith("_") for s in words}
assert expected == load_hashes(h)
def test_remove_duplicates_sharded(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["_Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
h = tmp_path / "hashes"
h.mkdir()
h0 = FlatHashSet()
h0.add([str_hash(s.lower()) for doc in part_0 for s in doc])
h0.add([str_hash("_world")])
h0.dump(h / "part_0.bin")
assert {
str_hash("hello"): False,
str_hash("_world"): True,
str_hash("i'm so original"): False,
} == as_dict(h0)
h1 = FlatHashSet()
h1.add([str_hash(s.lower()) for doc in part_1 for s in doc])
h1.add([str_hash("_good morning")])
h1.dump(h / "part_1.bin")
assert {
str_hash("_good morning"): True,
str_hash("_world"): False,
str_hash("i'm originaler"): False,
} == as_dict(h1)
res = tmp_path / "res"
res.mkdir()
# dedup.DISABLE_MULTI_PROCESSING = True # Simplifies debugging
dedup.remove_duplicates_sharded(
files=[data / "part_0.json", data / "part_1.json"],
outputs=[res / "part_0.json", res / "part_1.json"],
field="text",
hashes_dir=h,
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
# First pass removes "_world", second "_good morning".
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(text=text("I'm originaler"), original_nlines=3, nlines=1, line_ids=[2])
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/test_dedup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import cc_net.text_normalizer as txt
def test_unicode_punct():
weird = ",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%"
replaced = ',.,""""""""""\'::?!();- - . ~\'...-<>[]%'
assert txt.replace_unicode_punct(weird) == replaced
assert txt.remove_unicode_punct(weird) == ""
def test_numbers():
weird = "023456789 | 0123456789"
normalized = "000000000 | 0000000000"
assert txt.normalize(weird, numbers=True) == normalized
assert txt.normalize(weird, numbers=False) == weird
def test_normalize_for_dedup():
weird = "023´∶:\x10 | ;012 hèllo"
normalized = "000 | ;000 hèllo"
assert normalized == txt.slow_normalize_for_dedup(weird)
assert normalized == txt.normalize_for_dedup(weird)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/test_normalizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from cc_net import process_wet_file
def test_parsing():
sample = Path(__file__).parent / "data" / "sample.warc.txt"
with open(sample) as f:
documents = list(process_wet_file.parse_warc_file(f))
expected_urls = [
"http://sample_english.com",
"http://sample_chinese.zh",
"http://sample_russian.ru",
]
assert expected_urls == [d["url"] for d in documents]
expected_domains = ["sample_english.com", "sample_chinese.zh", "sample_russian.ru"]
assert expected_domains == [d["source_domain"] for d in documents]
expected_date = [
"2019-03-18T00:00:00Z",
"2019-03-18T00:00:01Z",
"2019-03-18T00:00:02Z",
]
assert expected_date == [d["date_download"] for d in documents]
expected_title = [
"Famous Mark Twain Quotes",
"馬克·吐溫名言",
"Цитаты знаменитого Марка Твена",
]
assert expected_title == [d["title"] for d in documents]
expected_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't.
"""
assert expected_quotes == documents[0]["raw_content"]
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/test_parse_wet_file.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pytest
from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet, NaiveHashSet
def as_dict(flat_hash_set) -> dict:
return {k: v for (k, v) in flat_hash_set.items()}
need_getpy = pytest.mark.skipif(
FlatHashSet == NaiveHashSet, reason="getpy isn't installed"
)
def same_behavior(test_case):
def run_case():
        flat = as_dict(test_case(FlatHashSet))
        naive = as_dict(test_case(NaiveHashSet))
assert naive == flat
return need_getpy(run_case)
@same_behavior
def test_setitem(hash_set_cls):
h = hash_set_cls()
h[np.arange(10, dtype=h.dtype)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=h.dtype)] = np.ones(5, dtype=np.uint8)
return h
@same_behavior
def test_add_dup(hash_set_cls):
h = hash_set_cls()
h.add(np.arange(10, dtype=h.dtype))
h.add(np.arange(5, dtype=h.dtype))
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h), f"add_dup with {hash_set_cls.__name__}"
return h
@need_getpy
def test_gp_dict():
import getpy as gp # type: ignore
h = gp.Dict(HASH_TYPE, np.uint8)
h[np.arange(10, dtype=HASH_TYPE)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=HASH_TYPE)] = np.ones(5, dtype=np.uint8)
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h)
def check_reload(h, dump, load, tmp_path):
dump_path = tmp_path / dump.__name__
dump(h, dump_path)
h2 = type(h)()
load(h2, dump_path)
assert as_dict(h) == as_dict(h2)
@pytest.mark.parametrize("hash_set_cls", [FlatHashSet, NaiveHashSet])
def test_loading(tmp_path, hash_set_cls):
h = hash_set_cls()
x = np.random.randint(0, 2 ** 32, (100,), dtype=h.dtype)
h.add(x)
check_reload(h, hash_set_cls.dump, hash_set_cls.load, tmp_path)
check_reload(h, hash_set_cls.dump_np, hash_set_cls.load_np, tmp_path)
if hasattr(hash_set_cls, "dump_gp"):
check_reload(h, hash_set_cls.dump_gp, hash_set_cls.load_gp, tmp_path)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/test_flat_hash_set.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
def _request_is_disabled(self, *args, **kwargs):
raise Exception(
f"Your code tried to call 'request' with: {args}, {kwargs}. Unit test aren't allowed to reach internet."
)
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
"""Remove requests.sessions.Session.request for all tests."""
monkeypatch.setattr("requests.sessions.Session.request", _request_is_disabled)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/conftest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from cc_net import jsonql, regroup
def check_regroup(tmp_path, regroup_fn, check_blocks_boundaries=False):
n_shards = 4
n_docs = 20
shards = [
[dict(id=i, shard=s, raw_content="hello world") for i in range(n_docs)]
for s in range(n_shards)
]
shards_files = [tmp_path / f"{s:04d}.json.gz" for s in range(n_shards)]
for shard, shard_file in zip(shards, shards_files):
jsonql.run_pipes(inputs=shard, output=shard_file)
regroup_file = tmp_path / "regroup.json.gz"
start = time.time()
regroup_fn(shards_files, regroup_file)
duration = time.time() - start
print(f"{regroup_fn.__module__}.{regroup_fn.__name__} took {duration}s")
regrouped = list(jsonql.read_jsons(regroup_file))
assert [doc for shard in shards for doc in shard] == regrouped
readers = jsonql.get_block_readers(regroup_file, n_shards)
if not check_blocks_boundaries:
assert [doc for shard in shards for doc in shard] == [
doc for reader in readers for doc in jsonql.read_jsons(reader)
]
return
for shard, reader in zip(shards, readers):
block = [doc for doc in jsonql.read_jsons(reader)]
assert shard == block
def test_regroup(tmp_path):
# With regroup boundaries will be every 256Mb.
check_regroup(tmp_path, regroup.reshard, check_blocks_boundaries=False)
def test_fast_regroup(tmp_path):
    # With fast regroup, boundaries should match the shards.
check_regroup(tmp_path, regroup.fast_reshard, check_blocks_boundaries=True)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/test_regroup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from pathlib import Path
from typing import Sequence
import numpy as np
import pytest
from cc_net import jsonql
def bar(small_bar: str) -> str:
return small_bar.replace(" ", " " * 10).replace("█", "█" * 10)
def get_output(transformer, data, **kwargs):
with io.StringIO() as output:
# Convert data to a generator so that it's not interpreted as a file list.
jsonql.run_pipe(transformer, kwargs, file=(x for x in data), output=output)
return output.getvalue()
def test_split(tmp_path: Path):
data = [
dict(text="Hello world", lang="en"),
dict(text="Boujour les amis", lang="fr"),
dict(text="Rock your boat", lang="en"),
]
with jsonql.split(tmp_path / "{lang}.json") as split:
list(split.map(data))
summary = split.summary()
assert "Found 2 splits." in summary
en_docs = list(jsonql.read_jsons(tmp_path / "en.json"))
assert [data[0], data[2]] == en_docs
fr_docs = list(jsonql.read_jsons(tmp_path / "fr.json"))
assert [data[1]] == fr_docs
def test_split_bad_pattern(tmp_path: Path):
data = [dict(text="Hello world", lang="en")]
with pytest.raises(KeyError):
with jsonql.split(tmp_path / "{language}.json") as split:
list(split.map(data))
def test_histogram():
data = [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]
hist, bins = jsonql.histogram(data, bins=8, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(1, 10)])
np.testing.assert_almost_equal(hist, [4, 0, 0, 2, 0, 0, 0, 2])
data = [0, 0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.8, 0.8, 1]
hist, bins = jsonql.histogram(data, bins=10, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(11)])
np.testing.assert_almost_equal(hist, [1, 4, 0, 0, 2, 0, 0, 0, 2, 1])
def test_display_stats():
stats = {
jsonql.ALL_DOCUMENTS: 100,
"title": 80,
"title.length": 80 * 50,
"text": 100,
"text.length": 100 * 1000,
"popularity": 8,
"popularity.val": [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9],
}
(title,) = jsonql.display_stats(stats, "title")
assert "title" in title
assert "saw 80 times" in title
assert "average length is" in title
assert "\n" not in title
(text,) = jsonql.display_stats(stats, "text")
assert "text" in text
assert "saw 100 times" in text
assert "average length is" in text
assert "\n" not in text
histogram = jsonql.display_stats(
stats, "popularity", bins=[x / 10 for x in range(1, 10)]
)
assert "popularity" in histogram[0]
assert "saw 8 times" in histogram[0]
assert "histogram is" in histogram[0]
assert "0.100 " + bar("████████") in histogram[1]
assert "0.400 " + bar("████ ") in histogram[2]
assert "0.800 " + bar("████ ") in histogram[3]
cum_histogram = jsonql.display_stats(stats, "popularity", bins=8, cumulative=True)
assert "popularity" in cum_histogram[0]
assert "saw 8 times" in cum_histogram[0]
assert "histogram is" in cum_histogram[0]
assert "0.100 " + bar("████ ") in cum_histogram[1]
assert "0.400 " + bar("██████ ") in cum_histogram[2]
assert "0.800 " + bar("████████") in cum_histogram[3]
def test_describe():
def sample(pop):
return dict(title="Lorem", text="Lorem ipsum dolor sit amet.", popularity=pop)
data = [sample(pop) for pop in [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]]
desc = get_output(
jsonql.describe, data, columns=None, bins=[x / 10 for x in range(1, 10)]
)
assert "Field title saw 8 times (100.0%), average length is 5" in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity saw 8 times (100.0%), histogram is" in desc
assert "0.100 " + bar("████████") in desc
assert "0.400 " + bar("████ ") in desc
assert "0.800 " + bar("████ ") in desc
desc = get_output(jsonql.describe, data, columns=["text"])
assert "Field title saw 8 times (100.0%), average length is 5" not in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity, histogram is:" not in desc
def test_custom_pipe():
def transformer(source, sep=" "):
for i, line in enumerate(source):
res = f"{i}{sep}{line}"
yield res
data = ["hello", "world"]
assert get_output(transformer, data) == "0 hello\n1 world\n"
assert get_output(transformer, data, sep="_") == "0_hello\n1_world\n"
def test_open_read_write(tmp_path: Path):
def _lines(filename: Path) -> Sequence[str]:
# jsonql.lines calls open_read
return list(jsonql.lines(filename))
tmp = tmp_path
with jsonql.open_write(tmp / "a.txt") as o:
print("a", file=o)
assert _lines(tmp / "a.txt") == ["a"]
jsonql.write_jsons([{"a": 1}], tmp / "a.txt")
assert _lines(tmp / "a.txt") == ['{"a": 1}']
with jsonql.open_write(tmp / "a.gz") as o:
print("a", file=o)
assert _lines(tmp / "a.gz") == ["a"]
with jsonql.open_write([tmp / "a0.txt", tmp / "a1.txt"]) as o:
print("a", file=o)
assert _lines(tmp / "a0.txt") == ["a"]
assert not (tmp / "a1.txt").is_file()
with jsonql.open_write([tmp / "b0.txt", tmp / "b1.txt"], max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b0.txt") == ["0" * 2000]
assert _lines(tmp / "b1.txt") == ["1" * 2000]
with jsonql.open_write(tmp / "a_????.json") as o:
print("a", file=o)
assert _lines(tmp / "a_0000.json") == ["a"]
assert not (tmp / "a_0001.json").is_file()
assert _lines(tmp / "a_*.json") == ["a"]
with jsonql.open_write(tmp / "b_??.json", max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b_00.json") == ["0" * 2000]
assert _lines(tmp / "b_01.json") == ["1" * 2000]
assert _lines(tmp / "b_*.json") == ["0" * 2000, "1" * 2000]
def test_split_file(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\nWorld\n"
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_line(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello _|_\nWorld\n"
# split is here ^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello _|_\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_char(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\U0001F40D\nWorld\n"
# split is here ^^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello🐍\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_blocked_gzip(tmp_path: Path):
file = tmp_path / "test.gz"
f = str(file)
# Each object is 10/11 bytes long. We have 2 of them by block.
content = ['{"xx": %d}' % i for i in range(80)]
with jsonql.BlockedGzipWriter(file, "wt", block_size="20B") as o:
for line in content:
print(line, file=o)
jr = jsonql.JsonReader(strict=True)
expected = list(jr.map(content))
# read as one file
assert expected == list(jsonql.read_jsons(file))
# read first block
assert expected[:2] == list(jsonql.read_jsons(f + "[0/40]"))
# read last block
assert expected[-2:] == list(jsonql.read_jsons(f + "[39/40]"))
readers = jsonql.get_block_readers(file, 9)
read_as_several_files = [list(jsonql.read_jsons(r)) for r in readers]
# 40 splits of 2 docs, 9 readers -> 5 splits, 10 docs per reader
assert list(jsonql.grouper(expected, 10)) == read_as_several_files
def test_enter_exit(capsys):
class MyTransformer(jsonql.Transformer):
def __enter__(self):
print("trans: started")
self.ready = True
return self
def __exit__(self, *args):
print("trans: done")
def do(self, x):
return (x, x)
def acc(values):
print("acc: started")
res = 0
for (x, _) in values:
res += int(x)
print("acc: done")
yield f"acc: result={res}"
t = MyTransformer()
data = (str(x) for x in range(10))
print("pipeline: started")
# Print to stdout.
jsonql.run_pipes(t, acc, file=data)
print("pipeline: done")
out = capsys.readouterr().out
assert (
"\n".join(
[
"pipeline: started",
"trans: started",
"acc: started",
"acc: done",
f"acc: result=45",
# Transformers are closed at the very end.
"trans: done",
"pipeline: done\n",
]
)
== out
)
def test_write_to_stdout(capsys):
lines = [str(x) for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "\n".join(lines) + "\n"
def test_write_to_stdout_handle_newlines(capsys):
lines = [str(x) + "\n" for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "".join(lines)
def test_multiprocess(capsys):
mult = jsonql.Mapper(lambda x: f"2x = {2 * int(x)}")
jsonql.run_pipes(mult, processes=2, file=(str(x) for x in range(10)))
out = set(capsys.readouterr().out.strip("\n").split("\n"))
assert set(f"2x = {2 * x}" for x in range(10)) == out
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/test_jsonql.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
sentences = ["Hello world !", "Is everyone happy in here ?"]
hashes = get_hashes(sentences)
assert all([len(h) == HASH_SIZE for h in hashes])
hashes_int = [minify._b2i(h) for h in hashes]
encoded = encode_hashes(hashes)
decoded = decode_hashes(encoded)
assert all([len(d) == HASH_SIZE for d in decoded])
decoded_int = [minify._b2i(d) for d in decoded]
assert hashes_int == decoded_int
assert hashes == decoded
def test_minify():
doc = {
"raw_content": "Hello world !\nIs everyone happy in here ?",
"language": "en",
"perplexity": 120.0,
"line_ids": [0, 4],
}
expected = {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
minifier = minify.Minifier()
assert expected == minifier(doc)
@pytest.fixture
def http_from_disk(monkeypatch):
def read_sample_file(url: str, n_retry: int = 3) -> bytes:
expected_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
assert expected_url == url
file = Path(__file__).parent / "data" / "sample.warc.txt"
return file.read_bytes()
monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
# We don't need no education.
chosen_quotes = "\n".join(
l for l in full_quotes.splitlines() if "Education" not in l
)
cc_doc = {
"url": "http://sample_english.com",
"date_download": "2019-03-18T00:00:00Z",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"source_domain": "sample_english.com",
"title": "Famous Mark Twain Quotes",
"raw_content": full_quotes,
"cc_segment": "crawl-data/sample.warc.wet",
"nlines": 4,
"length": 353,
}
ccnet_metadata = {
"language": "en",
"language_score": 0.99,
"perplexity": 151.5,
"bucket": "head",
"raw_content": chosen_quotes,
"nlines": 3,
"length": len(chosen_quotes),
"original_nlines": 4,
"original_length": 353,
"line_ids": [0, 2, 3],
}
ccnet_doc = dict(cc_doc, **ccnet_metadata)
mini = minify.Minifier()(ccnet_doc.copy())
assert mini is not ccnet_doc
important_fields = [
"url",
"digest",
"cc_segment",
"language",
"language_score",
"perplexity",
"bucket",
"line_ids",
]
expected = {k: ccnet_doc[k] for k in important_fields}
expected["line_ids"] = encode_line_ids(expected["line_ids"]) # type: ignore
assert expected == mini
with jsonql.open_write(tmp_path / "sample.json") as o:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
# line_ids is removed when unminifying
ccnet_doc.pop("line_ids")
assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
mini_docs = [
{
"url": "http://sample_chinese.com",
"digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([2]),
"bucket": "not_that_great",
},
{
"url": "http://sample_english.com",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([3]),
"bucket": "top_notch",
},
]
with jsonql.open_write(tmp_path / "sample.json") as o:
for mini in mini_docs:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
docs = [d for d in fetcher.map(cc) if d is not None]
assert cc.retrieved_segments == 1
# Note: documents are retrieved as they are ordered in the .warc.wet file
assert [
"Facts are stubborn things, but statistics are more pliable.",
"事實是固執的東西,但統計數字卻比較柔和。",
] == [d["raw_content"] for d in docs]
assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/test_minify.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import inspect
import pickle
from pathlib import Path
import pytest
from cc_net import dedup, jsonql, perplexity, split_by_lang, tokenizer
def get_transformers(module):
return [
v
for v in vars(module).values()
if type(v) is type
and issubclass(v, jsonql.Transformer)
and v != jsonql.Transformer
]
ALL_TRANSFORMERS = (
get_transformers(jsonql)
+ get_transformers(dedup)
+ get_transformers(perplexity)
+ get_transformers(tokenizer)
+ get_transformers(split_by_lang)
)
def check_transformer_is_calling_super_init(cls: type):
assert issubclass(cls, jsonql.Transformer)
# accessing __init__ is generally an error, but here we do want to inspect
# the __init__method.
code = inspect.getsource(cls.__init__) # type: ignore
code = code.replace(" ", "")
# Check that super().__init__ is called.
assert "super().__init__()" in code
def test_bad_transformers_are_caught():
class BadTransformer(jsonql.Transformer):
def __init__(self, arg):
# We aren't calling super /!\
self.arg = arg
with pytest.raises(AssertionError):
check_transformer_is_calling_super_init(BadTransformer)
@pytest.mark.parametrize("transformer", ALL_TRANSFORMERS)
def test_transformer_is_correctly_implemented(transformer):
check_transformer_is_calling_super_init(transformer)
@pytest.mark.skipif(
not Path("bin/lid.bin").exists(), reason="bin/lid.bin not found, run `make install`"
)
def test_can_pickle_transformer(tmp_path):
model = Path("bin/lid.bin")
if not model.exists():
return
classifier = split_by_lang.Classifier(model, "text", "lang")
classifier.__enter__()
doc = dict(text="Hello world ! This is English btw.")
original_results = classifier(doc)
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
# Do it again with the unpickled object.
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/cc_net/tests/test_transformer.py |
import glob, os
import json
import sys
import re
import hashlib
import gzip
import os
## Load data from the Wikipedia corpus
## And output them as label "__label__wiki"
#
files = ["cc_net/data/mined/wikipedia/en_head_0000.json.gz", "cc_net/data/mined/wikipedia/en_middle_0000.json.gz"]
unique = {}
i = 0
for f in files:
for jstr in gzip.open(f, "rt"):
i = i + 1
result = json.loads(jstr)
result["class"] = "wiki"
if result["digest"] in unique:
continue
unique["digest"] = 1
if(len(result["raw_content"]) < 1000):
continue
print("__label__wiki " + " ".join(result["raw_content"].splitlines()))
jobs = []
for file in glob.glob("common_crawl/*/*/*.gz"):
if ("middle" in file or "head" in file) and "dedup" not in file:
jobs.append(file)
## Fetch `perfile` number of webpages for each CommonCrawl partition
#
perfile = i / len(jobs)
## Output CommonCrawl data as label "__label__cc"
#
n = 0
for job in jobs:
j = 0
for jstr in gzip.open(job, "rt"):
j = j + 1
if j > perfile:
break
result = json.loads(jstr)
result["class"] = "cc"
print("__label__cc " + " ".join(result["raw_content"].splitlines()))
n = n + 1
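## Illustrative follow-up (not part of this script): assuming the printed corpus
## was redirected to "train.txt", a fastText classifier could be trained with the
## python bindings, producing the "model.bin" that classify.py later loads.
#
# import fasttext
# model = fasttext.train_supervised(input="train.txt")
# model.save_model("model.bin")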
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/classifier/create_corpus.py |
import glob, os
import json
import sys
import re
import hashlib
import gzip
import os
from multiprocessing import Pool
# Get all jobs.
# Each job corresponds to a file ends with .gz, with middle or head in it
#
jobs = []
os.chdir(sys.argv[1])
for file in glob.glob("*/*.gz"):
if ("middle" in file or "head" in file) and "dedup" not in file:
jobs.append(file)
print("TOTAL # JOBS:", len(jobs))
# For each row, run classifier and output
# (text: [...], source, pred_label, pred_label_prob, wiki_prob)
#
def run(job):
import fasttext
model = fasttext.load_model("../fastText/model.bin")
print(job)
ofile = gzip.open(job + ".dedup.classifier.gz", "wt")
ostat = open(job + ".dedup.classifier.gz.stat", "wt")
line = 0
for jstr in gzip.open(job + ".result", "rt"):
result = json.loads(jstr)
content = result["raw_content"]
output = {}
# run classifier
text = " ".join(content.strip().splitlines())
pred = model.predict(text)
(pred_label, pred_prob) = pred
pred_label = pred_label[0]
wiki_prob = pred_prob[0]
if pred_label == "__label__cc":
wiki_prob = 1 - wiki_prob
output["pred_label"] = pred_label
output["pred_label_prob"] = pred_prob[0]
output["wiki_prob"] = wiki_prob
output["text"] = content
output["source"] = "cc/" + job + f"/line{line}"
line = line + 1
nchars = len(content)
ostat.write(f"{nchars}\t{wiki_prob}\n")
ofile.write(json.dumps(output) + "\n")
ofile.close()
ostat.close()
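# Illustrative downstream sketch (not part of the original pipeline): the lines
# written above can be filtered by "wiki_prob" to keep Wikipedia-like pages.
# The threshold value below is an assumption for illustration only.
def keep_wiki_like(path, threshold=0.25):
    for jstr in gzip.open(path, "rt"):
        doc = json.loads(jstr)
        if doc["wiki_prob"] >= threshold:
            yield doc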
with Pool(224) as p:
    p.map(run, jobs)
 | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/classifier/classify.py |
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--data",
"-d",
help="path to articles xml",
default="enwiki-20230401-pages-articles-multistream.xml",
)
parser.add_argument(
"--output",
"-o",
help="path to extracted urls file",
default="./extracted_urls.txt",
)
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="show progress",
)
args = parser.parse_args()
def get_urls():
with open(args.data, "r", errors="ignore") as f, open(args.output, "w") as out:
for i, line in enumerate(f, start=1):
refs = re.search("<ref>(.*)</ref>", line)
if refs is not None:
results = re.findall(
r"\b(?:https?|telnet|gopher|file|wais|ftp):[\w/#~:.?+=&%@!\-.:?\\-]+?(?=[.:?\-]*(?:[^\w/#~:.?+=&%@!\-.:?\-]|$))",
refs.group(0),
)
if len(results) > 0:
for result in results:
out.write(result + "\n")
if args.verbose and i % 1000000 == 0:
print("Lines searched: {}".format(i), end="\r")
def main():
get_urls()
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/classifier/extract_urls.py |
import glob, os
import json
import sys
import re
import hashlib
import gzip
import os
from multiprocessing import Pool
# Get all jobs.
# Each job corresponds to a file ends with .gz, with middle or head in it
#
jobs = []
os.chdir(sys.argv[1])
for file in glob.glob("*/*.gz"):
if ("middle" in file or "head" in file) and "dedup" not in file:
jobs.append(file)
print("TOTAL # JOBS:", len(jobs))
# Output (URL, digest) pairs for each job
#
def run(job):
print(job)
ofile = gzip.open( job + ".dedup", "wt")
for jstr in gzip.open(job, "rt"):
result = json.loads(jstr)
ofile.write(result['url'] + " " + result['digest'] + "\n")
ofile.close()
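# Each line written to the ".dedup" file has the form "<url> <digest>", which is
# exactly what dedup_phase2.py splits back into (fileid, digest) pairs.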
with Pool(64) as p:
p.map(run, jobs)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/dedup/dedup_phase1.py |
import glob, os
import json
import sys
import re
import hashlib
import gzip
import os
from multiprocessing import Pool, Value
import multiprocessing
import gc
# Get all jobs
#
jobs = []
os.chdir(sys.argv[1])
for file in glob.glob("*/*.gz"):
if ("middle" in file or "head" in file) and "dedup" not in file:
jobs.append(file)
print("TOTAL # JOBS:", len(jobs))
# Load all pairs of (fileid, digest)
#
counter = Value('i', 0)
lock = multiprocessing.Lock()
def load(job):
load_job = {}
global counter
with counter.get_lock():
counter.value += 1
print(counter.value, job)
# test: early stop
#if counter.value > 10:
# return {}
for line in gzip.open(job + ".dedup", mode='rt'):
(fileid, digest) = line.split(" ")
load_job[fileid] = digest
return load_job
with Pool(64) as p:
loaded_ = p.map(load, jobs)
loaded = {}
for j in range(0, len(jobs)):
loaded[jobs[j]] = loaded_[j]
# Dedup
# unique fileIDs are in unique_fileid
# also write unique fileID for each job in its own file
#
table = {}
unique_fileid = {}
#ufile = gzip.open("uniqie_fileids", "wt")
for job in loaded:
print("loaded", job, len(loaded[job]))
ufile = gzip.open(job + ".uniqie_fileids", "wt")
for fileid in loaded[job]:
digest = loaded[job][fileid]
if digest not in table:
table[digest] = 1
unique_fileid[fileid] = 1
ufile.write(fileid + "\n")
ufile.close()
print("total unique", len(unique_fileid))
# GC
#
del loaded_
del loaded
gc.collect()
# Write out the result
#
def write(job):
global counter
with counter.get_lock():
counter.value += 1
print("write", counter.value, job)
ofile = gzip.open( job + ".result", "wt")
wrote = 0
total = 0
for jstr in gzip.open(job, "rt"):
result = json.loads(jstr)
if result['url'] in unique_fileid:
wrote = wrote + 1
ofile.write(jstr)
total = total + 1
print(" wrote", wrote, "/", total)
ofile.close()
with Pool(64) as p:
p.map(write, jobs)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/data_prep/cc/dedup/dedup_phase2.py |
from megatron.data.indexed_dataset import MMapIndexedDataset
from transformers import AutoTokenizer
import argparse
# get the first argument as a file name, and an output file
parser = argparse.ArgumentParser()
parser.add_argument("file_name", help="the file name to read")
parser.add_argument("output_file", help="the file name to write")
args = parser.parse_args()
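# Example invocation (paths are illustrative): the first argument is the Megatron
# indexed-dataset prefix (without the .bin/.idx extension) and the second is the
# output file of per-document token counts.
#   python count_tokens.py /data/redpajama_text_document token_counts.txt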
ds = MMapIndexedDataset(args.file_name)
tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
num_tokens = [
len(ds[i]) for i in range(len(ds))
]
# write it out to an output_file
with open(args.output_file, "w") as f:
for i in num_tokens:
f.write(f"{i}\n")
print(f'Total tokens: {sum(num_tokens)}')
 | EXA-1-master | exa/libraries/data_prep/RedPajama-Data/tokenization/count_tokens.py |
"""
Embed each row of a `.jsonl` file using a HuggingFace model and save the embeddings.
Authors: The Meerkat Team (Karan Goel, Sabri Eyuboglu, Arjun Desai)
License: Apache License 2.0
"""
import os
from argparse import ArgumentParser
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.json
import torch
import torch.nn.functional as F
from rich import print
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer
import meerkat as mk
class TruncatedDataset:
def __init__(
self,
df: mk.DataFrame,
tokenizer: AutoTokenizer,
chunk_size: int,
):
self.df = df
self.tokenizer = tokenizer
self.chunk_size = chunk_size
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
data = self.df[idx]
tokenized = self.tokenizer(
data["text"],
pad_to_multiple_of=self.chunk_size,
padding=True,
)
return {
"input_ids": torch.tensor(tokenized["input_ids"][: self.chunk_size]),
"attention_mask": torch.tensor(
tokenized["attention_mask"][: self.chunk_size]
),
"doc_id": data["id"],
"chunk_id": 0,
}
def create_model_and_tokenizer(
model_name: str,
cache_dir: str,
):
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
print("Loading model...")
model = AutoModel.from_pretrained(model_name, cache_dir=cache_dir).cuda()
return model, tokenizer
def prepare(feature_dir: str, savepath: str):
if not os.path.exists(feature_dir):
os.makedirs(feature_dir)
if os.path.exists(savepath):
exit()
def load_dataframe(path):
print("Loading dataframe...")
# Load in the JSON.
df = mk.from_json(
path,
lines=True,
backend="arrow",
read_options=pa.json.ReadOptions(**{"block_size": 10 << 20}),
)
if "meta" in df.columns:
struct_array = df["meta"].data
result = {}
for field_index in range(struct_array.type.num_fields):
field = struct_array.type.field(field_index)
result[field.name] = mk.ArrowScalarColumn(
pc.struct_field(struct_array, field.name)
)
meta_df = mk.DataFrame(result)
else:
meta_df = mk.DataFrame()
if "id" in meta_df.columns:
df["id"] = meta_df["id"]
elif "arxiv_id" in meta_df.columns:
df["id"] = meta_df["arxiv_id"]
else:
try:
df["id"] = meta_df["pkey"]
except:
df.create_primary_key("id")
df = df.set_primary_key("id")
try:
df = df.drop("pkey")
except ValueError:
pass
assert set(df.columns) >= set(
["id", "text"]
), f"Unexpected columns: {set(df.columns)}"
return df
def create_dataloader(
filepath: str,
tokenizer: AutoTokenizer,
chunk_size: int,
batch_size: int,
num_workers: int,
):
dataset = TruncatedDataset(
load_dataframe(filepath),
tokenizer,
chunk_size=chunk_size,
)
return torch.utils.data.DataLoader(
dataset,
shuffle=False,
batch_size=batch_size,
num_workers=num_workers,
)
@torch.no_grad()
def extract_features(
model: torch.nn.Module,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
):
"""Extract features from the model."""
# Extract features from the model
attention_mask = attention_mask.cuda()
outputs = model.forward(input_ids.cuda(), attention_mask=attention_mask)[0]
# Use the attention mask to average the output vectors.
outputs = outputs.cpu()
attention_mask = attention_mask.cpu()
features = (outputs * attention_mask.unsqueeze(2)).sum(1) / attention_mask.sum(
1
).unsqueeze(1).cpu()
# Normalize embeddings
features = F.normalize(features, p=2, dim=1).numpy()
return features
def run_feature_extraction(
model: torch.nn.Module,
dataloader: torch.utils.data.DataLoader,
):
print("Feature extraction...")
storage = []
for batch in tqdm(dataloader):
features = extract_features(model, batch["input_ids"], batch["attention_mask"])
storage.append(features)
# Save the features to disk.
return np.concatenate(storage, axis=0).reshape(-1, 384)
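# Example invocation (paths are illustrative; flag names come from the argparse
# setup below):
#   python embed_jsonl.py --filepath /data/arxiv_00.jsonl --gpu 0 \
#       --model_name sentence-transformers/all-MiniLM-L6-v2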
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--filepath", type=str)
parser.add_argument("--num_workers", type=int, default=16)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--chunk_size", type=int, default=256)
parser.add_argument(
"--model_name",
type=str,
default="sentence-transformers/all-MiniLM-L6-v2",
)
parser.add_argument("--cache_dir", type=str, default="/home/karan/models/")
parser.add_argument(
"--feature_dir",
type=str,
default=f"/home/karan/data/pyjama/features/",
)
args = parser.parse_args()
feature_dir = os.path.join(args.feature_dir, args.model_name)
CUDA_VISIBLE_DEVICES = args.gpu
os.environ["CUDA_VISIBLE_DEVICES"] = str(CUDA_VISIBLE_DEVICES)
# Get num_gpus on this machine.
num_gpus = torch.cuda.device_count()
filepath = args.filepath
filename = os.path.basename(filepath)
savepath = os.path.join(feature_dir, filename.replace(".jsonl", ".npy"))
prepare(feature_dir, savepath)
model, tokenizer = create_model_and_tokenizer(args.model_name, args.cache_dir)
dataloader = create_dataloader(
filepath,
tokenizer,
chunk_size=args.chunk_size,
batch_size=args.batch_size,
num_workers=args.num_workers,
)
features = run_feature_extraction(model, dataloader)
np.save(savepath, features)
print("Done.")
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/viz/embed_jsonl.py |
import os
from argparse import ArgumentParser
from glob import glob
import faiss
import numpy as np
from tqdm.auto import tqdm
def build_pca(
xb: np.ndarray,
d_in: int = 384,
d_out: int = 32,
):
pca = faiss.PCAMatrix(d_in, d_out)
pca.train(xb)
return pca
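# Minimal usage sketch (array shapes are assumptions): train the PCA on float32
# vectors of dimension d_in and project them down to d_out with pca.apply(),
# mirroring how the trained transform is used further below.
#
# pca = build_pca(np.random.rand(1000, 384).astype("float32"))
# reduced = pca.apply(np.random.rand(10, 384).astype("float32"))  # -> (10, 32)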
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--feature_dir",
type=str,
default="~/data/pyjama/features/sentence-transformers/all-MiniLM-L6-v2/",
)
args = parser.parse_args()
dir = os.path.expanduser(args.feature_dir)
# Load in all the files.
files = sorted(list(glob(f"{dir}/*.sampled.npy")))
print(f"Loading {len(files)} files into memory...")
arrs = [np.load(file) for file in tqdm(files)]
# Concatenate all the arrays
arr = np.concatenate(arrs, axis=0)
print("Combined arr:", arr.shape)
# Create the PCA
pca = build_pca(arr)
faiss.write_VectorTransform(pca, f"{dir}/pca32.faiss")
# Apply to all vectors.
arr_reduced = pca.apply(arr)
# Save the reduced array.
np.save(f"{dir}/pca32.npy", arr_reduced)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/viz/reduce_pca32.py |
import faiss
import numpy as np
import torch
import torch.nn.functional as F
from rich import print
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer
def build_flat_index(
xb: np.ndarray,
d: int = 32,
):
index = faiss.IndexFlatL2(d)
index.add(xb)
return index
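# Usage sketch (query shape is an assumption): IndexFlatL2 performs exact
# nearest-neighbour search, returning distances and row ids.
#
# index = build_flat_index(xb)          # xb: float32 array of shape (n, 32)
# distances, ids = index.search(xq, 5)  # xq: float32 array of shape (m, 32)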
def load_index(
path: str,
):
"""Load the index from a path."""
index = faiss.read_index(path)
return index
def load_pca(path: str):
"""Load the PCA from a path."""
pca = faiss.read_VectorTransform(path)
return pca
def create_model_and_tokenizer(
model_name: str,
cache_dir: str = None,
):
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
print("Loading model...")
model = AutoModel.from_pretrained(model_name, cache_dir=cache_dir)
return model, tokenizer
@torch.no_grad()
def extract_features(
model: torch.nn.Module,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
):
"""Extract features from the model."""
# Extract features from the model
attention_mask = attention_mask
outputs = model.forward(input_ids, attention_mask=attention_mask)[0]
# Use the attention mask to average the output vectors.
outputs = outputs.cpu()
attention_mask = attention_mask.cpu()
features = (outputs * attention_mask.unsqueeze(2)).sum(1) / attention_mask.sum(
1
).unsqueeze(1).cpu()
# Normalize embeddings
features = F.normalize(features, p=2, dim=1).numpy()
return features
def extract_features_single(
text: str,
model: torch.nn.Module,
tokenizer: AutoTokenizer,
chunk_size: int = 512,
):
"""Extract features from the model."""
tokenized = tokenizer(
[text],
pad_to_multiple_of=chunk_size,
padding=True,
)
return extract_features(
model,
torch.tensor(tokenized["input_ids"][:chunk_size]),
torch.tensor(tokenized["attention_mask"][:chunk_size]),
)
def run_feature_extraction(
model: torch.nn.Module,
dataloader: torch.utils.data.DataLoader,
):
print("Feature extraction...")
storage = []
carry = (None, None)
for batch in tqdm(dataloader):
features = extract_features(model, batch["input_ids"], batch["attention_mask"])
chunk_id = np.array(batch["chunk_id"])
doc_id = np.array(batch["doc_id"])
if (chunk_id == 0).all():
storage.append(features)
elif (chunk_id == 0).any():
# Close out the previous document.
# Aggregate based on the document ID.
agg = np.array(
[features[doc_id == i].mean(axis=0) for i in np.unique(doc_id)]
)
# Number of chunks in the first document.
num_chunks_first = (doc_id == doc_id[0]).sum()
# Number of chunks in the last document.
num_chunks_last = (doc_id == doc_id[-1]).sum()
# Batch falls on a document boundary.
if chunk_id[0] == 0:
# Close out the previous document and update the carry.
storage.append(carry[0])
carry = (None, None)
# Batch does not fall on a document boundary.
if carry[0] is not None:
# Reweight the first chunk.
agg[0] = (agg[0] * num_chunks_first + carry[0] * carry[1]) / (
num_chunks_first + carry[1]
)
# Update the carry.
carry = (agg[-1], num_chunks_last)
# Put the features in storage.
storage.append(agg[:-1])
else:
# All chunks should have the same document ID.
assert (doc_id == doc_id[0]).all()
# Aggregate.
agg = np.mean(features, axis=0)
# Reweight.
agg = (agg * len(features) + carry[0] * carry[1]) / (
len(features) + carry[1]
)
# Update the carry: make sure to keep track of the number of chunks.
carry = (agg, len(features) + carry[1])
# Save the features to disk.
return np.concatenate(storage, axis=0).reshape(-1, 384)
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/viz/utils.py |
import os
from argparse import ArgumentParser
import faiss
import numpy as np
def build_index(
xb: np.ndarray,
d: int = 32,
):
index = faiss.index_factory(d, "IVF100,PQ8")
# Sample 1_000_000 vectors to train the index.
xt = xb[np.random.choice(xb.shape[0], 1_000_000, replace=False)]
index.train(xt)
index.add(xb)
return index
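# Search sketch (the nprobe value is an assumption): an IVF index scans only
# nprobe of its 100 clusters per query, trading accuracy for speed.
#
# index.nprobe = 10
# distances, ids = index.search(xq, 5)  # xq: float32 array of shape (m, 32)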
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="~/data/pyjama/features/sentence-transformers/all-MiniLM-L6-v2",
)
args = parser.parse_args()
dir = os.path.expanduser(args.dir)
# Load in the embeddings.
arr = np.load(f"{dir}/pca32.npy")
print(arr.shape)
# Create the index.
index = build_index(arr)
faiss.write_index(index, f"{dir}/index_ivf100_pq8.faiss")
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/viz/index_faiss.py |
"""
A Meerkat app for visualizing the Github subset of the RedPajama dataset.
Authors: The Meerkat Team (Karan Goel, Sabri Eyuboglu, Arjun Desai)
License: Apache License 2.0
"""
import numpy as np
import tempfile
from utils import extract_features_single, load_pca, create_model_and_tokenizer
import meerkat as mk
from meerkat.datasets.utils import download_url
with tempfile.TemporaryDirectory() as temp_dir:
path = download_url(
"https://huggingface.co/datasets/meerkat-ml/lemma/resolve/main/pca.faiss",
temp_dir,
)
pca = load_pca(path)
model_name = "sentence-transformers/all-MiniLM-L6-v2"
model, tokenizer = create_model_and_tokenizer(model_name)
df = mk.read(
"https://huggingface.co/datasets/meerkat-ml/lemma/resolve/main/filtered_08cdfa755e6d4d89b673d5bd1acee5f6.mk.tar.gz"
)
def get_full_text(text_sample: str, repo_name: str, ref: str, path: str):
"""
Get the full text of a code sample from Github.
"""
ref = ref.split("/")[-1]
import requests
return requests.get(
f"https://raw.githubusercontent.com/{repo_name}/{ref}/{path}"
).text
df["text_sample"] = df["text_sample"].format(mk.format.CodeFormatterGroup())
df["full_text"] = df.defer(get_full_text).format(mk.format.CodeFormatterGroup().defer())
df["search"] = mk.ArrowScalarColumn(np.zeros(len(df)))
df["embeddings"] = df["embeddings"].format(mk.format.TextFormatterGroup())
@mk.endpoint
def search(df: mk.DataFrame, new_code: str = ""):
"""The endpoint for executing a search query."""
if new_code != "":
features = extract_features_single(new_code, model, tokenizer)
pca_features = pca.apply(features)
df["search"] = np.matmul(df["embeddings"].data, pca_features.T).squeeze()
df.set(df)
editor = mk.gui.Editor(on_run=search.partial(df), title="Search")
# build controls for the scatter plot
NUM_PCA_COMPONENTS = 5
for i in range(NUM_PCA_COMPONENTS):
df[f"pca_{i+1}"] = df["embeddings"][:, i]
options = [f"pca_{i+1}" for i in range(NUM_PCA_COMPONENTS)] + ["search"]
x_select = mk.gui.Select(
options,
value="pca_1",
)
x_control = mk.gui.html.div(
[mk.gui.Text("X Axis"), x_select], classes="grid grid-cols-[auto_1fr] gap-2"
)
y_select = mk.gui.Select(
options,
value="pca_2",
)
y_control = mk.gui.html.div(
[mk.gui.Text("Y Axis"), y_select], classes="grid grid-cols-[auto_1fr] gap-2"
)
color_select = mk.gui.Select(
options,
value="search",
)
color_control = mk.gui.html.div(
[mk.gui.Text("Color"), color_select], classes="grid grid-cols-[auto_1fr] gap-2"
)
select = mk.gui.html.div(
[x_control, y_control, color_control], classes="grid grid-cols-3 gap-8 px-10"
)
scatter = mk.gui.plotly.DynamicScatter(
df=df,
x=x_select.value,
y=y_select.value,
color=color_select.value,
max_points=10_000,
)
gallery = mk.gui.Gallery(
scatter.filtered_df, main_column="text_sample", tag_columns=["language"]
)
page = mk.gui.Page(
component=mk.gui.html.div(
[
mk.gui.html.div(
[editor, select, scatter],
classes="h-screen grid grid-rows-[1fr_auto_3fr] gap-4",
),
gallery,
],
classes="grid grid-cols-2 gap-4 h-screen py-6",
),
id="lemma",
)
page.launch()
| EXA-1-master | exa/libraries/data_prep/RedPajama-Data/viz/main.py |
import os
import sys
import argparse
from os.path import join
from tools import *
import logging
from api import set_api_logger
from chat import ChatBot, Turn, set_chat_logger
import gradio as gr
args: argparse.Namespace = None
bot: ChatBot = None
def summarize_embed_one_turn(bot: ChatBot, dialogue_text, dialogue_text_with_index):
lang2template = {
LANG_EN: 'Below is a conversation between a user and an AI assistant. Please write a summary for each of them in one sentence and list them in separate paragraphs, while trying to preserve the key information of the user’s question and the assistant’s answer as much as possible. \n\nconversation content: \n\n{}\n\nSummary:',
LANG_ZH: '以下是用户和人工智能助手的一段对话,请分别用一句话写出用户摘要、助手摘要,分段列出,要求尽可能保留用户问题和助手回答的关键信息。\n\n对话内容:\n\n{}\n\n摘要:'
}
tmp = choose_language_template(lang2template, dialogue_text)
input_text = tmp.format(dialogue_text)
logger.info(f'turn summarization input_text: \n\n{input_text}')
    # If the original text is already short enough, keep it as the summary
summarization = input_text
if get_token_count_davinci(input_text) > 300:
logger.info(f'current turn text token count > 300, summarize !\n\n')
summarization = bot.ask(input_text)
logger.info(f'Summarization is:\n\n{summarization}\n\n')
embedding = bot.vectorize(dialogue_text_with_index)
return summarization, embedding
# todo: this concatenated prompt may exceed the context length; its size should be set dynamically.
def get_concat_input(user_str, pre_sre, hist_str=None):
templates_no_hist_zh = '以下是用户和人工智能助手的对话,请根据历史对话内容,回答用户当前问题:\n\n上一轮对话:\n\n{}\n\n###\n\n用户:{}\n\n助手:'
templates_no_hist_en = 'The following is a conversation between a user and an AI assistant. Please answer the current question based on the history of the conversation:\n\nPrevious conversation:\n\n{}\n\n###\n\nUser: {}\n\nAssistant:'
lang2template = {
LANG_EN: templates_no_hist_en,
LANG_ZH: templates_no_hist_zh
}
templates_no_hist = choose_language_template(lang2template, user_str)
templates_hist_zh = '以下是用户和人工智能助手的对话,请根据历史对话内容,回答用户当前问题:\n\n相关历史对话:\n\n{}\n\n上一轮对话:\n\n{}\n\n###\n\n用户:{}\n\n助手:'
templates_hist_en = 'The following is a conversation between a user and an AI assistant. Please answer the current question based on the history of the conversation:\n\nRelated conversation history:\n\n{}\n\nPrevious conversation:\n\n{}\n\n###\n\nUser: {}\n\nAssistant:'
lang2template = {
LANG_EN: templates_hist_en,
LANG_ZH: templates_hist_zh
}
templates_hist = choose_language_template(lang2template, user_str)
if hist_str:
input_text = templates_hist.format(hist_str, pre_sre, user_str)
else:
input_text = templates_no_hist.format(pre_sre, user_str)
return input_text
def check_key_file(key_file):
if not os.path.exists(key_file):
print(f'[{key_file}] not found! Please put your apikey in the txt file.')
sys.exit(-1)
def judge_need_history(user_instruction):
templates_zh = '给定一个用户指令,判断执行该指令是否需要历史信息或者上文的信息,或者需要回忆对话内容,只需要回答是(A)或者否(B),不需要解释信息:\n\n指令:{}'
templates_en = 'Given a user command, determine whether executing the command requires historical or previous information, or whether it requires recalling the conversation content. Simply answer yes (A) or no (B) without explaining the information:\n\nCommand:{}'
lang2template = {
LANG_EN: templates_en,
LANG_ZH: templates_zh
}
tmp = choose_language_template(lang2template, user_instruction)
input_text = tmp.format(user_instruction)
is_need = bot.is_history_need(input_text)
logger.info(f'\n--------------\nis_need: {is_need}\n--------------\n')
return is_need
def get_first_prompt(user_text, model_name):
if model_name in ['gpt-3.5-turbo']:
return user_text
else:
templates_zh = '假设你是人工智能助手, 请回答用户的问题和请求:\n\n用户:{}\n\n助手:'
templates_en = 'Assuming you are an AI assistant, please answer the user\'s questions and requests:\n\nUser: {}\n\nAssistant:'
lang2template = {
LANG_EN: templates_en,
LANG_ZH: templates_zh
}
tmp = choose_language_template(lang2template, user_text)
concat_input = tmp.format(user_text)
return concat_input
def my_chatbot(user_input, history):
history = history or []
user_input = user_input.strip()
my_history = list(sum(history, ()))
COMMAND_RETURN = '命令已成功执行!'
if user_input in ['清空', 'reset']:
# history.append((user_input, COMMAND_RETURN))
history = []
bot.clear_history()
logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
return history, history
elif user_input in ['导出', 'export']:
# history.append((user_input, COMMAND_RETURN))
bot.export_history()
logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
return history, history
elif user_input in ['回退', '回滚', 'roll back']:
history.pop()
bot.roll_back()
logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
return history, history
    # dialogue history: my_history
    # current user input: user_input
len_hist = len(bot.history)
cur_turn_index = len_hist + 1
if len_hist == 0:
concat_input = get_first_prompt(user_input, args.model_name)
else:
retrieve = None
is_need = judge_need_history(user_input)
        # and only retrieve related history when the current turn actually needs it
if cur_turn_index > 2 and is_need:
retrieve = bot.get_related_turn(user_input, args.similar_top_k)
concat_input = get_concat_input(user_input, bot.get_turn_for_previous(), hist_str=retrieve)
logger.info(f'\n--------------\n[第{cur_turn_index}轮] concat_input:\n\n{concat_input}\n--------------\n')
try:
rsp: str = bot.ask(concat_input)
except Exception as e:
logger.error(f'ERROR: \n\n{e}')
rsp = '喵呜,您的请求好像掉进了喵喵的世界里了~'
history.append((user_input, rsp))
return history, history
system_text = rsp.strip()
cur_text_without_index = '用户:{}\n\n助手:{}'.format(user_input, system_text)
cur_text_with_index = '[第{}轮]\n\n用户:{}\n\n助手:{}'.format(cur_turn_index, user_input, system_text)
if detect_language(user_input) == LANG_EN:
cur_text_without_index = 'User: {}\n\nAssistant: {}'.format(user_input, system_text)
cur_text_with_index = '[Turn {}]\n\nUser: {}\n\nAssistant: {}'.format(cur_turn_index, user_input, system_text)
try:
summ, embedding = summarize_embed_one_turn(bot, cur_text_without_index, cur_text_with_index)
except Exception as e:
logger.error(f'summarize_embed_one_turn ERROR: \n\n{e}')
rsp = '摘要出错,喵呜,您的请求好像掉进了喵喵的世界里了~'
history.append((user_input, rsp))
return history, history
cur_turn = Turn(user_input=user_input, system_response=system_text, user_sys_text=cur_text_with_index, summ=summ, embedding=embedding)
bot.add_turn_history(cur_turn)
my_history.append(user_input)
output = system_text
history.append((user_input, output))
return history, history
if __name__ == '__main__':
parser = argparse.ArgumentParser()
model_choices = ['text-davinci-003', 'gpt-3.5-turbo', 'bloom', 'alpaca', 'llama']
parser.add_argument("--apikey_file", type=str, default="./config/apikey.txt")
parser.add_argument("--model_name", type=str, default="text-davinci-003", choices=model_choices)
parser.add_argument("--target_file", type=str)
parser.add_argument("--logfile", type=str, default="./logs/log.txt")
parser.add_argument("--history_file", type=str)
parser.add_argument("--similar_top_k", type=int, default=4)
args = parser.parse_args()
check_key_file(args.apikey_file)
log_path = args.logfile
makedirs(log_path)
    # Configure logging
logger = logging.getLogger('dialogue_logger')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('【%(asctime)s - %(levelname)s】 - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
file_handler = logging.FileHandler(log_path, encoding='utf-8')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
set_chat_logger(logger)
set_api_logger(logger)
logger.info('\n\n\n')
logger.info('#################################')
logger.info('#################################')
logger.info('#################################')
logger.info('\n\n\n')
logger.info(f"args: \n\n{args}\n")
stamp = datetime2str()
# print(stamp)
if args.target_file:
history_file = f'{args.target_file}'
else:
history_file = f'./history/{stamp}.json'
embedding_file = history_file + '.emb.json'
bot = ChatBot(model_name=args.model_name)
if args.history_file:
history_file = args.history_file
embedding_file = history_file + '.emb.json'
bot.load_history(args.history_file)
makedirs(history_file)
makedirs(embedding_file)
# if args.target_file:
# with open(history_file, 'w') as file: pass
# with open(embedding_file, 'w') as file: pass
with gr.Blocks() as demo:
gr.Markdown(f"<h1><center>Long Dialogue Chatbot ({args.model_name})</center></h1>")
chatbot = gr.Chatbot()
state = gr.State()
txt = gr.Textbox(show_label=False, placeholder="Ask me a question and press enter.").style(container=False)
txt.submit(my_chatbot, inputs=[txt, state], outputs=[chatbot, state])
    demo.launch(share = True)
 | EXA-1-master | exa/libraries/SCM4LLMs/dialogue-ui-demo.py |
# coding=utf-8
import os
import sys
import time
import json
import pickle
import string
import tiktoken
from langdetect import detect_langs
LANG_EN = 'English'
LANG_ZH = 'Chinese'
LANG_UN = 'Unknown'
def detect_language(text):
    # Get the list of candidate languages detected
langs = detect_langs(text)
    # Collect the detected languages into a dict mapping language code to probability
lang_dict = {}
for lang in langs:
lang_dict[lang.lang] = lang.prob
print(langs)
    # Decide the main language
if 'zh-cn' in lang_dict:
return LANG_ZH
elif 'en' in lang_dict:
return LANG_EN
else:
return LANG_UN
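# Usage sketch: langdetect is probabilistic, but for unambiguous input
#   detect_language("How are you today?") typically returns LANG_EN,
# and a sentence written in Chinese typically returns LANG_ZH.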
def get_token_count_davinci(text):
tokenizer = tiktoken.encoding_for_model('text-davinci-003')
tokens = tokenizer.encode(text)
return len(tokens)
# Recursively create the parent directories of a file path
def makedirs(filename):
dir_path = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
print('makedirs %s' % dir_path)
# Save data to a pickle file
def save_pickle_file(data, filename):
with open(filename, 'wb') as f:
pickle.dump(data, f)
print('saved pkl file ', filename)
# Load a pickle file
def load_pickle_file(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
return data
# Save a list of lines to a text file
def save_file(filename, content):
    """
    :param filename: output file name
    :param content: list of sentences; each element is expected to end with a newline
    :return:
    """
with open(filename, 'w', encoding='utf-8') as f:
f.writelines(content)
print('save file %s successful!' % filename)
def save_jsonl_file(filename, data, indent=None):
"""
:param filename: 输出文件名
:param data: 数据对象,List[json]
:param indent: 缩进
:return: json line format file
"""
with open(filename, 'w', encoding='utf-8') as fp:
for js in data:
if indent:
js_str = json.dumps(js, indent=indent, ensure_ascii=False)
else:
js_str = json.dumps(js, ensure_ascii=False)
fp.write(js_str + '\n')
print('save file %s successful!' % filename)
def save_json_file(filename, data):
"""
:param filename: 输出文件名
:param data: 数据对象,json/list
:return:
"""
with open(filename, 'w', encoding='utf-8') as fp:
json.dump(data, fp, indent=2, ensure_ascii=False)
print('save file %s successful!' % filename)
def load_json_file(filename):
"""
:param filename: 文件名
:return: 数据对象,json/list
"""
with open(filename, encoding='utf-8') as fp:
data = json.load(fp)
return data
def load_jsonl_file(path):
lines = get_lines(path)
data = [json.loads(x) for x in lines]
return data
# Given a filename and an object to pickle, overwrite the file with a freshly written one
def overwrite_pkl_file(filename, data):
tmp_filename = filename + '.swp'
save_pickle_file(data, tmp_filename)
if os.path.exists(filename):
os.rename(filename, filename + '.old.' + datetime2str())
os.rename(tmp_filename, filename)
print('overwrite %s successful!' % filename)
# Given a filename and a list of strings to save, overwrite the file with a freshly written one
def overwrite_txt_file(filename, data):
tmp_filename = filename + '.swp'
save_file(tmp_filename, data)
if os.path.exists(filename):
os.rename(filename, filename + '.old.' + datetime2str())
os.rename(tmp_filename, filename)
print('overwrite %s successful!' % filename)
# Read every non-empty line of a file and return them as a list
def get_lines(filename):
with open(filename, encoding='utf-8') as f:
data = [i.strip() for i in f.readlines() if i.strip() != '']
return data
def get_files(root, suffix):
"""
获取指定目录下的所有指定后缀的文件
:param root: 指定目录 str 类型 如:'.'
:param suffix: 指定后缀 str 类型 如:'.txt'
:return: 文件列表
"""
import os
import glob
if not os.path.exists(root):
raise FileNotFoundError(f'path {root} not found.')
res = glob.glob(f'{root}/**/*{suffix}', recursive=True)
res = [os.path.abspath(p) for p in res]
return res
# Check whether a word is purely Chinese, i.e. contains no symbols other than Chinese characters
def is_chinese_word(word):
for c in word:
if not ('\u4e00' <= c <= '\u9fa5'):
# print(word)
return False
return True
# Check whether a character is a Chinese character
def is_chinese_char(c):
if len(c.strip()) == 1 and '\u4e00' <= c <= '\u9fa5':
return True
return False
# Check whether a word contains only ASCII letters (upper or lower case)
def is_letters(word):
for c in word:
if (c >= '\u0041' and c <= '\u005a') or (c >= '\u0061' and c <= '\u007a'):
continue
return False
return True
# Check whether a word contains only uppercase ASCII letters
def is_upper_letters(word):
for c in word:
if c >= '\u0041' and c <= '\u005a':
continue
return False
return True
def is_upper_letters(word):
for c in word:
if c not in string.ascii_uppercase:
return False
return True
# Check whether a phrase/word is purely English, i.e. contains only the 26 ASCII letters (upper/lower case) and spaces
def is_pure_english_phrase(word):
for c in word:
if (c >= '\u0041' and c <= '\u005a') or (c >= '\u0061' and c <= '\u007a') or c == ' ':
continue
return False
return True
def datetime2str():
from datetime import datetime
return datetime.now().strftime('%Y%m%d-%H%M%S')
# Compute the time elapsed since start
def time_cost(start):
cost = int(time.time() - start)
h = cost // 3600
m = (cost % 3600) // 60
print('')
print('【%s】到目前总耗时: cost %s hours %s mins' % (datetime2str(), h, m))
# Append content_list to filename
def append_file(filename, content_list, new_line=False):
if not content_list:
return
if new_line:
content_list = [text if text.endswith('\n') else text+'\n' for text in content_list]
with open(filename, 'a+', encoding='utf-8') as f:
f.writelines(content_list)
timestamp = datetime2str()
print('[%s]: append_file %s successful!' % (timestamp, filename))
def get_multi_line_input():
while True:
try:
print(f"\nuser(press q to exit): \n")
text=sys.stdin.read()
text = text.strip()
if text == '':
print('Empty input. Try again.\n\n')
continue
else:
return text
except KeyboardInterrupt:
print(f'\nTry again: \n\n')
except Exception as e:
print(f'Exception: {e}.\n\nTry again: \n\n')
def choose_language_template(lang2template: dict, text: str):
lang = detect_language(text)
if lang not in lang2template:
lang = LANG_EN
return lang2template[lang]
if __name__ == '__main__':
print(is_upper_letters('ADFFF') is True)
print(is_upper_letters('Agfdg') is False)
pass
| EXA-1-master | exa/libraries/SCM4LLMs/tools.py |
import os
import openai
import torch
import time
from tools import get_lines, time_cost, append_file
from transformers import AutoTokenizer, AutoModelForCausalLM
BLOOM_MODEL = None
BLOOM_TOKENIZER = None
LOCAL_API_LOGGER = None
def set_api_logger(one):
global LOCAL_API_LOGGER
LOCAL_API_LOGGER = one
class KeyManager(object):
index_save_file = '.key.index'
def __init__(self, filename) -> None:
self.keys = get_lines(filename)
self.key_index = 0
if os.path.exists(self.index_save_file):
index = int(get_lines(self.index_save_file)[-1])
index += 1
index %= len(self.keys)
self.key_index = index
def get_api_key(self):
self.key_index += 1
self.key_index %= len(self.keys)
append_file(self.index_save_file, [str(self.key_index)+'\n'])
cur_key = self.keys[self.key_index]
print(f'\n-----------------\nkey: {cur_key}\nindex:{self.key_index}\n-----------------\n')
return cur_key
def get_initialized_hf_model(path):
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(path)
return model, tokenizer
KEY_MANAGER = KeyManager('config/apikey.txt')
def call_embedding_openai(text):
openai.api_key = KEY_MANAGER.get_api_key()
response = openai.Embedding.create(
model="text-embedding-ada-002",
input=text
)
embedding = response['data'][0]['embedding']
return embedding
def call_embedding_bloom(text):
global BLOOM_MODEL
global BLOOM_TOKENIZER
checkpoint_path = '/your/checkpoint/path'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not BLOOM_MODEL:
t0 = time.time()
LOCAL_API_LOGGER.info('Loading bloom model ... Please wait a few minutes ...')
BLOOM_MODEL, BLOOM_TOKENIZER = get_initialized_hf_model(checkpoint_path)
BLOOM_MODEL.to(device)
LOCAL_API_LOGGER.info('Model Loaded Success !!!')
time_cost(t0)
model, tokenizer = BLOOM_MODEL, BLOOM_TOKENIZER
input_ids = tokenizer.encode(text, return_tensors='pt')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_ids = input_ids.to(device)
with torch.no_grad():
model_output = model(input_ids, output_hidden_states=True, return_dict=True)
    # Get the hidden states (embeddings)
last_hidden_state = model_output.hidden_states
LOCAL_API_LOGGER.info(f'len last_hidden_state: {len(last_hidden_state)}')
    # Get the embedding of the last token
last_indx = input_ids.size()[1] - 1
if last_indx == 0:
last_token_embedding = last_hidden_state[-1].squeeze()
else:
last_token_embedding = last_hidden_state[-1].squeeze()[last_indx].squeeze()
LOCAL_API_LOGGER.info(f'last_token_embedding len: {len(last_token_embedding)}')
# print(f'last_token_embedding[:4] : {last_token_embedding[:3]}')
last_token_embedding = last_token_embedding.tolist()
return last_token_embedding
def call_text_davinci_003(prompt):
api_model_index = 'text-davinci-003'
openai.api_key = KEY_MANAGER.get_api_key()
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.5,
max_tokens=1024,
stop=["\n\n\n", "###"],
)
LOCAL_API_LOGGER.info(f"[{api_model_index} request cost token]: {response['usage']['total_tokens']}")
LOCAL_API_LOGGER.info(f"[{api_model_index} available tokens]: {4000 - response['usage']['total_tokens']}")
text = response['choices'][0]['text'].strip()
return text
def call_gpt3_5_turbo(prompt):
api_model_index = 'gpt-3.5-turbo'
openai.api_key = KEY_MANAGER.get_api_key()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=0.1,
stop=["###"]
)
LOCAL_API_LOGGER.info(f"[{api_model_index} request cost token]: {response['usage']['total_tokens']}")
LOCAL_API_LOGGER.info(f"[{api_model_index} available tokens]: {4000 - response['usage']['total_tokens']}")
text = response['choices'][0]['message']['content'].strip()
return text
def call_bloom(prompt):
print(f'call_bloom : \n\nprompt \n\n{prompt}')
global BLOOM_MODEL
global BLOOM_TOKENIZER
checkpoint_path = '/mnt/bn/slp-llm/sft_lxn/bloom-alpaca/bloomz-alpaca-chat+data0407-allin-bz1k_epoch2_lr3e-6_global_step11364_hf'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not BLOOM_MODEL:
t0 = time.time()
LOCAL_API_LOGGER.info('Loading bloom model ... Please wait a few minutes ...')
BLOOM_MODEL, BLOOM_TOKENIZER = get_initialized_hf_model(checkpoint_path)
BLOOM_MODEL.to(device)
LOCAL_API_LOGGER.info('Model Loaded Success !!!')
time_cost(t0)
model, tokenizer = BLOOM_MODEL, BLOOM_TOKENIZER
model.eval()
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
input_ids = input_ids.to(device)
LOCAL_API_LOGGER.info('generating ...')
max_new_tokens = min(512, 2000 - len(input_ids))
LOCAL_API_LOGGER.info(f'len input_ids = {len(input_ids[0])}')
LOCAL_API_LOGGER.info(f'max_new_tokens: {max_new_tokens}')
outputs = model.generate(input_ids, max_new_tokens=max_new_tokens, do_sample = True, top_k = 30, top_p = 0.85, temperature = 0.5, repetition_penalty=1., eos_token_id=2, bos_token_id=1, pad_token_id=0)
rets = tokenizer.batch_decode(outputs, skip_special_tokens=True)
LOCAL_API_LOGGER.info('generating done!')
text = rets[0].strip().replace(prompt, "")
return text
def call_alpaca():
pass
def call_llama():
pass
MODEL_MAP = {
'text-davinci-003': call_text_davinci_003,
'gpt-3.5-turbo': call_gpt3_5_turbo,
'bloom': call_bloom,
'alpaca': call_alpaca,
'llama': call_llama
}
MODEL_EMBEDDING_MAP = {
'text-embedding-ada-002': call_embedding_openai,
'text-davinci-003': call_embedding_openai,
'gpt-3.5-turbo': call_embedding_openai,
'bloom': call_embedding_bloom,
'alpaca': call_alpaca,
'llama': call_llama
}
MODEL_LIST = [k for k in MODEL_MAP.keys()]
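# Dispatch sketch (model choice is illustrative; the calls hit the corresponding
# API or local model): completion and embedding functions are looked up by name.
#
# reply = MODEL_MAP['text-davinci-003']('Hello!')
# vector = MODEL_EMBEDDING_MAP['text-davinci-003']('Hello!')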
| EXA-1-master | exa/libraries/SCM4LLMs/api.py |
import torch
import torch.nn.functional as F
import numpy as np
from api import *
import tiktoken
from tools import *
import json
from transformers import GPT2Tokenizer
from tools import load_jsonl_file, datetime2str, save_json_file, save_file
LOCAL_CHAT_LOGGER = None
def set_chat_logger(one):
global LOCAL_CHAT_LOGGER
LOCAL_CHAT_LOGGER = one
def get_tokenizer_func(model_name):
# todo: add bloom, alpaca, llama tokenizer
if model_name in ['gpt-3.5-turbo', 'text-davinci-003']:
tokenizer = tiktoken.encoding_for_model(model_name)
return tokenizer.encode
else:
# default: gpt2 tokenizer
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
return tokenizer.tokenize
class SummaryTurn(object):
def __init__(self, paragraph, summary, embedding):
self.paragraph = paragraph
self.summary = summary
self.embedding = embedding
self.content_tokens_length = 0
self.summary_tokens_length = 0
def to_json(self):
js = {
'paragraph': self.paragraph,
'summary': self.summary
}
return js
def to_json_str(self):
js = self.to_json()
js_str = json.dumps(js, ensure_ascii=False) + '\n'
return js_str
def to_plain_text(self):
text = f'[paragraph]:\n{self.paragraph}\n\n'
text += f'[summary]:\n{self.summary}\n'
text += ('-' * 30 + '\n\n')
return text
class SummaryBot(object):
def __init__(self, model_name) -> None:
assert model_name in MODEL_LIST, f'model name "{model_name}" is not in {MODEL_LIST}'
self.model_name = model_name
self.api_func = MODEL_MAP[model_name]
self.turbo_func = MODEL_MAP['gpt-3.5-turbo']
self.embedding_func = MODEL_EMBEDDING_MAP[model_name]
self.tokenize_func = get_tokenizer_func(self.model_name)
self.history: list[SummaryTurn] = []
self.final_summary = ''
def clear_history(self):
self.history = []
def roll_back(self):
self.history.pop()
def export_history(self):
hist_lst = [one.to_json() for one in self.history]
hist_lst.append({'final summary': self.final_summary})
hist_txt_lst = [one.to_plain_text() for one in self.history]
hist_txt_lst.append(f"final summary: \n\n{self.final_summary}\n\n")
stamp = datetime2str()
json_filename = f'history/summary-{hist_lst[0]["summary"][:10]}-{self.model_name}-{stamp}.json'
txt_filename = f'history/summary-{hist_lst[0]["summary"][:10]}-{self.model_name}-{stamp}.txt'
save_json_file(json_filename, hist_lst)
save_file(txt_filename, hist_txt_lst)
def ask(self, prompt) -> str:
output = self.api_func(prompt)
return output
def is_history_need(self, prompt) -> str:
output = self.turbo_func(prompt)
LOCAL_CHAT_LOGGER.info(f'\n--------------\nprompt: \n{prompt}\n\n')
LOCAL_CHAT_LOGGER.info(f'output: {output}\n--------------\n')
if ('B' in output) or ('否' in output):
return False
return True
def vectorize(self, text) -> list:
output = self.embedding_func(text)
return output
def _summarize_paragraphs(self, paragraphs):
output = ''
templates_zh = '给定一个文档的各个段落摘要,请写出该文档的最终摘要,要求:(1) 用一段文字提取各段摘要里的关键信息,去除重复信息,组织成逻辑通顺的文本; (2) 字数不超过1500字; (3) 摘要内容使用中文。\n\n各段落摘要:\n\n{}\n\n文档摘要:'
templates_en = 'Given the summaries of each paragraph of a document, please write the final summary of the document, with the following requirements: (1) extract the key information from each paragraph summary into a single paragraph, removing duplicate information and organizing it into a logically coherent text; (2) the word count does not exceed 1500 words; (3) the summary content is in English.\n\nSummaries of each paragraph:\n\n{}\n\nDocument Summarization:'
paragraphs_text = '\n\n'.join(paragraphs).strip()
lang2template = {
LANG_EN: templates_en,
LANG_ZH: templates_zh
}
tmp = choose_language_template(lang2template, paragraphs[0])
input_text = tmp.format(paragraphs_text)
LOCAL_CHAT_LOGGER.info(f"input_text:\n\n{input_text}")
output = self.ask(input_text)
LOCAL_CHAT_LOGGER.info(f"output:\n\n{output}")
return output
def _divide_conquer_summary(self, content_lst):
tgt_summary = ''
summary_token_length_lst = [len(self.tokenize_func(txt)) for txt in content_lst]
LOCAL_CHAT_LOGGER.info(f"summary_token_length_lst:\n\n{summary_token_length_lst}")
total_tokens = sum(summary_token_length_lst)
def split_array(arr):
mid = len(arr) // 2
return arr[:mid], arr[mid:]
if total_tokens < 2500:
tgt_summary = self._summarize_paragraphs(content_lst)
else:
left, right = split_array(content_lst)
left_summary = self._divide_conquer_summary(left)
right_summary = self._divide_conquer_summary(right)
tgt_summary = self._divide_conquer_summary([left_summary, right_summary])
LOCAL_CHAT_LOGGER.info(f"tgt_summary:\n\n{tgt_summary}")
return tgt_summary
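    # Recursion sketch: given per-paragraph summaries [s1, s2, s3, s4] whose total
    # token count exceeds 2500, the list is split in half, each half is summarized
    # recursively, and the two partial summaries are merged by a final
    # _summarize_paragraphs([left_summary, right_summary]) call.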
def get_final_summary(self):
sub_summary_lst = [item.summary for item in self.history]
final_summary = self._divide_conquer_summary(sub_summary_lst)
self.final_summary = final_summary
return final_summary
def add_turn_history(self, turn: SummaryTurn):
turn.content_tokens_length = len(self.tokenize_func(turn.paragraph))
turn.summary_tokens_length = len(self.tokenize_func(turn.summary))
self.history.append(turn)
def get_turn_for_previous(self):
turn = self.history[-1]
if turn.content_tokens_length < 500:
return turn.paragraph
else:
return turn.summary
    # todo: the retrieval here needs refinement; top-k is an upper bound rather than a requirement, and adding nothing is also acceptable when no history is relevant
def get_related_turn(self, query, k=3):
q_embedding = self.vectorize(query)
        # Only retrieve over turns in [0, previous turn); the previous turn's text is concatenated into the dialogue directly, so it needs no retrieval
sim_lst = [
self._similarity(q_embedding, v.embedding)
for v in self.history[:-1]
]
# convert to numpy array
arr = np.array(sim_lst)
# get indices and values of the top k maximum values
topk_indices = arr.argsort()[-k:]
topk_values = arr[topk_indices]
index_value_lst = [(idx, v) for idx, v in zip(topk_indices, topk_values)]
# print(index_value_lst)
sorted_index_value_lst = sorted(index_value_lst, key=lambda x: x[0])
LOCAL_CHAT_LOGGER.info(f'\n--------------\n')
LOCAL_CHAT_LOGGER.info(f"\nTop{k}相似历史索引及其相似度: \n\n{sorted_index_value_lst}\n")
LOCAL_CHAT_LOGGER.info(f'\n--------------\n')
retrieve_history_text = ''
for idx, sim_score in sorted_index_value_lst:
turn: SummaryTurn = self.history[idx]
            # Check the content length first
cur = turn.paragraph.strip()
use_summary = False
if turn.content_tokens_length > 300:
use_summary = True
cur = turn.summary.strip()
LOCAL_CHAT_LOGGER.info(f'\n@@@@@@@@@@@@@@@@@@')
LOCAL_CHAT_LOGGER.info(f'检索到的历史轮[使用摘要?{use_summary}]:{cur.strip()}')
LOCAL_CHAT_LOGGER.info(f'相似度:{sim_score}')
retrieve_history_text += f'{cur}\n\n'
return retrieve_history_text.strip()
def _similarity(self, v1, v2):
vec1 = torch.FloatTensor(v1)
vec2 = torch.FloatTensor(v2)
cos_sim = F.cosine_similarity(vec1, vec2, dim=0)
        return cos_sim
 | EXA-1-master | exa/libraries/SCM4LLMs/summary.py |
import torch
import torch.nn.functional as F
import numpy as np
from api import *
import tiktoken
import json
from transformers import GPT2Tokenizer
from tools import load_jsonl_file, datetime2str, save_json_file, save_file
LOCAL_CHAT_LOGGER = None
def set_chat_logger(one):
global LOCAL_CHAT_LOGGER
LOCAL_CHAT_LOGGER = one
def get_tokenizer_func(model_name):
# todo: add bloom, alpaca, llama tokenizer
if model_name in ['gpt-3.5-turbo', 'text-davinci-003']:
tokenizer = tiktoken.encoding_for_model(model_name)
return tokenizer.encode
else:
# default: gpt2 tokenizer
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
return tokenizer.tokenize
class Turn(object):
def __init__(self, user_input, system_response, user_sys_text, summ, embedding):
self.user_input = user_input
self.system_response = system_response
self.user_sys_text = user_sys_text
self.summ = summ
self.embedding = embedding
self.content_tokens_length = 0
self.summary_tokens_length = 0
def to_json(self):
js = {
'user': self.user_input,
'system': self.system_response
# 'summary': self.summ
}
return js
def to_json_str(self):
js = self.to_json()
js_str = json.dumps(js, ensure_ascii=False) + '\n'
return js_str
def to_plain_text(self):
text = f'[User]:\n{self.user_input}\n\n'
text += f'[System]:\n{self.system_response}\n'
text += ('-' * 30 + '\n\n')
return text
class ChatBot(object):
def __init__(self, model_name) -> None:
assert model_name in MODEL_LIST, f'model name "{model_name}" is not in {MODEL_LIST}'
self.model_name = model_name
self.api_func = MODEL_MAP[model_name]
self.turbo_func = MODEL_MAP['gpt-3.5-turbo']
self.embedding_func = MODEL_EMBEDDING_MAP[model_name]
self.tokenize_func = get_tokenizer_func(self.model_name)
self.history: list[Turn] = []
def clear_history(self):
self.history = []
def roll_back(self):
self.history.pop()
def export_history(self):
hist_lst = [one.to_json() for one in self.history]
hist_txt_lst = [one.to_plain_text() for one in self.history]
stamp = datetime2str()
json_filename = f'history/{hist_lst[0]["user"][:10]}-{self.model_name}-{stamp}.json'
txt_filename = f'history/{hist_lst[0]["user"][:10]}-{self.model_name}-{stamp}.txt'
save_json_file(json_filename, hist_lst)
save_file(txt_filename, hist_txt_lst)
# def load_history(self, hist_file):
# diag_hist = load_jsonl_file(hist_file)
# emb_hist = load_jsonl_file(hist_file + '.emb.json')
# for dig, e in zip(diag_hist, emb_hist):
# js = {}
# js['text'] = dig['text']
# js['summ'] = dig['summ']
# js['embedding'] = e
# one = Turn(**js)
# self.history.append(one)
# self.show_history()
# def show_history(self):
# print('\n\n-------------【history】-------------\n\n')
# for i, turn in enumerate(self.history):
# print(f'{turn.text.strip()}\n\n')
# # print(f'对话摘要: \n{turn.summ}\n')
def ask(self, prompt) -> str:
output = self.api_func(prompt)
return output
def is_history_need(self, prompt) -> str:
output = self.turbo_func(prompt)
LOCAL_CHAT_LOGGER.info(f'\n--------------\nprompt: \n{prompt}\n\n')
LOCAL_CHAT_LOGGER.info(f'output: {output}\n--------------\n')
if ('B' in output) or ('否' in output):
return False
return True
def vectorize(self, text) -> list:
output = self.embedding_func(text)
return output
def add_turn_history(self, turn: Turn):
turn.content_tokens_length = len(self.tokenize_func(turn.user_sys_text))
turn.summary_tokens_length = len(self.tokenize_func(turn.summ))
self.history.append(turn)
def get_turn_for_previous(self):
turn = self.history[-1]
if turn.content_tokens_length < 500:
return turn.user_sys_text
else:
return turn.summ
def _is_concat_history_too_long(self, index_list):
turn_length_lst = [self.history[idx].content_tokens_length for idx in index_list]
total_tokens = sum(turn_length_lst)
        return total_tokens > 1500
    # todo: the retrieval here could be optimized; top-k is an upper bound rather than a requirement, and it is fine to add fewer turns or none at all
def get_related_turn(self, query, k=3):
q_embedding = self.vectorize(query)
        # only retrieve over [0, previous turn); the previous turn's text is concatenated into the dialogue directly and needs no retrieval
sim_lst = [
self._similarity(q_embedding, v.embedding)
for v in self.history[:-1]
]
# convert to numpy array
arr = np.array(sim_lst)
# get indices and values of the top k maximum values
topk_indices = arr.argsort()[-k:]
topk_values = arr[topk_indices]
# print the results
# print(f"Top {k} indices: ", topk_indices)
# print(f"Top {k} values: ", topk_values)
index_value_lst = [(idx, v) for idx, v in zip(topk_indices, topk_values)]
# print(index_value_lst)
sorted_index_value_lst = sorted(index_value_lst, key=lambda x: x[0])
LOCAL_CHAT_LOGGER.info(f'\n--------------\n')
LOCAL_CHAT_LOGGER.info(f"\nTop{k}相似历史索引及其相似度: \n\n{sorted_index_value_lst}\n")
LOCAL_CHAT_LOGGER.info(f'\n--------------\n')
shorten_history = self._is_concat_history_too_long(topk_indices)
retrieve_history_text = ''
for idx, sim_score in sorted_index_value_lst:
turn: Turn = self.history[idx]
            # check this turn's length first
cur = turn.user_sys_text.strip()
use_summary = False
if turn.content_tokens_length > 200 and shorten_history:
use_summary = True
cur = turn.summ.strip()
LOCAL_CHAT_LOGGER.info(f'\n@@@@@@@@@@@@@@@@@@')
            LOCAL_CHAT_LOGGER.info(f'Retrieved history turn [use summary? {use_summary}]: {cur.strip()}')
            LOCAL_CHAT_LOGGER.info(f'Similarity: {sim_score}')
retrieve_history_text += f'{cur}\n\n'
return retrieve_history_text
def _similarity(self, v1, v2):
vec1 = torch.FloatTensor(v1)
vec2 = torch.FloatTensor(v2)
cos_sim = F.cosine_similarity(vec1, vec2, dim=0)
return cos_sim | EXA-1-master | exa/libraries/SCM4LLMs/chat.py |
import os
import sys
import argparse
from os.path import join
from tools import *
import logging
from api import set_api_logger
from summary import SummaryBot, SummaryTurn, set_chat_logger
import gradio as gr
args: argparse.Namespace = None
bot: SummaryBot = None
# todo: this concatenated prompt may exceed the model's length limit; it should be sized dynamically.
def get_concat_input(user_str, pre_sre, hist_str=None):
templates_no_hist_zh = '给定当前文本和上文内容,请写出当前文本的摘要,要求:1)将上文内容作为当前文本的背景信息; 2)对当前文本进行压缩; 3) 输出内容使用中文:\n\n上文内容:{}\n\n当前文本:{}\n\n摘要:'
templates_no_hist_en = 'Given the current text and the previous text, please provide a summary of the current text. The requirements are: 1) use the previous text as background information for the current text; 2) compress the current text; 3) output the summary in English.\n\nPrevious text: {}\n\nCurrent text: {}\n\nSummary:'
lang2template = {
LANG_EN: templates_no_hist_en,
LANG_ZH: templates_no_hist_zh
}
templates_no_hist = choose_language_template(lang2template, user_str)
templates_hist_zh = '给定当前文本和上文内容,请写出当前文本的摘要,要求:1)将上文内容作为当前文本的背景信息; 2)对当前文本进行压缩; 3) 输出内容使用中文:\n\n上文内容:{}\n\n{}\n\n当前文本:{}\n\n摘要:'
templates_hist_en = 'Given the current text and the previous text, please provide a summary of the current text. The requirements are: 1) use the previous text as background information for the current text; 2) compress the current text; 3) output the summary in English.\n\nPrevious text: {}\n\n{}\n\nCurrent text: {}\n\nSummary:'
lang2template = {
LANG_EN: templates_hist_en,
LANG_ZH: templates_hist_zh
}
templates_hist = choose_language_template(lang2template, user_str)
if hist_str:
input_text = templates_hist.format(hist_str, pre_sre, user_str)
else:
input_text = templates_no_hist.format(pre_sre, user_str)
return input_text
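# Illustrative sketch (hypothetical helper, not called anywhere) of the two
# template paths above, assuming choose_language_template from tools selects
# the English template for English input.
def _concat_input_sketch():
    no_hist = get_concat_input('Current paragraph ...',
                               'Summary of the previous paragraph ...')
    with_hist = get_concat_input('Current paragraph ...',
                                 'Summary of the previous paragraph ...',
                                 hist_str='Retrieved earlier summaries ...')
    return no_hist, with_hist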
def check_key_file(key_file):
if not os.path.exists(key_file):
print(f'[{key_file}] not found! Please put your apikey in the txt file.')
sys.exit(-1)
def judge_need_history(user_instruction):
templates_zh = '给定一段文本内容,判断对该文本进行摘要是否需要历史信息或者上文的信息,要求:(1) 回答是(A)或者否(B),(2) 如果回答是(A),请说明需要补充哪些信息:\n\n文本内容:{}\n\n答案:'
templates_en = 'Given a piece of text, determine whether historical or previous information is needed for summarization. Requirements: (1) Answer with Yes(A) or No(B), (2) If the answer is Yes(A), please explain what information needs to be supplemented:\n\nText Content: {}\n\nAnswer:'
lang2template = {
LANG_EN: templates_en,
LANG_ZH: templates_zh
}
tmp = choose_language_template(lang2template, user_instruction)
input_text = tmp.format(user_instruction)
is_need = bot.is_history_need(input_text)
logger.info(f"\n--------------\n[is_need]: {'需要历史' if is_need else '不需要'}\n--------------\n")
return is_need
def get_first_prompt(user_text, model_name):
# todo: model specific prompt design, use [model_name]
templates_zh = '以下文本内容是长文档的一部分,请写出文本摘要:\n\n文本内容:{}\n\n摘要:'
templates_en = 'This is a part of a lengthy document, please write a summary:\n\nDocument content: {}\n\nSummary:'
lang2template = {
LANG_EN: templates_en,
LANG_ZH: templates_zh
}
tmp = choose_language_template(lang2template, user_text)
concat_input = tmp.format(user_text)
return concat_input
def my_chatbot(user_input, history):
history = history or []
user_input = user_input.strip()
    COMMAND_RETURN = 'Command executed successfully!'
if user_input in ['清空', 'reset']:
# history.append((user_input, COMMAND_RETURN))
history = []
bot.clear_history()
logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
return history, history
elif user_input in ['导出', 'export']:
# history.append((user_input, COMMAND_RETURN))
bot.export_history()
logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
return history, history
elif user_input in ['回退', '回滚', 'roll back']:
history.pop()
bot.roll_back()
logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
return history, history
elif user_input in ['final summary', '最终摘要']:
final_summary = bot.get_final_summary()
history.append((user_input, final_summary))
return history, history
len_hist = len(bot.history)
cur_turn_index = len_hist + 1
if len_hist == 0:
concat_input = get_first_prompt(user_input, args.model_name)
else:
retrieve = None
is_need = judge_need_history(user_input)
        # and only attach history when it is actually needed
if cur_turn_index > 2 and is_need:
retrieve = bot.get_related_turn(user_input, args.similar_top_k)
concat_input = get_concat_input(user_input, bot.get_turn_for_previous(), hist_str=retrieve)
    logger.info(f'\n--------------\n[Turn {cur_turn_index}] concat_input:\n\n{concat_input}\n--------------\n')
try:
rsp: str = bot.ask(concat_input)
except Exception as e:
logger.error(f'ERROR: \n\n{e}')
        rsp = 'Meow~ your request seems to have gotten lost in kitty land~'
history.append((user_input, rsp))
return history, history
summary = rsp.strip()
try:
embedding = bot.vectorize(summary)
except Exception as e:
logger.error(f'bot.vectorize ERROR: \n\n{e}')
        rsp = 'Summarization failed. Meow~ your request seems to have gotten lost in kitty land~'
history.append((user_input, rsp))
return history, history
cur_turn = SummaryTurn(paragraph=user_input, summary=summary, embedding=embedding)
bot.add_turn_history(cur_turn)
history.append((user_input, f"[summary]: {summary}"))
return history, history
if __name__ == '__main__':
parser = argparse.ArgumentParser()
model_choices = ['text-davinci-003', 'gpt-3.5-turbo', 'bloom', 'alpaca', 'llama']
parser.add_argument("--apikey_file", type=str, default="./config/apikey.txt")
parser.add_argument("--model_name", type=str, default="text-davinci-003", choices=model_choices)
parser.add_argument("--target_file", type=str)
parser.add_argument("--logfile", type=str, default="./logs/summary.log.txt")
parser.add_argument("--history_file", type=str)
parser.add_argument("--similar_top_k", type=int, default=4)
args = parser.parse_args()
check_key_file(args.apikey_file)
log_path = args.logfile
makedirs(log_path)
    # configure logging
logger = logging.getLogger('summary_logger')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('【%(asctime)s - %(levelname)s】 - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
file_handler = logging.FileHandler(log_path, encoding='utf-8')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
set_chat_logger(logger)
set_api_logger(logger)
logger.info('\n\n\n')
logger.info('#################################')
logger.info('#################################')
logger.info('#################################')
logger.info('\n\n\n')
logger.info(f"args: \n\n{args}\n")
stamp = datetime2str()
# print(stamp)
if args.target_file:
history_file = f'{args.target_file}'
else:
history_file = f'./history/{stamp}.json'
embedding_file = history_file + '.emb.json'
bot = SummaryBot(model_name=args.model_name)
# if args.history_file:
# history_file = args.history_file
# embedding_file = history_file + '.emb.json'
# bot.load_history(args.history_file)
# makedirs(history_file)
# makedirs(embedding_file)
# if args.target_file:
# with open(history_file, 'w') as file: pass
# with open(embedding_file, 'w') as file: pass
with gr.Blocks() as demo:
gr.Markdown(f"<h1><center>Long Summary Chatbot ({args.model_name})</center></h1>")
chatbot = gr.Chatbot()
state = gr.State()
txt = gr.Textbox(show_label=False, placeholder="Paste me with a paragraph and press enter.").style(container=False)
txt.submit(my_chatbot, inputs=[txt, state], outputs=[chatbot, state])
demo.launch(share = True) | EXA-1-master | exa/libraries/SCM4LLMs/summary-ui-demo.py |
"""MosaicML LLM Foundry package setup."""
import os
import re
from setuptools import setup
_PACKAGE_NAME = 'llm-foundry'
_PACKAGE_DIR = 'llmfoundry'
_REPO_REAL_PATH = os.path.dirname(os.path.realpath(__file__))
_PACKAGE_REAL_PATH = os.path.join(_REPO_REAL_PATH, _PACKAGE_DIR)
# Read the repo version
# We can't use `.__version__` from the library since it's not installed yet
with open(os.path.join(_PACKAGE_REAL_PATH, '__init__.py')) as f:
content = f.read()
# regex: '__version__', whitespace?, '=', whitespace, quote, version, quote
# we put parens around the version so that it becomes elem 1 of the match
expr = re.compile(r"""^__version__\W+=\W+['"]([0-9\.]*)['"]""", re.MULTILINE)
repo_version = expr.findall(content)[0]
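# A quick illustrative check (hypothetical example string, not used by the
# build): the quoted version becomes group 1 of the match.
_version_regex_demo = expr.findall("__version__ = '0.0.4'")  # ['0.0.4']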
# Use repo README for PyPi description
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
# Hide the content between <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN --> and
# <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END --> tags in the README
while True:
start_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN -->'
end_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END -->'
start = long_description.find(start_tag)
end = long_description.find(end_tag)
if start == -1:
assert end == -1, 'there should be a balanced number of start and ends'
break
else:
assert end != -1, 'there should be a balanced number of start and ends'
long_description = long_description[:start] + long_description[
end + len(end_tag):]
classifiers = [
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
]
install_requires = [
'mosaicml[nlp,streaming,wandb]>=0.14.0,<0.15',
'torch==1.13.1',
'datasets==2.10.1',
'sentencepiece==0.1.97',
'einops==0.5.0',
'omegaconf>=2.2.3,<3',
'pynvml<12',
'slack-sdk<4',
'mosaicml-cli>=0.3,<1',
'onnx==1.13.1',
'onnxruntime==1.14.1',
]
extra_deps = {}
extra_deps['dev'] = [
'pre-commit>=2.18.1,<3',
'pytest>=7.2.1,<8',
'pytest_codeblocks>=0.16.1,<0.17',
'pytest-cov>=4,<5',
'pyright==1.1.296',
'toml>=0.10.2,<0.11',
'packaging>=21,<23',
]
extra_deps['gpu'] = [
'flash-attn==v1.0.3.post0',
'triton==2.0.0.dev20221202',
'xentropy-cuda-lib@git+https://github.com/HazyResearch/[email protected]#subdirectory=csrc/xentropy',
]
extra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)
setup(
name=_PACKAGE_NAME,
version=repo_version,
author='MosaicML',
author_email='[email protected]',
description='LLM Foundry',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/mosaicml/llm-foundry/',
package_dir={_PACKAGE_DIR: _PACKAGE_REAL_PATH},
classifiers=classifiers,
install_requires=install_requires,
extras_require=extra_deps,
python_requires='>=3.7',
)
| EXA-1-master | exa/libraries/llm-foundry/setup.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
try:
import torch
from llmfoundry import optim, utils
from llmfoundry.data import (ConcatTokensDataset,
MixtureOfDenoisersCollator, NoConcatDataset,
Seq2SeqFinetuningCollator,
build_finetuning_dataloader,
build_text_denoising_dataloader)
from llmfoundry.models.hf import (ComposerHFCausalLM, ComposerHFPrefixLM,
ComposerHFT5)
from llmfoundry.models.layers.attention import (
MultiheadAttention, attn_bias_shape, build_alibi_bias, build_attn_bias,
flash_attn_fn, scaled_multihead_dot_product_attention,
triton_flash_attn_fn)
from llmfoundry.models.layers.blocks import MPTMLP, MPTBlock
from llmfoundry.models.model_registry import COMPOSER_MODEL_REGISTRY
from llmfoundry.models.mpt import (ComposerMPTCausalLM, MPTConfig,
MPTForCausalLM, MPTModel,
MPTPreTrainedModel)
except ImportError as e:
try:
is_cuda_available = torch.cuda.is_available() # type: ignore
except:
is_cuda_available = False
extras = '.[gpu]' if is_cuda_available else '.'
raise ImportError(
f'Please make sure to pip install {extras} to get the requirements for the LLM example.'
) from e
__all__ = [
'build_text_denoising_dataloader',
'build_finetuning_dataloader',
'MixtureOfDenoisersCollator',
'Seq2SeqFinetuningCollator',
'MPTMLP',
'MPTBlock',
'MPTConfig',
'MPTPreTrainedModel',
'MPTModel',
'MPTForCausalLM',
'ComposerMPTCausalLM',
'ComposerHFCausalLM',
'ComposerHFPrefixLM',
'ComposerHFT5',
'COMPOSER_MODEL_REGISTRY',
'scaled_multihead_dot_product_attention',
'flash_attn_fn',
'triton_flash_attn_fn',
'MultiheadAttention',
'NoConcatDataset',
'ConcatTokensDataset',
'attn_bias_shape',
'build_attn_bias',
'build_alibi_bias',
'optim',
'utils',
]
__version__ = '0.0.4'
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/__init__.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
from typing import List
from composer.core import Callback, State
from composer.loggers import Logger
__all__ = [
'GlobalLRScaling',
'LayerFreezing',
]
class GlobalLRScaling(Callback):
"""GlobalLRScaling.
This callback can be applied upon resuming a model checkpoint. Upon
    fit_start it will multiply the base LR by `lr_scale` and set the WD to
    `wd_pct` * `lr`.
Args:
lr_scale (float): Multiplicative factor to scale LR by
wd_pct (float): Percentage of LR to set weight decay to.
"""
def __init__(self, lr_scale: float, wd_pct: float = 0.0):
self.lr_scale = lr_scale
self.wd_pct = wd_pct
def fit_start(self, state: State, logger: Logger):
        if hasattr(state, 'optimizers') and state.optimizers is None:
raise Exception('No optimizers defined')
for optimizer in state.optimizers:
for group in optimizer.param_groups:
group['lr'] *= self.lr_scale
group['weight_decay'] = group['lr'] * self.wd_pct
if 'initial_lr' in group:
group['initial_lr'] *= self.lr_scale
print(
f"Set LR and WD to {group['lr']}, {group['weight_decay']}")
for scheduler in state.schedulers:
scheduler.base_lrs = [
self.lr_scale * lr for lr in scheduler.base_lrs
]
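# A worked numeric sketch (hypothetical values, not used by the callback) of
# the rescaling rule applied in fit_start above: the LR is multiplied by
# lr_scale and the weight decay is re-derived as wd_pct of the scaled LR.
def _global_lr_scaling_sketch(lr: float = 1.0e-4,
                              lr_scale: float = 0.5,
                              wd_pct: float = 1.0e-2):
    scaled_lr = lr * lr_scale  # e.g. 5.0e-5
    scaled_wd = scaled_lr * wd_pct  # e.g. 5.0e-7
    return scaled_lr, scaled_wd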
class LayerFreezing(Callback):
"""LayerFreezing.
This callback can be applied upon resuming a model checkpoint. Upon
    fit_start it freezes the layers specified in `layer_names`. If using
activation checkpointing, please set the
`activation_checkpointing_reentrant` flag in `fsdp_config` to false.
Args:
        layer_names (List[str]): Names of layers to freeze.
"""
def __init__(self, layer_names: List[str]):
self.layer_names = set(layer_names)
def fit_start(self, state: State, logger: Logger):
model_layers = set(name for name, _ in state.model.named_parameters())
for layer in self.layer_names:
if layer not in model_layers:
raise Exception(
f'Attempted to freeze layer not found in model: {layer}\nAvailable layers: {model_layers}'
)
successful_freeze = False
for name, p in state.model.named_parameters():
if p.requires_grad and name in self.layer_names:
p.requires_grad = False
print(f'Froze layer: {name}\nParam: {p}')
successful_freeze = True
if not successful_freeze:
raise Exception(
f"Tried to run LayerFreezing but didn't freeze any layers")
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/callbacks/resumption_callbacks.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
import os
import tempfile
from pathlib import Path
import torch
from composer.core import Callback, State
from composer.core.state import fsdp_state_dict_type_context
from composer.loggers import Logger
from composer.loggers.remote_uploader_downloader import RemoteUploaderDownloader
from composer.utils import (dist, format_name_with_dist_and_time, parse_uri,
reproducibility)
class MonolithicCheckpointSaver(Callback):
"""Save a monolithic checkpoint every N batches.
Args:
save_folder (str): Folder to save checkpoints to (can be a URI)
filename (str): Filename to save checkpoints to.
batch_interval (int): Number of batches between checkpoints.
overwrite (bool): Whether to overwrite previous checkpoints.
        keep_optimizers (bool): Whether to save the optimizer state in the monolithic checkpoint.
"""
def __init__(self,
save_folder: str,
batch_interval: int,
filename: str = 'ep{epoch}-ba{batch}.pt',
overwrite: bool = False,
keep_optimizers: bool = False):
self.backend, self.bucket_name, self.save_dir_format_str = parse_uri(
save_folder)
self.filename_format_str = filename
self.batch_interval = batch_interval
self.upload_to_object_store = (self.backend != '')
self.overwrite = overwrite
self.keep_optimizers = keep_optimizers
if self.upload_to_object_store:
self.remote_ud = RemoteUploaderDownloader(
bucket_uri=f'{self.backend}://{self.bucket_name}')
else:
self.remote_ud = None
def init(self, state: State, logger: Logger):
if self.upload_to_object_store and self.remote_ud is not None:
self.remote_ud.init(state, logger)
# updated_logger_destinations = [*logger.destinations, new_remote_ud]
# logger.destinations = tuple(updated_logger_destinations)
state.callbacks.append(self.remote_ud)
def batch_checkpoint(self, state: State, logger: Logger):
if state.timestamp.batch.value % self.batch_interval == 0:
self._save_checkpoint(state, logger)
def fit_end(self, state: State, logger: Logger):
if state.timestamp.batch.value % self.batch_interval != 0:
self._save_checkpoint(state, logger)
def _save_checkpoint(self, state: State, logger: Logger):
filename = format_name_with_dist_and_time(self.filename_format_str,
state.run_name,
state.timestamp)
save_dir = format_name_with_dist_and_time(self.save_dir_format_str,
state.run_name,
state.timestamp)
dir_context_mgr = tempfile.TemporaryDirectory(
) if self.upload_to_object_store else contextlib.nullcontext(
enter_result=save_dir)
with dir_context_mgr as temp_save_dir:
save_path = str(Path(temp_save_dir) / Path(filename))
dirname = os.path.dirname(save_path)
if dirname:
os.makedirs(dirname, exist_ok=True)
state_dict = {
'state': state.state_dict(),
'rng': reproducibility.get_rng_state()
}
if not self.keep_optimizers:
state_dict['state'].pop('optimizers')
with fsdp_state_dict_type_context(state.model,
state_dict_type='full'):
state_dict['state']['model'] = state.model.state_dict()
if dist.get_global_rank() == 0:
torch.save(state_dict, save_path)
if self.upload_to_object_store and self.remote_ud is not None and dist.get_global_rank(
) == 0:
remote_file_name = str(Path(save_dir) / Path(filename))
self.remote_ud.upload_file(state=state,
remote_file_name=remote_file_name,
file_path=Path(save_path),
overwrite=self.overwrite)
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/callbacks/monolithic_ckpt_callback.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""Periodically log generations to wandb from a set of prompts."""
from typing import List, Union, cast
import torch
import wandb
from composer.core import Callback, State
from composer.loggers import Logger, WandBLogger
from composer.utils import dist, ensure_tuple
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class Generate(Callback):
def __init__(self, prompts: List[str], batch_log_interval: int, **kwargs):
"""Periodically log generations to wandb from a set of prompts.
In the main view for a run, there will be a table that will show the _last_ logged generations.
To compare previous iterations of the generations, you need to
1. Click on the run
2. Click on "artifacts" in the menu on the left side of the screen
3. Click on one of the artifacts called "predictions"
4. Click on the "files" tab
5. Click on "predictions.table.json"
6. On the left hand side, there are different versions of the table produced throughout training. Select one of these.
7. Now, when you hover over other versions, there will be a "compare" button, which will allow you to compare the currently
selected version to the version you add via compare.
Args:
prompts (List[str]): The list of prompts you would like to produce generations for
batch_log_interval (int): The interval (in batches) at which this callback runs
kwargs: All kwargs well be passed along to the call to generate. This is for things like `do_sample`, `top_p`, etc
"""
self.prompts = prompts
self.batch_log_interval = batch_log_interval
self.generate_kwargs = kwargs
self.wandb_logger = None
def init(self, state: State, logger: Logger):
if dist.get_global_rank() == 0:
for destination in ensure_tuple(logger.destinations):
if isinstance(destination, WandBLogger):
self.wandb_logger = destination
def batch_checkpoint(self, state: State, logger: Logger):
if (state.timestamp.batch.value % self.batch_log_interval) == 0:
self.generate(state, logger)
def generate(self, state: State, logger: Logger):
model = state.model
original_mode = model.training
model.eval()
tokenizer = cast(Tokenizer, state.model.tokenizer)
device = state.device
        # stash the original value of padding_side because generation requires left padding
original_padding_side = tokenizer.padding_side
tokenizer.padding_side = 'left'
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenized_input = tokenizer(self.prompts,
return_tensors='pt',
padding=True)
for k, v in tokenized_input.items():
tokenized_input[k] = device.tensor_to_device(v)
# dummy forward call needed for FSDP to work consistently
dummy_input = torch.tensor([[0]], dtype=torch.long)
dummy_input = device.tensor_to_device(dummy_input)
with torch.no_grad():
_ = model.model(input_ids=dummy_input) # type: ignore
output_token_ids = model.model.generate( # type: ignore
input_ids=tokenized_input['input_ids'],
attention_mask=tokenized_input['attention_mask'],
synced_gpus=True,
**self.generate_kwargs,
)
if dist.get_global_rank() == 0:
if self.wandb_logger is not None:
artifact = wandb.Artifact(
'generate_samples_' + str(wandb.run.id), # type: ignore
type='predictions')
rows = []
for i in range(len(self.prompts)):
prompt = self.prompts[i]
output_tokens = output_token_ids[i][
tokenized_input['input_ids'].shape[1]:]
output_text = tokenizer.decode(output_tokens,
skip_special_tokens=True)
rows.append([prompt, output_text])
text_table = wandb.Table(data=rows,
columns=['prompt', 'generation'])
artifact.add(text_table, 'predictions')
wandb.log_artifact(artifact)
wandb.log({'generations': text_table},
step=state.timestamp.batch.value)
tokenizer.padding_side = original_padding_side
model.train(mode=original_mode)
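# An illustrative construction sketch (hypothetical prompts and kwargs): extra
# keyword arguments such as max_new_tokens or do_sample are stored in
# generate_kwargs and forwarded verbatim to model.generate each time the
# callback fires.
def _generate_callback_sketch() -> Generate:
    return Generate(prompts=['The quick brown fox'],
                    batch_log_interval=500,
                    max_new_tokens=64,
                    do_sample=True)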
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/callbacks/generate_callback.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
try:
from llmfoundry.callbacks.fdiff_callback import FDiffMetrics
from llmfoundry.callbacks.generate_callback import Generate
from llmfoundry.callbacks.monolithic_ckpt_callback import \
MonolithicCheckpointSaver
from llmfoundry.callbacks.resumption_callbacks import (GlobalLRScaling,
LayerFreezing)
from llmfoundry.callbacks.scheduled_gc_callback import \
ScheduledGarbageCollector
except ImportError as e:
raise ImportError(
'Please make sure to pip install . to get requirements for llm-foundry.'
) from e
__all__ = [
'FDiffMetrics',
'Generate',
'MonolithicCheckpointSaver',
'GlobalLRScaling',
'LayerFreezing',
'ScheduledGarbageCollector',
]
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/callbacks/__init__.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
import gc
import torch
from composer.core import Callback, State
from composer.loggers import Logger
def gc_cuda():
"""Gargage collect Torch (CUDA) memory."""
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
class ScheduledGarbageCollector(Callback):
"""Disable automatic garbage collection and collect garbage at interval.
Args:
        batch_interval (int): Number of batches between calls to gc.collect().
eval_keep_disabled (bool): keep gc disabled during eval (default: False)
"""
def __init__(
self,
batch_interval: int,
eval_keep_disabled: bool = False,
):
self.batch_interval = batch_interval
self.eval_keep_disabled = eval_keep_disabled
self.gc_init_state = None
def fit_start(self, state: State, logger: Logger):
# cache if automatic garbage collection is enabled; reset at fit_end
self.gc_init_state = gc.isenabled()
# disable automatic garbage collection
gc.disable()
gc_cuda()
def fit_end(self, state: State, logger: Logger):
gc_cuda()
# reset automatic garbage collection at fit_end
if self.gc_init_state:
gc.enable()
else:
gc.disable()
def before_dataloader(self, state: State, logger: Logger):
if state.timestamp.batch.value % self.batch_interval == 0:
gc_cuda()
def eval_start(self, state: State, logger: Logger):
gc_cuda()
if not self.eval_keep_disabled:
gc.enable()
def eval_end(self, state: State, logger: Logger):
if not self.eval_keep_disabled:
gc.disable()
gc_cuda()
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/callbacks/scheduled_gc_callback.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""Monitor rate of change of loss."""
from __future__ import annotations
import torch
from composer.core import Callback, State
from composer.loggers import Logger
class FDiffMetrics(Callback):
"""Rate of chage of metrics.
tracks and plots the rate of change of metrics effectively taking the
numerical derivative of the metrics
"""
def __init__(self, diff_train_metrics=False, diff_eval_metrics=True):
self.diff_train_metrics = diff_train_metrics
self.diff_eval_metrics = diff_eval_metrics
self.train_prev_loss = None
self.train_prev_metric = {}
self.eval_prev_metric = {}
def batch_end(self, state: State, logger: Logger):
if self.diff_train_metrics:
if not isinstance(state.loss, torch.Tensor):
raise NotImplementedError('Multiple losses not supported yet')
loss = state.loss.item()
if self.train_prev_loss:
logger.log_metrics(
{'loss/train/total_fdiff': loss - self.train_prev_loss})
self.train_prev_loss = loss
for k in self.train_prev_metric.keys():
logger.log_metrics({
f'metrics/train/{k}_fdiff':
state.train_metric_values[k] - self.train_prev_metric[k]
})
for k in state.train_metric_values.keys():
value = state.train_metric_values[k]
self.train_prev_metric[k] = value
def eval_end(self, state: State, logger: Logger):
if self.diff_eval_metrics:
evaluator = state.dataloader_label
metrics = list(state.eval_metrics[evaluator].keys()) # type: ignore
for k in metrics:
mkey = '/'.join(['metrics', evaluator, k]) # type: ignore
if mkey in self.eval_prev_metric.keys():
logger.log_metrics({
f'{mkey}_fdiff':
state.eval_metric_values[k] -
self.eval_prev_metric[mkey]
})
for k in metrics:
mkey = '/'.join(['metrics', evaluator, k]) # type: ignore
self.eval_prev_metric[mkey] = state.eval_metric_values[k]
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/callbacks/fdiff_callback.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
import logging
import math
from typing import Callable, Optional, Tuple
import torch
from composer.utils import dist
from torch.optim.optimizer import Optimizer
log = logging.getLogger(__name__)
class DecoupledLionW(Optimizer):
metric_functions = {
'l2_norm/moment':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
optim_state['exp_avg']),
'l2_norm/param':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
param.data),
'l2_norm/update':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
step_tensor),
'l2_norm/grad':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
param.grad),
'cosine/update_grad':
lambda param, optim_state, step_tensor: torch.nn.functional.
cosine_similarity(
param.grad.flatten(), step_tensor.flatten(), dim=0),
'cosine/moment_grad':
lambda param, optim_state, step_tensor: torch.nn.functional.
cosine_similarity(
param.grad.flatten(), optim_state['exp_avg'].flatten(), dim=0),
}
def __init__(
self,
params,
lr: float = 1e-4,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0,
):
if lr <= 0.:
raise Exception(f'Invalid LR: {lr}. LR must be > 0')
if not all([0. <= beta <= 1. for beta in betas]):
raise Exception(
f'Invalid beta values: {betas} All betas must be between 0 and 1.'
)
if weight_decay >= 1e-3:
log.warning(
f'You are using a high value of `weight_decay={weight_decay}` for the `DecoupledLionW` optimizer. Are you sure you want to do this? '
f'Your model\'s weights will be multiplied by {1.0 - weight_decay} on every step!'
)
defaults = {'lr': lr, 'betas': betas, 'weight_decay': weight_decay}
super().__init__(params, defaults)
for group in self.param_groups:
group['initial_lr'] = group['lr']
@staticmethod
def lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2) -> None:
# stepweight decay
if wd != 0:
decay_factor = (lr / initial_lr) if initial_lr else 1.0
p.data.mul_(1 - decay_factor * wd)
# update is interpolation between gradient and momentum
update = exp_avg.lerp(grad, 1 - beta1).sign_()
p.add_(update, alpha=-lr)
        # momentum is interpolated between the gradient and its previous value
exp_avg.lerp_(grad, 1 - beta2)
@torch.no_grad()
def step(self, closure: Optional[Callable] = None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda p: p.grad is not None and p.requires_grad,
group['params']):
grad, lr, initial_lr, wd, beta1, beta2, state = p.grad, group[
'lr'], group['initial_lr'], group[
'weight_decay'], *group['betas'], self.state[p]
# init state - exponential moving average of gradient values
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
exp_avg = state['exp_avg']
self.lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2)
return loss
def dist_reduce_metrics(self, optimizer_metrics):
for metric in optimizer_metrics:
if metric.startswith('l2_norm'):
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
optimizer_metrics[metric] = math.sqrt(reduced)
elif metric.startswith('cosine'):
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
A_reduced_norm = optimizer_metrics[f'l2_norm/{A}/{layer}']
B_reduced_norm = optimizer_metrics[f'l2_norm/{B}/{layer}']
optimizer_metrics[metric] = reduced / (A_reduced_norm *
B_reduced_norm)
else:
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
optimizer_metrics[metric] = reduced / dist.get_world_size()
return optimizer_metrics
def pre_reduce_metrics(self, optimizer_metrics):
"""Preprocess metrics to reduce across ranks correctly."""
# Sort L2 norms first so they are squared before other metrics, which depend on squared values
metrics = optimizer_metrics.keys()
metrics = sorted(metrics,
key=lambda metric: 0 if 'l2_norm' in metric else 1)
for metric in metrics:
if metric.startswith('l2_norm'):
# L2 norms need to be squared, before they are reduced via summation
optimizer_metrics[metric] = optimizer_metrics[metric]**2
elif metric.startswith('cosine'):
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
# L2 norm would've been squared in previous branch
A_rank_subset_norm = math.sqrt(
optimizer_metrics[f'l2_norm/{A}/{layer}'])
B_rank_subset_norm = math.sqrt(
optimizer_metrics[f'l2_norm/{B}/{layer}'])
optimizer_metrics[
metric] *= A_rank_subset_norm * B_rank_subset_norm
return optimizer_metrics
def report_per_parameter_metrics(self, param: torch.Tensor, name: str,
optimizer_metrics: dict):
lr = self.param_groups[0]['lr']
weight_decay = self.param_groups[0]['weight_decay']
initial_lr = self.param_groups[0]['initial_lr']
beta1, _ = self.param_groups[0]['betas']
if param in self.state:
param_optim_state = self.state[param]
step_tensor = param_optim_state['exp_avg'].clone().lerp_(
param.grad, 1 - beta1).sign_().mul_(lr)
decay_factor = (lr / initial_lr) if initial_lr else 1.0
step_tensor.add_(param, alpha=-weight_decay * decay_factor)
for metric in self.metric_functions:
optimizer_metrics[f'{metric}/{name}'] = self.metric_functions[
metric](param, param_optim_state, step_tensor)
return optimizer_metrics
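# A minimal numeric sketch (hypothetical values, not used by the optimizer) of
# the lionw update above on a single parameter tensor: decoupled weight decay
# first, then a signed step, then the slower momentum update.
def _lionw_update_sketch():
    p = torch.ones(3)
    grad = torch.tensor([0.5, -0.5, 0.0])
    exp_avg = torch.zeros(3)
    lr, initial_lr, wd, beta1, beta2 = 1e-4, 1e-4, 1e-2, 0.9, 0.99
    DecoupledLionW.lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2)
    # p was scaled by (1 - (lr / initial_lr) * wd) and then moved by
    # -lr * sign(lerp(exp_avg, grad, 1 - beta1)); exp_avg now equals
    # lerp(0, grad, 1 - beta2) = 0.01 * grad.
    return p, exp_avg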
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/optim/lion.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
from llmfoundry.optim.adaptive_lion import DecoupledAdaLRLion, DecoupledClipLion
from llmfoundry.optim.lion import DecoupledLionW
__all__ = ['DecoupledLionW', 'DecoupledClipLion', 'DecoupledAdaLRLion']
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/optim/__init__.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
import collections
class OutlierDetector:
"""OutlierDetector.
This class implements an algorithm to detect outliers in sequential
numeric data (e.g. for gradient/moment norms in optimizers). It relies on a
delayed moving average which is the moving average of observations from time
step T-2*`delay_interval` to T-`delay_interval`. The motivation is that
outliers typically occur in clusters that can potentially stretch for many
observations, hence it's best to use a delayed moving average to detect
outliers.
It defines an outlier as any data point that is `threshold` times larger than the delayed moving average.
The class assumes data is inserted sequentially at evenly spaced intervals of unit length 1.
"""
def __init__(self, threshold: float = 7.5, delay_interval: int = 500):
self.intermediate_data_queue = collections.deque(maxlen=delay_interval)
self.delayed_moving_average = collections.deque(maxlen=delay_interval)
self.threshold = threshold
def insert_observation(self, obs: float) -> bool:
"""Insert observation.
        Inserts obs into the data buffer and returns true if it is an "outlier", defined as more than
        `threshold` times the windowed moving average from [T-2*`delay_interval` : T-`delay_interval`].
This algorithm first moves recent data into an intermediate circular buffer, and then moves data in to the delayed moving average buffer
once it is old enough to be evicted from the intermediate data. This is to ensure that we take a delayed moving average that doesn't include recent data.
Args:
obs (float): Numeric observation for the current timestep.
Returns:
bool: Indicator of whether the most recent observation was an outlier.
"""
if len(self.intermediate_data_queue # type: ignore
) >= self.intermediate_data_queue.maxlen:
# move data from intermediate queue to slow moving average queue
intermediate_obs = self.intermediate_data_queue.popleft()
self.delayed_moving_average.append(intermediate_obs)
self.intermediate_data_queue.append(obs)
delayed_mva = self.get_delayed_mva()
return delayed_mva is not None and obs > self.threshold * delayed_mva
def get_delayed_mva(self):
if len(self.delayed_moving_average) > 0:
return sum(self.delayed_moving_average) / len(
self.delayed_moving_average)
else:
return None
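# A minimal usage sketch (hypothetical observations, not used elsewhere): with
# a short delay_interval a spike only counts as an outlier once enough history
# has aged into the delayed moving-average window.
def _outlier_detector_sketch():
    detector = OutlierDetector(threshold=2.0, delay_interval=3)
    flags = [detector.insert_observation(x) for x in (1.0, 1.0, 1.0, 1.0, 10.0)]
    # The delayed average only becomes available at the fourth insert; the
    # final 10.0 exceeds 2x that average, so flags == [False] * 4 + [True].
    return flags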
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/optim/outlier_detection.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
import logging
import math
from typing import Callable, Optional, Tuple
import torch
from composer.utils import dist
from torch.optim.optimizer import Optimizer
from llmfoundry.optim.outlier_detection import OutlierDetector
log = logging.getLogger(__name__)
class DecoupledAdaLRLion(Optimizer):
"""DecoupledAdaLRLion.
This class implements a variant of Lion which lowers the layerwise
learning rate when the layer's moment becomes an outlier. A moment is an
outlier if it is some multiple `outlier_threshold` times larger than the
simple windowed moving average (MVA) of moment norms taken from steps T-1000
to T-500. If an outlier is detected, the LR is lowered by `lr_penalty` for
`timeout` steps. If N outliers are detected within `timeout` steps, the LR
is scaled down by min(`lr_penalty` ** N, `min_scale`).
Args:
params (Iterable[torch.Parameter]): Model parameters to optimize
lr (float): Learning rate for updates
betas (Tuple[float]): Momentum factors
weight_decay (float): Weight decay
        outlier_threshold (float): Multiplicative factor determining what constitutes an "outlier" relative to the MVA of moment norms.
timeout (int): Number of steps to lower the learning for after seeing an outlier.
lr_penalty (float): Multiplicative scale by which to lower the LR for each outlier.
        min_scale (float): Minimum allowed scaling of the LR.
"""
metric_functions = {
'l2_norm/moment':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
optim_state['exp_avg']),
'l2_norm/param':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
param.data),
'l2_norm/update':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
step_tensor),
'l2_norm/grad':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
param.grad),
'cosine/update_grad':
lambda param, optim_state, step_tensor: torch.nn.functional.
cosine_similarity(
param.grad.flatten(), step_tensor.flatten(), dim=0),
'cosine/moment_grad':
lambda param, optim_state, step_tensor: torch.nn.functional.
cosine_similarity(
param.grad.flatten(), optim_state['exp_avg'].flatten(), dim=0),
}
def __init__(self,
params,
lr: float = 1e-4,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0,
outlier_threshold: float = 10.0,
timeout: int = 100,
lr_penalty: float = .707,
min_scale: float = 1e-4):
if lr <= 0.:
raise Exception(f'Invalid LR: {lr}. LR must be > 0')
if not all([0. <= beta <= 1. for beta in betas]):
raise Exception(
f'Invalid beta values: {betas} All betas must be between 0 and 1.'
)
if weight_decay >= 1e-3:
log.warning(
f'You are using a high value of `weight_decay={weight_decay}` for the `DecoupledLionW` optimizer. Are you sure you want to do this? '
f'Your model\'s weights will be multiplied by {1.0 - weight_decay} on every step!'
)
defaults = {'lr': lr, 'betas': betas, 'weight_decay': weight_decay}
super().__init__(params, defaults)
for group in self.param_groups:
group['initial_lr'] = group['lr']
self.outlier_threshold = outlier_threshold
self.timeout = timeout
self.lr_penalty = lr_penalty
self.min_scale = min_scale
@staticmethod
def lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2) -> None:
# stepweight decay
if wd != 0:
decay_factor = (lr / initial_lr) if initial_lr else 1.0
p.data.mul_(1 - decay_factor * wd)
# update is interpolation between gradient and momentum
update = exp_avg.lerp(grad, 1 - beta1).sign_()
p.add_(update, alpha=-lr)
# momentum is interp b/w gradient and itself
exp_avg.lerp_(grad, 1 - beta2)
@staticmethod
def adjust_lr(lr: float, lr_penalty: float, num_times: int,
min_scale: float):
"""Adjusts LR.
Multiplicatively scales down the LR by lr_penalty for each outlier
that has occurred in the last `timeout` number of steps, capping the
scaling to be no smaller than `min_scale`.
Args:
lr (float): Base learning rate
lr_penalty (float): Scaling factor to multiply by for each outlier
num_times (int): Number of outliers in the last `timeout` steps
min_scale (float): Minimum scaling to apply to our LR.
Returns:
float: Scaled LR
"""
return lr * max(min_scale, lr_penalty**num_times)
@torch.no_grad()
def step(self, closure: Optional[Callable] = None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda p: p.grad is not None and p.requires_grad,
group['params']):
grad, lr, initial_lr, wd, beta1, beta2, state = p.grad, group[
'lr'], group['initial_lr'], group[
'weight_decay'], *group['betas'], self.state[p]
# init state - exponential moving average of gradient values
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['moment_tracker'] = OutlierDetector(
self.outlier_threshold)
state['outlier_timestamp'] = []
state['step'] = 0
exp_avg = state['exp_avg']
# determine if the new moment resulting from this grad would be an outlier
moment_norm = torch.linalg.vector_norm(
exp_avg.lerp(grad, 1 - beta2))**2
if dist.get_world_size() > 1:
dist.all_reduce(moment_norm, reduce_operation='SUM')
moment_norm = math.sqrt(moment_norm)
if state['moment_tracker'].insert_observation(moment_norm):
state['outlier_timestamp'].append(state['step'])
removed = []
for ts in state['outlier_timestamp']:
if state['step'] - ts > self.timeout:
removed.append(ts)
for ts in removed:
state['outlier_timestamp'].remove(ts)
lr = self.adjust_lr(lr, self.lr_penalty,
len(state['outlier_timestamp']),
self.min_scale)
self.lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2)
state['step'] += 1
return loss
def dist_reduce_metrics(self, optimizer_metrics):
for metric in optimizer_metrics:
if metric.startswith('l2_norm'):
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
optimizer_metrics[metric] = math.sqrt(reduced)
elif metric.startswith('cosine'):
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
A_reduced_norm = optimizer_metrics[f'l2_norm/{A}/{layer}']
B_reduced_norm = optimizer_metrics[f'l2_norm/{B}/{layer}']
optimizer_metrics[metric] = reduced / (A_reduced_norm *
B_reduced_norm)
elif metric.startswith('layerwise_lr'):
continue
else:
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
optimizer_metrics[metric] = reduced / dist.get_world_size()
return optimizer_metrics
def pre_reduce_metrics(self, optimizer_metrics):
"""Preprocess metrics to reduce across ranks correctly."""
# Sort L2 norms first so they are squared before other metrics, which depend on squared values
metrics = optimizer_metrics.keys()
metrics = sorted(metrics,
key=lambda metric: 0 if 'l2_norm' in metric else 1)
for metric in metrics:
if metric.startswith('l2_norm'):
# L2 norms need to be squared, before they are reduced via summation
optimizer_metrics[metric] = optimizer_metrics[metric]**2
elif metric.startswith('cosine'):
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
# L2 norm would've been squared in previous branch
A_rank_subset_norm = math.sqrt(
optimizer_metrics[f'l2_norm/{A}/{layer}'])
B_rank_subset_norm = math.sqrt(
optimizer_metrics[f'l2_norm/{B}/{layer}'])
optimizer_metrics[
metric] *= A_rank_subset_norm * B_rank_subset_norm
return optimizer_metrics
def report_per_parameter_metrics(self, param: torch.Tensor, name: str,
optimizer_metrics: dict):
lr = self.param_groups[0]['lr']
weight_decay = self.param_groups[0]['weight_decay']
initial_lr = self.param_groups[0]['initial_lr']
beta1, _ = self.param_groups[0]['betas']
if param in self.state:
param_optim_state = self.state[param]
layerwise_lr = self.adjust_lr(
lr, self.lr_penalty,
len(param_optim_state['outlier_timestamp']), self.min_scale)
step_tensor = param_optim_state['exp_avg'].clone().lerp_(
param.grad, 1 - beta1).sign_().mul_(lr)
decay_factor = (lr / initial_lr) if initial_lr else 1.0
step_tensor.add_(param, alpha=-weight_decay * decay_factor)
for metric in self.metric_functions:
optimizer_metrics[f'{metric}/{name}'] = self.metric_functions[
metric](param, param_optim_state, step_tensor)
optimizer_metrics[f'layerwise_lr/{name}'] = torch.tensor(
layerwise_lr)
return optimizer_metrics
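# A worked numeric sketch (hypothetical values, not used by the optimizer) of
# adjust_lr above: two recent outliers with lr_penalty=0.707 roughly halve the
# layerwise LR, and min_scale caps how far it can be lowered.
def _adjust_lr_sketch() -> float:
    # 1e-4 * max(1e-4, 0.707 ** 2) ~= 5.0e-5
    return DecoupledAdaLRLion.adjust_lr(lr=1e-4,
                                        lr_penalty=0.707,
                                        num_times=2,
                                        min_scale=1e-4)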
class DecoupledClipLion(Optimizer):
"""DecoupledClipLION.
This class implements a variant of Lion which clips layerwise gradients
that are "outliers". A gradient is an outlier if it is some multiple k times
larger than the simple windowed moving average (MVA) of gradient norms taken
    from steps T-1000 to T-500. If an outlier is detected, it is clipped
    to no longer have norm k * MVA.
Args:
params (Iterable[torch.Parameter]): Model parameters to optimize
lr (float): Learning rate for updates
betas (Tuple[float]): Momentum factors
weight_decay (float): Weight decay
outlier_threshold (float): Multiplicative factor determining what constitutes an "outlier" relative to the MVA of gradient norms.
"""
metric_functions = {
'l2_norm/moment':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
optim_state['exp_avg']),
'l2_norm/param':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
param.data),
'l2_norm/update':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
step_tensor),
'l2_norm/grad':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(
param.grad),
'cosine/update_grad':
lambda param, optim_state, step_tensor: torch.nn.functional.
cosine_similarity(
param.grad.flatten(), step_tensor.flatten(), dim=0),
'cosine/moment_grad':
lambda param, optim_state, step_tensor: torch.nn.functional.
cosine_similarity(
param.grad.flatten(), optim_state['exp_avg'].flatten(), dim=0),
}
def __init__(self,
params,
lr: float = 1e-4,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0,
outlier_threshold=5.0):
if lr <= 0.:
raise Exception(f'Invalid LR: {lr}. LR must be > 0')
if not all([0. <= beta <= 1. for beta in betas]):
raise Exception(
f'Invalid beta values: {betas} All betas must be between 0 and 1.'
)
if weight_decay >= 1e-3:
log.warning(
f'You are using a high value of `weight_decay={weight_decay}` for the `DecoupledLionW` optimizer. Are you sure you want to do this? '
f'Your model\'s weights will be multiplied by {1.0 - weight_decay} on every step!'
)
defaults = {'lr': lr, 'betas': betas, 'weight_decay': weight_decay}
super().__init__(params, defaults)
for group in self.param_groups:
group['initial_lr'] = group['lr']
self.outlier_threshold = outlier_threshold
@staticmethod
def lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2) -> None:
# stepweight decay
if wd != 0:
decay_factor = (lr / initial_lr) if initial_lr else 1.0
p.data.mul_(1 - decay_factor * wd)
# update is interpolation between gradient and momentum
update = exp_avg.lerp(grad, 1 - beta1).sign_()
p.add_(update, alpha=-lr)
# momentum is interp b/w gradient and itself
exp_avg.lerp_(grad, 1 - beta2)
@torch.no_grad()
def step(self, closure: Optional[Callable] = None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda p: p.grad is not None and p.requires_grad,
group['params']):
grad, lr, initial_lr, wd, beta1, beta2, state = p.grad, group[
'lr'], group['initial_lr'], group[
'weight_decay'], *group['betas'], self.state[p]
# init state - exponential moving average of gradient values
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['grad_tracker'] = OutlierDetector(
self.outlier_threshold)
state['clipped_batches'] = torch.tensor(0.0)
exp_avg = state['exp_avg']
# determine if the new moment resulting from this grad would be an outlier
grad_norm = torch.linalg.vector_norm(grad)**2
if dist.get_world_size() > 1:
dist.all_reduce(grad_norm, reduce_operation='SUM')
grad_norm = math.sqrt(grad_norm)
if state['grad_tracker'].insert_observation(grad_norm):
state['clipped_batches'] += 1.0
                    clip_norm = state['grad_tracker'].get_delayed_mva(
                    ) * self.outlier_threshold
grad = grad.div(grad_norm).mul_(clip_norm)
self.lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2)
return loss
def dist_reduce_metrics(self, optimizer_metrics):
for metric in optimizer_metrics:
if metric.startswith('l2_norm'):
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
optimizer_metrics[metric] = math.sqrt(reduced)
elif metric.startswith('cosine'):
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
A_reduced_norm = optimizer_metrics[f'l2_norm/{A}/{layer}']
B_reduced_norm = optimizer_metrics[f'l2_norm/{B}/{layer}']
optimizer_metrics[metric] = reduced / (A_reduced_norm *
B_reduced_norm)
elif metric.startswith('clipped_batches'):
continue
else:
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
optimizer_metrics[metric] = reduced / dist.get_world_size()
return optimizer_metrics
def pre_reduce_metrics(self, optimizer_metrics):
"""Preprocess metrics to reduce across ranks correctly."""
# Sort L2 norms first so they are squared before other metrics, which depend on squared values
metrics = optimizer_metrics.keys()
metrics = sorted(metrics,
key=lambda metric: 0 if 'l2_norm' in metric else 1)
for metric in metrics:
if metric.startswith('l2_norm'):
# L2 norms need to be squared, before they are reduced via summation
optimizer_metrics[metric] = optimizer_metrics[metric]**2
elif metric.startswith('cosine'):
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
# L2 norm would've been squared in previous branch
A_rank_subset_norm = math.sqrt(
optimizer_metrics[f'l2_norm/{A}/{layer}'])
B_rank_subset_norm = math.sqrt(
optimizer_metrics[f'l2_norm/{B}/{layer}'])
optimizer_metrics[
metric] *= A_rank_subset_norm * B_rank_subset_norm
return optimizer_metrics
def report_per_parameter_metrics(self, param: torch.Tensor, name: str,
optimizer_metrics: dict):
lr = self.param_groups[0]['lr']
weight_decay = self.param_groups[0]['weight_decay']
initial_lr = self.param_groups[0]['initial_lr']
beta1, _ = self.param_groups[0]['betas']
if param in self.state:
param_optim_state = self.state[param]
step_tensor = param_optim_state['exp_avg'].clone().lerp_(
param.grad, 1 - beta1).sign_().mul_(lr)
decay_factor = (lr / initial_lr) if initial_lr else 1.0
step_tensor.add_(param, alpha=-weight_decay * decay_factor)
for metric in self.metric_functions:
optimizer_metrics[f'{metric}/{name}'] = self.metric_functions[
metric](param, param_optim_state, step_tensor)
optimizer_metrics[f'clipped_batches/{name}'] = param_optim_state[
'clipped_batches']
return optimizer_metrics
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/optim/adaptive_lion.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
import os
from typing import Union
from composer import algorithms
from composer.callbacks import (HealthChecker, LRMonitor, MemoryMonitor,
OptimizerMonitor, RuntimeEstimator,
SpeedMonitor)
from composer.core import Evaluator
from composer.datasets.in_context_learning_evaluation import \
get_icl_task_dataloader
from composer.loggers import WandBLogger
from composer.optim import DecoupledAdamW
from composer.optim.scheduler import (ConstantWithWarmupScheduler,
CosineAnnealingWithWarmupScheduler,
LinearWithWarmupScheduler)
from composer.utils import dist
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
from transformers import (AutoTokenizer, PreTrainedTokenizer,
PreTrainedTokenizerFast)
from llmfoundry.callbacks import (FDiffMetrics, Generate, GlobalLRScaling,
LayerFreezing, MonolithicCheckpointSaver,
ScheduledGarbageCollector)
from llmfoundry.optim import (DecoupledAdaLRLion, DecoupledClipLion,
DecoupledLionW)
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
def build_callback(name, kwargs):
if name == 'lr_monitor':
return LRMonitor()
elif name == 'memory_monitor':
return MemoryMonitor()
elif name == 'speed_monitor':
return SpeedMonitor(window_size=kwargs.get('window_size', 1),
gpu_flops_available=kwargs.get(
'gpu_flops_available', None))
elif name == 'fdiff':
return FDiffMetrics(**kwargs)
elif name == 'runtime_estimator':
return RuntimeEstimator()
elif name == 'optimizer_monitor':
return OptimizerMonitor(log_optimizer_metrics=kwargs.get(
'log_optimizer_metrics', True),)
elif name == 'health_checker':
return HealthChecker(**kwargs)
elif name == 'generate_callback':
prompts = kwargs.pop('prompts')
return Generate(prompts=list(prompts), **kwargs)
elif name == 'global_lr_scaling':
return GlobalLRScaling(**kwargs)
elif name == 'layer_freezing':
return LayerFreezing(**kwargs)
elif name == 'mono_ckpt_saver':
return MonolithicCheckpointSaver(**kwargs)
elif name == 'scheduled_gc':
return ScheduledGarbageCollector(**kwargs)
else:
raise ValueError(f'Not sure how to build callback: {name}')
def build_logger(name, kwargs):
if name == 'wandb':
return WandBLogger(**kwargs)
else:
raise ValueError(f'Not sure how to build logger: {name}')
def build_algorithm(name, kwargs):
if name == 'gradient_clipping':
return algorithms.GradientClipping(**kwargs)
elif name == 'alibi':
return algorithms.Alibi(**kwargs)
elif name == 'fused_layernorm':
return algorithms.FusedLayerNorm(**kwargs)
elif name == 'gated_linear_units':
return algorithms.GatedLinearUnits(**kwargs)
elif name == 'low_precision_layernorm':
return algorithms.LowPrecisionLayerNorm(**kwargs)
else:
raise ValueError(f'Not sure how to build algorithm: {name}')
def build_optimizer(cfg, model):
if cfg.name == 'decoupled_adamw':
return DecoupledAdamW(model.parameters(),
lr=cfg.lr,
betas=cfg.betas,
eps=cfg.eps,
weight_decay=cfg.weight_decay)
elif cfg.name == 'decoupled_lionw':
return DecoupledLionW(model.parameters(),
lr=cfg.lr,
betas=cfg.betas,
weight_decay=cfg.weight_decay)
elif cfg.name == 'clip_lion':
return DecoupledClipLion(model.parameters(),
lr=cfg.lr,
betas=cfg.betas,
weight_decay=cfg.weight_decay,
outlier_threshold=cfg.outlier_threshold)
elif cfg.name == 'adalr_lion':
return DecoupledAdaLRLion(model.parameters(),
lr=cfg.lr,
betas=cfg.betas,
weight_decay=cfg.weight_decay,
outlier_threshold=cfg.outlier_threshold,
timeout=cfg.timeout,
lr_penalty=cfg.lr_penalty,
min_scale=cfg.min_scale)
else:
raise ValueError(f'Not sure how to build optimizer: {cfg.name}')
def build_scheduler(cfg):
if cfg.name == 'constant_with_warmup':
return ConstantWithWarmupScheduler(t_warmup=cfg.t_warmup)
elif cfg.name == 'cosine_with_warmup':
return CosineAnnealingWithWarmupScheduler(t_warmup=cfg.t_warmup,
alpha_f=cfg.alpha_f)
elif cfg.name == 'linear_decay_with_warmup':
return LinearWithWarmupScheduler(t_warmup=cfg.t_warmup,
alpha_f=cfg.alpha_f)
else:
raise ValueError(f'Not sure how to build scheduler: {cfg.name}')
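# A minimal usage sketch (hypothetical config values; '100ba' is a Composer
# time string meaning 100 batches): a scheduler section of a training YAML
# maps directly onto this builder.
def _example_build_scheduler():
    cfg = om.create({
        'name': 'cosine_with_warmup',
        't_warmup': '100ba',
        'alpha_f': 0.1,
    })
    return build_scheduler(cfg)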
def build_tokenizer(om_tokenizer_config: DictConfig,) -> Tokenizer:
os.environ['TRANSFORMERS_NO_ADVISORY_WARNINGS'] = '1'
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
resolved_om_tokenizer_config = om.to_container(om_tokenizer_config,
resolve=True)
tokenizer_kwargs = resolved_om_tokenizer_config.get( # type: ignore
'kwargs', {})
tokenizer_name = resolved_om_tokenizer_config['name'] # type: ignore
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name,
**tokenizer_kwargs)
# HuggingFace does not respect the model_max_length kwarg, and overrides it with
# min(kwargs['model_max_length'], original_config['model_max_length']), so we
# explicitly set it here
tokenizer.model_max_length = tokenizer_kwargs.get(
'model_max_length',
int(1e30),
)
return tokenizer
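# A minimal usage sketch (hypothetical config; calling it downloads the 'gpt2'
# tokenizer from the HuggingFace Hub): the explicit model_max_length override
# described above is applied on top of the pretrained tokenizer.
def _example_build_gpt2_tokenizer() -> Tokenizer:
    cfg = om.create({'name': 'gpt2', 'kwargs': {'model_max_length': 2048}})
    return build_tokenizer(cfg)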
def build_icl_evaluators(icl_tasks,
tokenizer,
default_max_seq_len,
default_batch_size,
destination_dir=os.getcwd()):
evaluators = []
logger_keys = []
if isinstance(icl_tasks, str):
print(f'Extracting ICL task config from path: {icl_tasks}')
with open(icl_tasks, 'r') as icl_f:
icl_task_cfg = om.load(icl_f)
icl_tasks = icl_task_cfg.icl_tasks
def _validate_cfg(icl_cfg):
assert 'label' in icl_cfg
assert 'dataset_uri' in icl_cfg and icl_cfg.dataset_uri is not None
assert 'icl_task_type' in icl_cfg
assert 'num_fewshot' in icl_cfg
if 'metric_names' not in icl_cfg:
if icl_cfg.icl_task_type == 'language_modeling':
icl_cfg.metric_names = ['InContextLearningLMAccuracy']
elif icl_cfg.icl_task_type == 'multiple_choice':
icl_cfg.metric_names = [
'InContextLearningMultipleChoiceAccuracy'
]
elif icl_cfg.icl_task_type == 'schema':
icl_cfg.metric_names = [
'InContextLearningMultipleChoiceAccuracy'
]
elif icl_cfg.icl_task_type == 'question_answering':
icl_cfg.metric_names = ['InContextLearningQAAccuracy']
else:
raise ValueError(
f'No metric_names defined, unable to build default metrics for icl_task_type={icl_cfg.icl_task_type}.'
)
if 'prompt_string' not in icl_cfg:
icl_cfg.prompt_string = ''
if 'example_delimiter' not in icl_cfg:
icl_cfg.example_delimiter = '\n'
if 'continuation_delimiter' not in icl_cfg:
icl_cfg.continuation_delimiter = ' '
if 'max_seq_len' not in icl_cfg:
icl_cfg.max_seq_len = default_max_seq_len
if 'batch_size' not in icl_cfg:
icl_cfg.batch_size = default_batch_size
for icl_cfg in icl_tasks:
_validate_cfg(icl_cfg)
for num_fewshot in list(icl_cfg.num_fewshot):
if tokenizer.pad_token_id is None:
# Current workaround to support GPT2 tokenizer with `pad_token_id = None`
pad_tok_id = tokenizer.eos_token_id
else:
pad_tok_id = tokenizer.pad_token_id
label = f'{icl_cfg.label}/{num_fewshot}-shot'
metric_names = list(icl_cfg.metric_names)
# TODO: fix Composer bug when copying local paths and destination exists
destination_path = f'{destination_dir}/{icl_cfg.label}-{num_fewshot}.jsonl'
with dist.run_local_rank_zero_first():
if os.path.exists(destination_path):
os.remove(destination_path)
dataloaders = get_icl_task_dataloader(
icl_cfg.icl_task_type,
icl_cfg.dataset_uri,
tokenizer,
batch_size=icl_cfg.batch_size,
max_seq_len=icl_cfg.max_seq_len,
pad_tok_id=pad_tok_id,
num_fewshot=num_fewshot,
prompt_string=icl_cfg.prompt_string,
example_delimiter=icl_cfg.example_delimiter,
continuation_delimiter=icl_cfg.continuation_delimiter,
destination_path=destination_path,
has_categories=icl_cfg.get('has_categories', False),
)
if hasattr(
icl_cfg,
'has_categories') and icl_cfg.has_categories and isinstance(
dataloaders, dict):
for category in dataloaders.keys():
logger_keys.extend([
f'metrics/{label}/{category}/{m}' for m in metric_names
])
evaluators.append(
Evaluator(label=f'{label}/{category}',
dataloader=dataloaders[category],
metric_names=metric_names),)
else:
logger_keys.extend(
[f'metrics/{label}/{m}' for m in metric_names])
evaluators.append(
Evaluator(label=label,
dataloader=dataloaders,
metric_names=metric_names),)
return evaluators, logger_keys
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/utils/builders.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
try:
from llmfoundry.utils.builders import (build_algorithm, build_callback,
build_icl_evaluators, build_logger,
build_optimizer, build_scheduler,
build_tokenizer)
from llmfoundry.utils.config_utils import (calculate_batch_size_info,
log_config,
update_batch_size_info)
except ImportError as e:
raise ImportError(
'Please make sure to pip install . to get requirements for llm-foundry.'
) from e
__all__ = [
'build_callback',
'build_logger',
'build_algorithm',
'build_optimizer',
'build_scheduler',
'build_icl_evaluators',
'build_tokenizer',
'calculate_batch_size_info',
'update_batch_size_info',
'log_config',
]
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/utils/__init__.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
import math
from typing import Union
from composer.utils import dist
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
def calculate_batch_size_info(global_batch_size: int,
device_microbatch_size: Union[int, str]):
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'Global batch size {global_batch_size} is not divisible by {dist.get_world_size()} '
'as a result, the batch size would be truncated, please adjust `global_batch_size` '
f'to be divisible by world size, {dist.get_world_size()}.')
device_batch_size = global_batch_size // dist.get_world_size()
if device_microbatch_size == 'auto':
device_grad_accum = 'auto'
elif isinstance(device_microbatch_size, int):
if device_microbatch_size > device_batch_size:
print(
f'WARNING: device_microbatch_size > device_batch_size, '
f'will be reduced from {device_microbatch_size} -> {device_batch_size}.'
)
device_microbatch_size = device_batch_size
device_grad_accum = math.ceil(device_batch_size /
device_microbatch_size)
else:
raise ValueError(f'Not sure how to parse {device_microbatch_size=}')
return device_batch_size, device_microbatch_size, device_grad_accum
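# Worked example (assumed values): with global_batch_size=256 on 8 ranks and
# device_microbatch_size=4, each rank gets 256 // 8 = 32 samples per batch and
# accumulates gradients over ceil(32 / 4) = 8 microbatches:
# >>> calculate_batch_size_info(256, 4)  # assuming dist.get_world_size() == 8
# (32, 4, 8)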
# Coming soon: this conversion math will be done inside Composer Trainer
def update_batch_size_info(cfg: DictConfig):
device_train_batch_size, device_train_microbatch_size, device_train_grad_accum = calculate_batch_size_info(
cfg.global_train_batch_size, cfg.device_train_microbatch_size)
cfg.n_gpus = dist.get_world_size()
cfg.device_train_batch_size = device_train_batch_size
cfg.device_train_microbatch_size = device_train_microbatch_size
cfg.device_train_grad_accum = device_train_grad_accum
# Safely set `device_eval_batch_size` if not provided by user
if 'device_eval_batch_size' not in cfg:
if cfg.device_train_microbatch_size == 'auto':
cfg.device_eval_batch_size = 1 # TODO debug auto eval microbatching
else:
cfg.device_eval_batch_size = cfg.device_train_microbatch_size
return cfg
def log_config(cfg: DictConfig):
print(om.to_yaml(cfg))
if 'wandb' in cfg.get('loggers', {}):
try:
import wandb
except ImportError as e:
raise e
if wandb.run:
wandb.config.update(om.to_container(cfg, resolve=True))
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/utils/config_utils.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
from llmfoundry.models.hf import (ComposerHFCausalLM, ComposerHFPrefixLM,
ComposerHFT5)
from llmfoundry.models.mpt import (ComposerMPTCausalLM, MPTConfig,
MPTForCausalLM, MPTModel, MPTPreTrainedModel)
__all__ = [
'ComposerHFCausalLM',
'ComposerHFPrefixLM',
'ComposerHFT5',
'MPTConfig',
'MPTPreTrainedModel',
'MPTModel',
'MPTForCausalLM',
'ComposerMPTCausalLM',
]
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/__init__.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
from llmfoundry.models.hf import (ComposerHFCausalLM, ComposerHFPrefixLM,
ComposerHFT5)
from llmfoundry.models.mpt import ComposerMPTCausalLM
COMPOSER_MODEL_REGISTRY = {
'mpt_causal_lm': ComposerMPTCausalLM,
'hf_causal_lm': ComposerHFCausalLM,
'hf_prefix_lm': ComposerHFPrefixLM,
'hf_t5': ComposerHFT5,
}
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/model_registry.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""Attention layers."""
import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from torch import nn
from llmfoundry.models.layers.norm import LPLayerNorm
def _reset_is_causal(num_query_tokens: int, num_key_tokens: int,
original_is_causal: bool):
if original_is_causal and num_query_tokens != num_key_tokens:
if num_query_tokens != 1:
raise NotImplementedError(
'MPT does not support query and key with different number of tokens, unless number of query tokens is 1.'
)
else:
return False
return original_is_causal
def scaled_multihead_dot_product_attention(
query,
key,
value,
n_heads,
softmax_scale=None,
attn_bias=None,
key_padding_mask=None,
is_causal=False,
dropout_p=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
k = rearrange(key, 'b s (h d) -> b h d s',
h=1 if multiquery else n_heads) # includes key.t()
v = rearrange(value, 'b s (h d) -> b h s d', h=1 if multiquery else n_heads)
min_val = torch.finfo(q.dtype).min
b, _, s_q, d = q.shape
s_k = k.size(-1)
if softmax_scale is None:
softmax_scale = 1 / math.sqrt(d)
attn_weight = q.matmul(k) * softmax_scale
if attn_bias is not None:
if (attn_bias.size(-1) != 1 and
attn_bias.size(-1) != s_k) or (attn_bias.size(-2) != 1 and
attn_bias.size(-2) != s_q):
raise RuntimeError(
f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.'
)
attn_weight = attn_weight + attn_bias
if key_padding_mask is not None:
if attn_bias is not None:
warnings.warn(
                'Propagating key_padding_mask to the attention module ' +\
                'and applying it within the attention module can cause ' +\
                'unnecessary computation/memory usage. Consider integrating ' +\
                'into attn_bias once and passing that to each attention ' +\
                'module instead.'
)
attn_weight = attn_weight.masked_fill(
~key_padding_mask.view((b, 1, 1, s_k)), min_val)
if is_causal:
s = max(s_q, s_k)
causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
causal_mask = causal_mask.tril()
causal_mask = causal_mask.to(torch.bool)
causal_mask = ~causal_mask
causal_mask = causal_mask[-s_q:, -s_k:]
attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k),
min_val)
attn_weight = torch.softmax(attn_weight, dim=-1)
if dropout_p:
attn_weight = torch.nn.functional.dropout(attn_weight,
p=dropout_p,
training=training,
inplace=True)
out = attn_weight.matmul(v)
out = rearrange(out, 'b h s d -> b s (h d)')
if needs_weights:
return out, attn_weight
return out, None
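# Shape sketch (illustrative values, not part of the original module): inputs
# are [batch, seq, n_heads * head_dim], e.g. 2 x 16 x (8 * 32):
# >>> q = k = v = torch.randn(2, 16, 256)
# >>> out, w = scaled_multihead_dot_product_attention(
# ...     q, k, v, n_heads=8, is_causal=False, needs_weights=True)
# >>> out.shape, w.shape
# (torch.Size([2, 16, 256]), torch.Size([2, 8, 16, 16]))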
def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
for tensor in tensors:
if tensor.dtype not in valid_dtypes:
raise TypeError(f'{tensor.dtype=} must be in {valid_dtypes=}.')
if not tensor.is_cuda:
raise TypeError(f'Inputs must be cuda tensors ({tensor.is_cuda=}).')
def flash_attn_fn(
query,
key,
value,
n_heads,
softmax_scale=None,
attn_bias=None,
key_padding_mask=None,
is_causal=False,
dropout_p=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
try:
from flash_attn import bert_padding, flash_attn_interface # type: ignore # yapf: disable # isort: skip
except:
raise RuntimeError('Please install flash-attn==1.0.3.post0')
check_valid_inputs(query, key, value)
if attn_bias is not None:
raise NotImplementedError(f'attn_bias not implemented for flash attn.')
batch_size, seqlen = query.shape[:2]
if key_padding_mask is None:
key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
query_padding_mask = key_padding_mask[:, -query.size(1):]
query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(
query, query_padding_mask)
query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(
key, key_padding_mask)
key_unpad = rearrange(key_unpad,
'nnz (h d) -> nnz h d',
h=1 if multiquery else n_heads)
value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)
value_unpad = rearrange(value_unpad,
'nnz (h d) -> nnz h d',
h=1 if multiquery else n_heads)
if multiquery:
# Expanding a tensor does not allocate new memory, but only creates a new
# view on the existing tensor where a dimension of size one is expanded
# to a larger size by setting the stride to 0.
# - pytorch docs
#
        # hopefully the kernels can utilize this and we're not just wasting BW here
key_unpad = key_unpad.expand(key_unpad.size(0), n_heads,
key_unpad.size(-1))
value_unpad = value_unpad.expand(value_unpad.size(0), n_heads,
value_unpad.size(-1))
dropout_p = dropout_p if training else 0.0
reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
output_unpad = flash_attn_interface.flash_attn_unpadded_func(
query_unpad,
key_unpad,
value_unpad,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
dropout_p,
softmax_scale=softmax_scale,
causal=reset_is_causal,
return_attn_probs=needs_weights)
output = bert_padding.pad_input(
rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size,
seqlen)
return output, None
def triton_flash_attn_fn(
query,
key,
value,
n_heads,
softmax_scale=None,
attn_bias=None,
key_padding_mask=None,
is_causal=False,
dropout_p=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
try:
from flash_attn import flash_attn_triton # type: ignore
except:
raise RuntimeError(
'Please install flash-attn==1.0.3.post0 and triton==2.0.0.dev20221202'
)
check_valid_inputs(query, key, value)
if dropout_p:
raise NotImplementedError(
f'Dropout not implemented for attn_impl: triton.')
if needs_weights:
raise NotImplementedError(
f'attn_impl: triton cannot return attn weights.')
if key_padding_mask is not None:
warnings.warn(
'Propagating key_padding_mask to the attention module ' +\
'and applying it within the attention module can cause ' +\
'unnecessary computation/memory usage. Consider integrating ' +\
'into attn_bias once and passing that to each attention ' +\
'module instead.'
)
b_size, s_k = key_padding_mask.shape[:2]
if attn_bias is None:
attn_bias = query.new_zeros(b_size, 1, 1, s_k)
attn_bias = attn_bias.masked_fill(
~key_padding_mask.view((b_size, 1, 1, s_k)),
torch.finfo(query.dtype).min)
query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads)
key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
value = rearrange(value,
'b s (h d) -> b s h d',
h=1 if multiquery else n_heads)
if multiquery:
# Expanding a tensor does not allocate new memory, but only creates a new
# view on the existing tensor where a dimension of size one is expanded
# to a larger size by setting the stride to 0.
# - pytorch docs
#
        # hopefully the kernels can utilize this and we're not just wasting BW here
key = key.expand(*key.shape[:2], n_heads, key.size(-1))
value = value.expand(*value.shape[:2], n_heads, value.size(-1))
reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
attn_output = flash_attn_triton.flash_attn_func(query, key, value,
attn_bias, reset_is_causal,
softmax_scale)
output = attn_output.view(*attn_output.shape[:2], -1)
return output, None
class MultiheadAttention(nn.Module):
"""Multi-head self attention.
    Using torch or triton attention implementation enables the user to also use
additive bias.
"""
def __init__(
self,
d_model: int,
n_heads: int,
attn_impl: str = 'triton',
clip_qkv: Optional[float] = None,
qk_ln: bool = False,
softmax_scale: Optional[float] = None,
attn_pdrop: float = 0.0,
low_precision_layernorm: bool = False,
verbose: int = 0,
device: Optional[str] = None,
):
super().__init__()
self.attn_impl = attn_impl
self.clip_qkv = clip_qkv
self.qk_ln = qk_ln
self.d_model = d_model
self.n_heads = n_heads
self.softmax_scale = softmax_scale
if self.softmax_scale is None:
self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
self.attn_dropout_p = attn_pdrop
self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device)
# for param init fn; enables shape based init of fused layers
fuse_splits = (d_model, 2 * d_model)
self.Wqkv._fused = (0, fuse_splits) # type: ignore
if self.qk_ln:
layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
self.q_ln = layernorm_class(self.d_model, device=device)
self.k_ln = layernorm_class(self.d_model, device=device)
if self.attn_impl == 'flash':
self.attn_fn = flash_attn_fn
elif self.attn_impl == 'triton':
self.attn_fn = triton_flash_attn_fn
if verbose:
warnings.warn(
'While `attn_impl: triton` can be faster than `attn_impl: flash` ' +\
'it uses more memory. When training larger models this can trigger ' +\
'alloc retries which hurts performance. If encountered, we recommend ' +\
'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.'
)
elif self.attn_impl == 'torch':
self.attn_fn = scaled_multihead_dot_product_attention
if torch.cuda.is_available() and verbose:
warnings.warn(
'Using `attn_impl: torch`. If your model does not use `alibi` or ' +\
'`prefix_lm` we recommend using `attn_impl: flash` otherwise ' +\
'we recommend using `attn_impl: triton`.'
)
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
self.out_proj._is_residual = True # type: ignore
def forward(self,
x,
past_key_value=None,
attn_bias=None,
attention_mask=None,
is_causal=True,
needs_weights=False):
qkv = self.Wqkv(x)
if self.clip_qkv:
qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
query, key, value = qkv.chunk(3, dim=2)
key_padding_mask = attention_mask
if self.qk_ln:
# Applying layernorm to qk
dtype = query.dtype
query = self.q_ln(query).to(dtype)
key = self.k_ln(key).to(dtype)
if past_key_value is not None:
if len(past_key_value) != 0:
key = torch.cat([past_key_value[0], key], dim=1)
value = torch.cat([past_key_value[1], value], dim=1)
past_key_value = (key, value)
if attn_bias is not None:
attn_bias = attn_bias[:, :, -query.size(1):, -key.size(1):]
context, attn_weights = self.attn_fn(
query,
key,
value,
self.n_heads,
softmax_scale=self.softmax_scale,
attn_bias=attn_bias,
key_padding_mask=key_padding_mask,
is_causal=is_causal,
dropout_p=self.attn_dropout_p,
training=self.training,
needs_weights=needs_weights,
)
return self.out_proj(context), attn_weights, past_key_value
class MultiQueryAttention(nn.Module):
"""Multi-Query self attention.
    Using torch or triton attention implementation enables the user to also use
additive bias.
"""
def __init__(
self,
d_model: int,
n_heads: int,
attn_impl: str = 'triton',
clip_qkv: Optional[float] = None,
qk_ln: bool = False,
softmax_scale: Optional[float] = None,
attn_pdrop: float = 0.0,
low_precision_layernorm: bool = False,
verbose: int = 0,
device: Optional[str] = None,
):
super().__init__()
self.attn_impl = attn_impl
self.clip_qkv = clip_qkv
self.qk_ln = qk_ln
self.d_model = d_model
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.softmax_scale = softmax_scale
if self.softmax_scale is None:
self.softmax_scale = 1 / math.sqrt(self.head_dim)
self.attn_dropout_p = attn_pdrop
# NOTE: if we ever want to make attn TensorParallel, I'm pretty sure we'll
# want to split Wqkv into Wq and Wkv where Wq can be TensorParallel but
# Wkv shouldn't be TensorParallel
# - vchiley
self.Wqkv = nn.Linear(d_model,
d_model + 2 * self.head_dim,
device=device)
# for param init fn; enables shape based init of fused layers
fuse_splits = (d_model, d_model + self.head_dim)
self.Wqkv._fused = (0, fuse_splits) # type: ignore
if self.qk_ln:
layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
self.q_ln = layernorm_class(d_model, device=device)
self.k_ln = layernorm_class(self.head_dim, device=device)
if self.attn_impl == 'flash':
self.attn_fn = flash_attn_fn
elif self.attn_impl == 'triton':
self.attn_fn = triton_flash_attn_fn
if verbose:
warnings.warn(
'While `attn_impl: triton` can be faster than `attn_impl: flash` ' +\
'it uses more memory. When training larger models this can trigger ' +\
'alloc retries which hurts performance. If encountered, we recommend ' +\
'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.'
)
elif self.attn_impl == 'torch':
self.attn_fn = scaled_multihead_dot_product_attention
if torch.cuda.is_available() and verbose:
warnings.warn(
'Using `attn_impl: torch`. If your model does not use `alibi` or ' +\
'`prefix_lm` we recommend using `attn_impl: flash` otherwise ' +\
'we recommend using `attn_impl: triton`.'
)
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
self.out_proj._is_residual = True # type: ignore
def forward(self,
x,
past_key_value=None,
attn_bias=None,
attention_mask=None,
is_causal=True,
needs_weights=False):
qkv = self.Wqkv(x)
if self.clip_qkv:
qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
query, key, value = qkv.split(
[self.d_model, self.head_dim, self.head_dim], dim=2)
key_padding_mask = attention_mask
if self.qk_ln:
# Applying layernorm to qk
dtype = query.dtype
query = self.q_ln(query).to(dtype)
key = self.k_ln(key).to(dtype)
if past_key_value is not None:
if len(past_key_value) != 0:
key = torch.cat([past_key_value[0], key], dim=1)
value = torch.cat([past_key_value[1], value], dim=1)
past_key_value = (key, value)
if attn_bias is not None:
attn_bias = attn_bias[:, :, -query.size(1):, -key.size(1):]
context, attn_weights = self.attn_fn(
query,
key,
value,
self.n_heads,
softmax_scale=self.softmax_scale,
attn_bias=attn_bias,
key_padding_mask=key_padding_mask,
is_causal=is_causal,
dropout_p=self.attn_dropout_p,
training=self.training,
needs_weights=needs_weights,
multiquery=True,
)
return self.out_proj(context), attn_weights, past_key_value
def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal,
use_sequence_id):
if attn_impl == 'flash':
return None
elif attn_impl in ['torch', 'triton']:
if alibi:
if (prefix_lm or not causal) or use_sequence_id:
return (1, n_heads, seq_len, seq_len)
return (1, n_heads, 1, seq_len)
elif prefix_lm or use_sequence_id:
return (1, 1, seq_len, seq_len)
return None
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
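# Shape sketch (illustrative values): with alibi and a purely causal model (no
# prefix_lm / sequence_id) the bias broadcasts over the query dimension, while
# attn_impl='flash' applies masking internally and needs no bias tensor:
# >>> attn_bias_shape('triton', n_heads=8, seq_len=128, alibi=True,
# ...                 prefix_lm=False, causal=True, use_sequence_id=False)
# (1, 8, 1, 128)
# >>> attn_bias_shape('flash', 8, 128, True, False, True, False) is None
# True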
def build_attn_bias(
attn_impl,
attn_bias,
n_heads,
seq_len,
causal=False,
alibi=False,
alibi_bias_max=8,
):
if attn_impl == 'flash':
return None
elif attn_impl in ['torch', 'triton']:
if alibi:
# in place add alibi to attn bias
device, dtype = attn_bias.device, attn_bias.dtype
attn_bias = attn_bias.add(
build_alibi_bias(
n_heads,
seq_len,
full=not causal,
alibi_bias_max=alibi_bias_max,
device=device,
dtype=dtype,
))
return attn_bias
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
def gen_slopes(n_heads, alibi_bias_max=8, device=None):
_n_heads = 2**math.ceil(math.log2(n_heads))
m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
m = m.mul(alibi_bias_max / _n_heads)
slopes = (1. / torch.pow(2, m))
if _n_heads != n_heads:
# if n_heads is not a power of two,
# Huggingface and FasterTransformer calculate slopes normally,
# then return this strided concatenation of slopes
slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
return slopes.view(1, n_heads, 1, 1)
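# Illustrative values: for n_heads=8 (a power of two) and alibi_bias_max=8,
# m = [1, 2, ..., 8], so the per-head slopes are 1/2, 1/4, ..., 1/256:
# >>> gen_slopes(8).flatten()
# tensor([0.5000, 0.2500, 0.1250, 0.0625, 0.0312, 0.0156, 0.0078, 0.0039])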
def build_alibi_bias(
n_heads,
seq_len,
full=False,
alibi_bias_max=8,
device=None,
dtype=None,
):
alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32,
device=device).view(1, 1, 1, seq_len)
if full:
# generate 1 x Heads x SeqLen x SeqLen alibi bias mask
# otherwise the mask is 1 x Heads x 1 x SeqLen (which is broadcast to the appropriate size)
alibi_bias = alibi_bias - torch.arange(
1 - seq_len, 1, dtype=torch.int32, device=device).view(
1, 1, seq_len, 1)
alibi_bias = alibi_bias.abs().mul(-1)
slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
alibi_bias = alibi_bias * slopes
return alibi_bias.to(dtype=dtype)
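# Shape sketch (illustrative values): the default (non-"full") bias is one row
# of non-positive offsets per head, [-(seq_len - 1), ..., -1, 0] scaled by that
# head's slope, which broadcasts over the query dimension at attention time:
# >>> build_alibi_bias(n_heads=4, seq_len=5, dtype=torch.float32).shape
# torch.Size([1, 4, 1, 5])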
ATTN_CLASS_REGISTRY = {
'multihead_attention': MultiheadAttention,
'multiquery_attention': MultiQueryAttention,
}
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/layers/attention.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
import torch
def _cast_if_autocast_enabled(tensor):
if torch.is_autocast_enabled():
if tensor.device.type == 'cuda':
dtype = torch.get_autocast_gpu_dtype()
elif tensor.device.type == 'cpu':
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor
class LPLayerNorm(torch.nn.LayerNorm):
def __init__(
self,
normalized_shape,
eps=1e-05,
elementwise_affine=True,
device=None,
dtype=None,
):
super().__init__(
normalized_shape=normalized_shape,
eps=eps,
elementwise_affine=elementwise_affine,
device=device,
dtype=dtype,
)
def forward(self, x):
module_device = x.device
downcast_x = _cast_if_autocast_enabled(x)
downcast_weight = _cast_if_autocast_enabled(
self.weight) if self.weight is not None else self.weight
downcast_bias = _cast_if_autocast_enabled(
self.bias) if self.bias is not None else self.bias
with torch.autocast(enabled=False, device_type=module_device.type):
return torch.nn.functional.layer_norm(
downcast_x,
self.normalized_shape,
downcast_weight,
downcast_bias,
self.eps,
)
def rms_norm(x, weight=None, eps=1e-5):
    output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
if weight is not None:
return output * weight
return output
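# Quick numeric sketch (illustrative): rms_norm divides each row by its
# root-mean-square, y = x / sqrt(mean(x_i ** 2) + eps), optionally scaled by a
# learned per-feature weight:
# >>> rms_norm(torch.tensor([[3.0, 4.0]]))  # rms = sqrt((9 + 16) / 2) ~= 3.536
# tensor([[0.8485, 1.1314]])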
class RMSNorm(torch.nn.Module):
def __init__(
self,
normalized_shape,
eps=1e-5,
weight=True,
dtype=None,
device=None,
):
super().__init__()
self.eps = eps
if weight:
self.weight = torch.nn.Parameter(
torch.ones(normalized_shape, dtype=dtype, device=device))
else:
self.register_parameter('weight', None)
def forward(self, x):
return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
class LPRMSNorm(RMSNorm):
def __init__(
self,
normalized_shape,
eps=1e-5,
weight=True,
dtype=None,
device=None,
):
super().__init__(
normalized_shape=normalized_shape,
eps=eps,
weight=weight,
dtype=dtype,
device=device,
)
def forward(self, x):
downcast_x = _cast_if_autocast_enabled(x)
downcast_weight = _cast_if_autocast_enabled(
self.weight) if self.weight is not None else self.weight
with torch.autocast(enabled=False, device_type=x.device.type):
return rms_norm(downcast_x, downcast_weight,
self.eps).to(dtype=x.dtype)
NORM_CLASS_REGISTRY = {
'layernorm': torch.nn.LayerNorm,
'low_precision_layernorm': LPLayerNorm,
'rmsnorm': RMSNorm,
'low_precision_rmsnorm': LPRMSNorm,
}
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/layers/norm.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
from llmfoundry.models.layers.attention import (
ATTN_CLASS_REGISTRY, MultiheadAttention, MultiQueryAttention,
attn_bias_shape, build_alibi_bias, build_attn_bias, flash_attn_fn,
scaled_multihead_dot_product_attention, triton_flash_attn_fn)
from llmfoundry.models.layers.blocks import MPTMLP, MPTBlock
from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY, LPLayerNorm
__all__ = [
'scaled_multihead_dot_product_attention',
'flash_attn_fn',
'triton_flash_attn_fn',
'MultiheadAttention',
'MultiQueryAttention',
'attn_bias_shape',
'build_attn_bias',
'build_alibi_bias',
'ATTN_CLASS_REGISTRY',
'MPTMLP',
'MPTBlock',
'NORM_CLASS_REGISTRY',
'LPLayerNorm',
]
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/layers/__init__.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""GPT Blocks used for the GPT Model."""
from typing import Dict, Optional, Tuple
import torch
import torch.nn as nn
from llmfoundry.models.layers.attention import ATTN_CLASS_REGISTRY
from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
class MPTMLP(nn.Module):
def __init__(self,
d_model: int,
expansion_ratio: int,
device: Optional[str] = None):
super().__init__()
self.up_proj = nn.Linear(d_model,
expansion_ratio * d_model,
device=device)
self.act = nn.GELU(approximate='none')
self.down_proj = nn.Linear(expansion_ratio * d_model,
d_model,
device=device)
self.down_proj._is_residual = True # type: ignore
def forward(self, x):
return self.down_proj(self.act(self.up_proj(x)))
class MPTBlock(nn.Module):
def __init__(
self,
d_model: int,
n_heads: int,
expansion_ratio: int,
attn_config: Dict = {
'attn_type': 'multihead_attention',
'attn_pdrop': 0.0,
'attn_impl': 'triton',
'qk_ln': False,
'clip_qkv': None,
'softmax_scale': None,
'prefix_lm': False,
'attn_uses_sequence_id': False,
'alibi': False,
'alibi_bias_max': 8,
},
resid_pdrop: float = 0.0,
norm_type: str = 'low_precision_layernorm',
verbose: int = 0,
device: Optional[str] = None,
**kwargs):
del kwargs # unused, just to capture any extra args from the config
super().__init__()
norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
self.norm_1 = norm_class(d_model, device=device)
self.attn = attn_class(
attn_impl=attn_config['attn_impl'],
clip_qkv=attn_config['clip_qkv'],
qk_ln=attn_config['qk_ln'],
softmax_scale=attn_config['softmax_scale'],
attn_pdrop=attn_config['attn_pdrop'],
d_model=d_model,
n_heads=n_heads,
verbose=verbose,
device=device,
)
self.norm_2 = norm_class(d_model, device=device)
self.ffn = MPTMLP(
d_model=d_model,
expansion_ratio=expansion_ratio,
device=device,
)
self.resid_attn_dropout = nn.Dropout(resid_pdrop)
self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
def forward(
self,
x: torch.Tensor,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attn_bias: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.ByteTensor] = None,
is_causal: bool = True,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
a = self.norm_1(x)
b, _, past_key_value = self.attn(a,
past_key_value=past_key_value,
attn_bias=attn_bias,
attention_mask=attention_mask,
is_causal=is_causal)
x = x + self.resid_attn_dropout(b)
m = self.norm_2(x)
n = self.ffn(m)
x = x + self.resid_ffn_dropout(n)
return x, past_key_value
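# Illustrative usage sketch (all values below are assumptions, not part of the
# original module). The block is pre-norm: x + attn(norm_1(x)) followed by
# x + ffn(norm_2(x)), so the hidden size is preserved:
# >>> block = MPTBlock(d_model=256, n_heads=8, expansion_ratio=4,
# ...                  attn_config={'attn_type': 'multihead_attention',
# ...                               'attn_impl': 'torch', 'attn_pdrop': 0.0,
# ...                               'qk_ln': False, 'clip_qkv': None,
# ...                               'softmax_scale': None})
# >>> x = torch.randn(2, 16, 256)
# >>> y, _ = block(x)
# >>> y.shape
# torch.Size([2, 16, 256])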
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/layers/blocks.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
from llmfoundry.models.mpt.configuration_mpt import MPTConfig
from llmfoundry.models.mpt.modeling_mpt import (ComposerMPTCausalLM,
MPTForCausalLM, MPTModel,
MPTPreTrainedModel)
__all__ = [
'MPTPreTrainedModel',
'MPTModel',
'MPTForCausalLM',
'ComposerMPTCausalLM',
'MPTConfig',
]
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/mpt/__init__.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
import math
import warnings
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from composer.metrics import (InContextLearningLMAccuracy,
InContextLearningLMExpectedCalibrationError,
InContextLearningMCExpectedCalibrationError,
InContextLearningMultipleChoiceAccuracy,
InContextLearningQAAccuracy)
from composer.metrics.nlp import LanguageCrossEntropy, LanguagePerplexity
from composer.models import HuggingFaceModel
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
from transformers import (PreTrainedModel, PreTrainedTokenizer,
PreTrainedTokenizerFast)
from transformers.modeling_outputs import (BaseModelOutputWithPast,
CausalLMOutputWithPast)
from llmfoundry.models.layers.attention import attn_bias_shape, build_attn_bias
from llmfoundry.models.layers.blocks import MPTBlock
from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
from llmfoundry.models.mpt.configuration_mpt import MPTConfig
# NOTE: We import all the utils directly just so that HuggingFace will detect
# all the files that it needs to copy into its modules folder. Otherwise it misses
# the ones imported in the submodule
from llmfoundry.models.utils.adapt_tokenizer import (
AutoTokenizerForMOD, adapt_tokenizer_for_denoising)
from llmfoundry.models.utils.hf_prefixlm_converter import (
add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm)
from llmfoundry.models.utils.meta_init_context import init_empty_weights
from llmfoundry.models.utils.param_init_fns import ( # type: ignore
MODEL_INIT_REGISTRY, generic_param_init_fn_)
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(
f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).'
)
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
# CogView (https://arxiv.org/abs/2105.13290) and GLM-130B (https://arxiv.org/abs/2210.02414)
# both report this helping with stabilizing training
self.embedding_fraction = config.embedding_fraction
self.wte = nn.Embedding(config.vocab_size,
config.d_model,
device=config.init_device)
if not self.alibi:
self.wpe = nn.Embedding(config.max_seq_len,
config.d_model,
device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([
MPTBlock(
device=config.init_device,
**config.to_dict(),
) for _ in range(config.n_layers)
])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(
f'You are using {config.init_device=}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.'
)
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
# define attn mask
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(
self.attn_impl,
config.n_heads,
config.max_seq_len,
self.alibi,
prefix_lm=self.prefix_lm,
causal=self.is_causal,
use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(
module.bias, nn.Parameter):
if config.verbose:
warnings.warn(
f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
# Print verbose info
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self,
device,
dtype,
attention_mask: Optional[torch.ByteTensor] = None,
prefix_mask: Optional[torch.ByteTensor] = None,
sequence_id: Optional[torch.LongTensor] = None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape,
device=device,
dtype=dtype)
self.attn_bias = build_attn_bias(
self.attn_impl,
self.attn_bias,
self.config.n_heads,
self.config.max_seq_len,
causal=self.is_causal,
alibi=self.alibi,
alibi_bias_max=self.alibi_bias_max,
)
self._attn_bias_initialized = True
# flash does not support prefix_lm and will incorporate any
# attention_mask inside the attention module
if self.attn_impl == 'flash':
return self.attn_bias, attention_mask
if self.attn_bias is not None:
# .to(*args, **kwargs) is a no-op if tensor is already on
            # specified device or of specified dtype
self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
attn_bias = self.attn_bias
# If using torch or triton, we incorporate the prefix_mask (if appropriate)
if self.prefix_lm:
assert isinstance(attn_bias, torch.Tensor) # pyright
assert isinstance(prefix_mask, torch.Tensor) # pyright
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
# If using torch or triton, we incorporate sequence_id (if appropriate)
if self.attn_uses_sequence_id and sequence_id is not None:
assert isinstance(attn_bias, torch.Tensor) # pyright
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
# If using torch or triton, we incorporate attention_mask. This will output
# None in place of attention_mask since it will not be further needed in the
# attention modules.
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k),
device=device,
dtype=dtype)
else:
attn_bias = attn_bias[:, :, :, -s_k:]
if prefix_mask is not None and (attention_mask.shape !=
prefix_mask.shape):
raise ValueError(
f'attention_mask shape={attention_mask.shape} ' +\
f'and prefix_mask shape={prefix_mask.shape} are not equal.'
)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(
~attention_mask.view(-1, 1, 1, s_k), min_val)
return attn_bias, None
def _apply_prefix_mask(self, attn_bias: torch.Tensor,
prefix_mask: torch.Tensor):
s_k, s_q = attn_bias.shape[-2:]
if (s_k != self.config.max_seq_len) or (s_q != self.config.max_seq_len):
raise ValueError(
'attn_bias does not match the expected shape. ' +\
                f'The last two dimensions should both be {self.config.max_seq_len} ' +\
f'but are {s_k} and {s_q}.'
)
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(
f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}'
)
# select seq_len subset of attn mask
attn_bias = attn_bias[..., :seq_len, :seq_len]
        # Mix the causal mask and the bidirectional mask to get the full
# allowable attention (i.e. full = not accounting for padding yet)
causal = torch.tril(
torch.ones((seq_len, seq_len),
dtype=torch.bool,
device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor,
sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(
f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}'
)
# select seq_len subset of attn mask
attn_bias = attn_bias[..., :seq_len, :seq_len]
# Restrict attention to tokens that share the same value
# in sequence_id
cannot_attend = torch.logical_not(
torch.eq(sequence_id.view(-1, seq_len, 1),
sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(
self,
input_ids: torch.LongTensor,
past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
attention_mask: Optional[torch.ByteTensor] = None,
prefix_mask: Optional[torch.ByteTensor] = None,
sequence_id: Optional[torch.LongTensor] = None,
return_dict: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
use_cache: Optional[bool] = None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
# These args are passed in by keyword in huggingface's generate function
# https://github.com/huggingface/transformers/blob/68287689f2f0d8b7063c400230b3766987abf18d/src/transformers/generation/utils.py#L2201-L2206
# but have not yet been fully implemented in MPTModel
if not return_dict:
raise NotImplementedError(
'return_dict False is not implemented yet for MPT')
if output_attentions:
raise NotImplementedError(
'output_attentions is not implemented yet for MPT')
if attention_mask is not None and attention_mask[:, 0].sum(
) != attention_mask.shape[0] and self.training:
raise NotImplementedError(
'MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError(
'prefix_mask is a required argument when MPT is configured with prefix_lm=True.'
)
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError(
'sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' +\
'and the model is in train mode.'
)
elif (self.attn_uses_sequence_id is False) and (sequence_id
is not None):
warnings.warn(
'MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' +\
'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.'
)
S = input_ids.size(1)
assert (
S <= self.config.max_seq_len
), f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids) # type: ignore
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(
f'past_key_values must provide a past_key_value for each attention ' +\
f'layer in the network ({len(past_key_values)=}; {self.config.n_layers=}).'
)
# get the key tensor whose spec should be (batch, seq, dim), and
# collect the `seq`, so that the position embedding is shifted
past_position = past_key_values[0][0].size(1)
if S + past_position > self.config.max_seq_len:
raise ValueError(
f'Cannot forward input with past sequence length {past_position} and current sequence length '
f'{S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.'
)
pos = torch.arange(past_position,
S + past_position,
dtype=torch.long,
device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
# adjust the position indices to account for padding tokens
pos = torch.clamp(pos - torch.cumsum(
(~attention_mask).to(torch.int32), dim=1)[:,
past_position:],
min=0)
pos_emb = self.wpe(pos) # type: ignore
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x) # type: ignore
else:
# this implementation is proposed on page 7 of the GLM-130B paper https://arxiv.org/abs/2210.02414
x_shrunk = (x * self.embedding_fraction) + (
x.detach() * (1 - self.embedding_fraction))
assert isinstance(self.emb_drop, nn.Module) # pyright
x = self.emb_drop(x_shrunk)
attn_bias, attention_mask = self._attn_bias(
device=x.device,
dtype=x.dtype,
attention_mask=attention_mask,
prefix_mask=prefix_mask,
sequence_id=sequence_id)
# initialize the past key values cache if it should be used
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)
] # type: ignore
all_hidden_states = () if output_hidden_states else None
for b_idx, block in enumerate(self.blocks): # type: ignore
if output_hidden_states:
assert all_hidden_states is not None # pyright
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[
b_idx] if past_key_values is not None else None
x, past_key_value = block(x,
past_key_value=past_key_value,
attn_bias=attn_bias,
attention_mask=attention_mask,
is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
x = self.norm_f(x) # type: ignore
return BaseModelOutputWithPast(
last_hidden_state=x,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
)
# Param Initialization, needed for device='meta' fast initialization
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name']
MODEL_INIT_REGISTRY[init_fn_name](module=module,
n_layers=self.config.n_layers,
d_model=self.config.d_model,
**self.config.init_config)
# FSDP Wrap function
def fsdp_wrap_fn(self, module):
return isinstance(module, MPTBlock)
# Activation Checkpointing
def activation_checkpointing_fn(self, module):
return isinstance(module, MPTBlock)
class MPTForCausalLM(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
super().__init__(config)
if not config.tie_word_embeddings:
raise ValueError(
'MPTForCausalLM only supports tied word embeddings')
self.transformer = MPTModel(config)
# enables scaling output logits; similar to a softmax "temperature"
# PaLM paper uses scale 1/sqrt(config.d_model)
self.logit_scale = None
if config.logit_scale is not None:
logit_scale = config.logit_scale
if isinstance(logit_scale, str):
if logit_scale == 'inv_sqrt_d_model':
logit_scale = 1 / math.sqrt(config.d_model)
else:
raise ValueError(
f"{logit_scale=} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'."
)
self.logit_scale = logit_scale
def get_input_embeddings(self):
return self.transformer.wte
def set_input_embeddings(self, value):
self.transformer.wte = value
def get_output_embeddings(self):
return self.transformer.wte
def set_output_embeddings(self, new_embeddings):
self.transformer.wte = new_embeddings
def set_decoder(self, decoder):
self.transformer = decoder
def get_decoder(self):
return self.transformer
def forward(
self,
input_ids: torch.LongTensor,
past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
attention_mask: Optional[torch.ByteTensor] = None,
prefix_mask: Optional[torch.ByteTensor] = None,
sequence_id: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
return_dict: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
use_cache: Optional[bool] = None,
):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.transformer(input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
prefix_mask=prefix_mask,
sequence_id=sequence_id,
return_dict=return_dict,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache)
logits = F.linear(outputs.last_hidden_state,
self.transformer.wte.weight)
if self.logit_scale is not None:
if self.logit_scale == 0:
warnings.warn(
f'Multiplying logits by {self.logit_scale=}. This will produce uniform (uninformative) outputs.'
)
logits *= self.logit_scale
loss = None
if labels is not None:
labels = torch.roll(labels, shifts=-1)
labels[:, -1] = -100
loss = F.cross_entropy(logits.view(-1, logits.size(-1)),
labels.to(logits.device).view(-1))
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
)
# Param Initialization, needed for device='meta' fast initialization
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name']
MODEL_INIT_REGISTRY[init_fn_name](module=module,
n_layers=self.config.n_layers,
d_model=self.config.d_model,
**self.config.init_config)
# FSDP Wrap function
def fsdp_wrap_fn(self, module):
return isinstance(module, MPTBlock)
# Activation Checkpointing
def activation_checkpointing_fn(self, module):
return isinstance(module, MPTBlock)
def prepare_inputs_for_generation(self,
input_ids,
past_key_values=None,
inputs_embeds=None,
**kwargs):
if inputs_embeds is not None:
raise NotImplementedError(
'inputs_embeds is not implemented for MPT yet')
attention_mask = kwargs['attention_mask'].bool()
if attention_mask[:, -1].sum() != attention_mask.shape[0]:
raise NotImplementedError(
'MPT does not support generation with right padding.')
if self.transformer.attn_uses_sequence_id and self.training:
sequence_id = torch.zeros_like(input_ids[:1])
else:
sequence_id = None
if past_key_values is not None:
input_ids = input_ids[:, -1].unsqueeze(-1)
if self.transformer.prefix_lm:
# Leverage a convenience of sequential generation!
prefix_mask = torch.ones_like(attention_mask)
# This requires that we're using the cache
if kwargs.get('use_cache') == False:
raise NotImplementedError(
'MPT with prefix_lm=True does not support use_cache=False.')
else:
prefix_mask = None
return {
'input_ids': input_ids,
'attention_mask': attention_mask,
'prefix_mask': prefix_mask,
'sequence_id': sequence_id,
'past_key_values': past_key_values,
'use_cache': kwargs.get('use_cache', True),
}
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
"""Used by HuggingFace generate when using beam search with kv-caching.
See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133
for an example in transformers.
"""
reordered_past = []
for layer_past in past_key_values:
reordered_past += [
tuple(
past_state.index_select(0, beam_idx)
for past_state in layer_past)
]
return reordered_past
class ComposerMPTCausalLM(HuggingFaceModel):
def __init__(
self,
om_model_config: DictConfig,
tokenizer: Optional[Tokenizer] = None,
):
resolved_om_model_config = om.to_container(om_model_config,
resolve=True)
hf_config = MPTConfig.from_dict(resolved_om_model_config)
model = MPTForCausalLM(hf_config)
train_metrics = [
LanguageCrossEntropy(hf_config.vocab_size),
LanguagePerplexity(hf_config.vocab_size)
]
eval_metrics = [
LanguageCrossEntropy(hf_config.vocab_size),
LanguagePerplexity(hf_config.vocab_size),
InContextLearningLMAccuracy(),
InContextLearningMultipleChoiceAccuracy(),
InContextLearningQAAccuracy(),
InContextLearningLMExpectedCalibrationError(),
InContextLearningMCExpectedCalibrationError()
]
super().__init__(
model=model,
tokenizer=tokenizer,
use_logits=True,
metrics=train_metrics,
eval_metrics=eval_metrics,
shift_labels=True,
allow_embedding_resizing=True,
)
self.n_active_params = sum(p.numel() for p in self.parameters())
loss_fn_config = om_model_config.get('loss_fn', 'fused_crossentropy')
if loss_fn_config == 'fused_crossentropy':
try:
from flash_attn.losses.cross_entropy import CrossEntropyLoss as FusedCrossEntropyLoss # type: ignore # isort: skip
if hf_config.verbose > 1:
warnings.warn('Using Fused Cross Entropy Loss.')
self.loss_fn = FusedCrossEntropyLoss(ignore_index=-100)
except:
raise ValueError(
'Fused Cross Entropy is not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]`, or (2) set your config model.loss_fn=torch_crossentropy.'
)
elif loss_fn_config == 'torch_crossentropy':
self.loss_fn = nn.CrossEntropyLoss(ignore_index=-100)
else:
raise ValueError(
                f'Specified loss_fn={loss_fn_config} not recognized. `loss_fn` must be one of [`fused_crossentropy`, `torch_crossentropy`].'
)
def get_targets(self, batch):
targets = torch.roll(batch['labels'], shifts=-1)
targets[:, -1] = -100
return targets
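    # Worked example of the shift above (token ids are assumptions): labels
    # [[5, 6, 7, 8]] become targets [[6, 7, 8, -100]], i.e. position i is
    # trained to predict token i + 1 and the final position is ignored by the
    # loss (ignore_index=-100).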
def forward(self, batch):
if self.model.transformer.prefix_lm:
add_bidirectional_mask_if_missing(batch)
# Note: prefix_mask is only used if model.prefix_lm is True
return self.model(
input_ids=batch['input_ids'],
attention_mask=batch.get('attention_mask', None),
prefix_mask=batch.get('bidirectional_mask', None),
sequence_id=batch.get('sequence_id', None),
)
def loss(self, outputs, batch):
targets = self.get_targets(batch)
return self.loss_fn(outputs.logits.view(-1, outputs.logits.size(-1)),
targets.view(-1))
def flops_per_batch(self, batch):
# Note: this computation does not take into account padding, and assumes
# that the dataset has been constructed without padding. Additionally, we
# assume the backward pass is approximately 2x the forward pass
bs, msl = batch['input_ids'].shape[0:2]
params_flops_per_token = 2 * self.n_active_params
params_flops_per_seq = params_flops_per_token * msl
attn_flops_per_seq = self.model.config.n_layers * 2 * 2 * (
self.model.config.d_model * (msl**2))
return (params_flops_per_seq + attn_flops_per_seq) * 3 * bs
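    # Worked example (all numbers are assumptions for illustration): for
    # n_active_params=1.0e9, n_layers=24, d_model=2048, and a batch of bs=8
    # sequences of msl=2048 tokens,
    #   params_flops_per_seq = 2 * 1.0e9 * 2048            ~= 4.1e12
    #   attn_flops_per_seq   = 24 * 2 * 2 * 2048 * 2048**2 ~= 8.2e11
    # so the returned estimate is (4.1e12 + 8.2e11) * 3 * 8 ~= 1.2e14 FLOPs.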
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/mpt/modeling_mpt.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""A HuggingFace-style model configuration."""
from typing import Dict, Optional, Union
from transformers import PretrainedConfig
attn_config_defaults: Dict = {
'attn_type': 'multihead_attention',
'attn_pdrop': 0.0,
'attn_impl': 'triton',
'qk_ln': False,
'clip_qkv': None,
'softmax_scale': None,
'prefix_lm': False,
'attn_uses_sequence_id': False,
'alibi': False,
'alibi_bias_max': 8,
}
init_config_defaults: Dict = {
'name': 'kaiming_normal_',
'fan_mode': 'fan_in',
'init_nonlinearity': 'relu',
}
class MPTConfig(PretrainedConfig):
model_type = 'mpt'
def __init__(
self,
d_model: int = 2048,
n_heads: int = 16,
n_layers: int = 24,
expansion_ratio: int = 4,
max_seq_len: int = 2048,
vocab_size: int = 50368,
resid_pdrop: float = 0.0,
emb_pdrop: float = 0.0,
learned_pos_emb: bool = True,
attn_config: Dict = attn_config_defaults,
init_device: str = 'cpu',
logit_scale: Optional[Union[float, str]] = None,
no_bias: bool = False,
verbose: int = 0,
embedding_fraction: float = 1.0,
norm_type: str = 'low_precision_layernorm',
use_cache: bool = False,
init_config: Dict = init_config_defaults,
**kwargs,
):
"""The MPT configuration class.
Args:
d_model (int): The size of the embedding dimension of the model.
n_heads (int): The number of attention heads.
n_layers (int): The number of layers in the model.
expansion_ratio (int): The ratio of the up/down scale in the MLP.
max_seq_len (int): The maximum sequence length of the model.
vocab_size (int): The size of the vocabulary.
resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.
emb_pdrop (float): The dropout probability for the embedding layer.
learned_pos_emb (bool): Whether to use learned positional embeddings
attn_config (Dict): A dictionary used to configure the model's attention module:
attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention
attn_pdrop (float): The dropout probability for the attention layers.
attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.
qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.
clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to
this value.
softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,
use the default scale of ``1/sqrt(d_keys)``.
prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an
extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix
can attend to one another bi-directionally. Tokens outside the prefix use causal attention.
attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.
When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates
which sub-sequence each token belongs to.
Defaults to ``False`` meaning any provided `sequence_id` will be ignored.
alibi (bool): Whether to use the alibi bias instead of position embeddings.
alibi_bias_max (int): The maximum value of the alibi bias.
init_device (str): The device to use for parameter initialization.
logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
            no_bias (bool): Whether to remove all bias parameters from the model's layers.
verbose (int): The verbosity level. 0 is silent.
embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
norm_type (str): choose type of norm to use
use_cache (bool): Whether or not the model should return the last key/values attentions
init_config (Dict): A dictionary used to configure the model initialization:
init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',
'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or
'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.
init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.
emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.
emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution
used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.
init_std (float): The standard deviation of the normal distribution used to initialize the model,
if using the baseline_ parameter initialization scheme.
init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.
fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.
init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.
---
See llmfoundry.models.utils.param_init_fns.py for info on other param init config options
"""
self.d_model = d_model
self.n_heads = n_heads
self.n_layers = n_layers
self.expansion_ratio = expansion_ratio
self.max_seq_len = max_seq_len
self.vocab_size = vocab_size
self.resid_pdrop = resid_pdrop
self.emb_pdrop = emb_pdrop
self.learned_pos_emb = learned_pos_emb
self.attn_config = attn_config
self.init_device = init_device
self.logit_scale = logit_scale
self.no_bias = no_bias
self.verbose = verbose
self.embedding_fraction = embedding_fraction
self.norm_type = norm_type
self.use_cache = use_cache
self.init_config = init_config
if 'name' in kwargs:
del kwargs['name']
if 'loss_fn' in kwargs:
del kwargs['loss_fn']
super().__init__(**kwargs)
self._validate_config()
def _set_config_defaults(self, config, config_defaults):
# set config defaults
for k, v in config_defaults.items():
if k not in config:
config[k] = v
return config
def _validate_config(self):
# set config defaults
self.attn_config = self._set_config_defaults(
self.attn_config,
attn_config_defaults,
)
self.init_config = self._set_config_defaults(
self.init_config,
init_config_defaults,
)
if self.d_model % self.n_heads != 0:
raise ValueError('d_model must be divisible by n_heads')
if any(
prob < 0 or prob > 1 for prob in
[self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop]):
raise ValueError(
"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1"
)
if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:
raise ValueError(
f"Unknown attn_impl={self.attn_config['attn_impl']}")
if self.attn_config['prefix_lm'] and self.attn_config[
'attn_impl'] not in ['torch', 'triton']:
raise NotImplementedError(
'prefix_lm only implemented with torch and triton attention.')
if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in [
'torch', 'triton'
]:
raise NotImplementedError(
'alibi only implemented with torch and triton attention.')
if self.attn_config['attn_uses_sequence_id'] and self.attn_config[
'attn_impl'] not in ['torch', 'triton']:
raise NotImplementedError(
'attn_uses_sequence_id only implemented with torch and triton attention.'
)
if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
raise ValueError(
'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!'
)
if isinstance(self.logit_scale,
str) and self.logit_scale != 'inv_sqrt_d_model':
raise ValueError(
f"{self.logit_scale=} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'."
)
if self.init_config.get('name', None) is None:
raise ValueError(f"{self.init_config=} 'name' needs to be set.")
if not self.learned_pos_emb and not self.attn_config['alibi']:
raise ValueError(
f'Positional information must be provided to the model using either learned_pos_emb or alibi.'
)
| EXA-1-master | exa/libraries/llm-foundry/llmfoundry/models/mpt/configuration_mpt.py |