from argparse import ArgumentParser
from multiprocessing import Pool

from requests import HTTPError
from transformers import AutoModel, AutoTokenizer


def get_args():
    parser = ArgumentParser()
    parser.add_argument(
        '--experiments',
        type=lambda s: s.split(','),
        required=True,
        help='Comma-separated list of experiments to download.',
    )
    parser.add_argument(
        '--steps',
        type=lambda s: [int(item) for item in s.split(',')],
        required=True,
        help='Comma-separated list of steps at which to download the model checkpoints.',
    )
    return parser.parse_args()


def _load_model(pretrain: str, revision: str):
    try:
        # Instantiating the model and tokenizer downloads the checkpoint for
        # the given revision into the local Hugging Face cache.
        AutoModel.from_pretrained(pretrain, revision=revision)
        AutoTokenizer.from_pretrained(pretrain, revision=revision)
        return f"Loaded: {{pretrain:{pretrain}, revision:{revision}}}"
    except HTTPError:
        return f"Failed to load: {{pretrain:{pretrain}, revision:{revision}}}"


def load_model(kwargs):
    # Unpack a kwargs dict so the call can be dispatched through Pool.imap,
    # which passes a single argument to the mapped function.
    return _load_model(**kwargs)


def main():
    args = get_args()
    pretrains = args.experiments
    steps = args.steps
    revisions = [f"global_step{step}" for step in steps]

    jobs = [
        {"pretrain": pretrain, "revision": revision}
        for pretrain in pretrains
        for revision in revisions
    ]

    # Download every (experiment, revision) pair in a pool of worker processes
    # and print the outcome of each job as it completes.
    with Pool() as pool:
        for result in pool.imap(load_model, jobs):
            print(result)


if __name__ == "__main__":
    main()
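

# Example invocation (the script filename and experiment names below are
# placeholders, not real repositories):
#
#   python download_checkpoints.py \
#       --experiments org/experiment-a,org/experiment-b \
#       --steps 1000,2000
#
# For each experiment this fetches the checkpoints tagged with revisions
# "global_step1000" and "global_step2000" from the Hugging Face Hub.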