gururise committed on
Commit
a100dd2
·
1 Parent(s): 1b9f1cc

update model cache location

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -5,8 +5,8 @@ import codecs
5
  from datetime import datetime
6
 
7
  import os
8
- os.environ['TRANSFORMERS_CACHE'] = '/data/.modelcache/huggingface/hub/'
9
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:516"
10
 
11
  from transformers import BloomTokenizerFast
12
  from petals.client import DistributedBloomForCausalLM
 
5
  from datetime import datetime
6
 
7
  import os
8
+ #os.environ['TRANSFORMERS_CACHE'] = '/data/.modelcache/huggingface/hub/'
9
+ #os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:516"
10
 
11
  from transformers import BloomTokenizerFast
12
  from petals.client import DistributedBloomForCausalLM