sparkleman committed on
Commit
6b82cc0
·
1 Parent(s): 43cae0b
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -5,7 +5,7 @@ if os.environ.get("MODELSCOPE_ENVIRONMENT") == "studio":
5
 
6
  patch_hub()
7
 
8
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
9
 
10
 
11
  from config import CONFIG, ModelConfig
@@ -247,7 +247,7 @@ def generate(
247
  if token == 0 and token in request.stop_tokens:
248
  yield {
249
  "content": "".join(cache_word_list),
250
- "tokens": out_tokens[out_last :],
251
  "finish_reason": "stop:token:0",
252
  "state": model_state,
253
  }
@@ -266,7 +266,7 @@ def generate(
266
  if token in request.stop_tokens:
267
  yield {
268
  "content": "".join(cache_word_list),
269
- "tokens": out_tokens[out_last :],
270
  "finish_reason": f"stop:token:{token}",
271
  "state": model_state,
272
  }
 
5
 
6
  patch_hub()
7
 
8
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:256"
9
 
10
 
11
  from config import CONFIG, ModelConfig
 
247
  if token == 0 and token in request.stop_tokens:
248
  yield {
249
  "content": "".join(cache_word_list),
250
+ "tokens": out_tokens[out_last:],
251
  "finish_reason": "stop:token:0",
252
  "state": model_state,
253
  }
 
266
  if token in request.stop_tokens:
267
  yield {
268
  "content": "".join(cache_word_list),
269
+ "tokens": out_tokens[out_last:],
270
  "finish_reason": f"stop:token:{token}",
271
  "state": model_state,
272
  }