Tonic committed on
Commit
21a3eb8
·
1 Parent(s): ffba568

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -10
app.py CHANGED
@@ -105,16 +105,15 @@ def _parse_text(text):
105
 
106
  def save_image(image_file, upload_dir):
107
  Path(upload_dir).mkdir(parents=True, exist_ok=True)
108
- filename = secrets.token_hex(10) + Path(image_file.name).suffix
109
- file_path = Path(upload_dir) / filename
110
- if isinstance(image_file, gr.NamedString):
111
- shutil.copy(image_file.name, file_path)
112
- else:
113
  with open(file_path, "wb") as f:
114
  f.write(image_file.read())
115
- return str(file_path)
 
 
116
 
117
-
118
  def add_file(history, task_history, file):
119
  if file is None:
120
  return history, task_history
@@ -136,14 +135,11 @@ def _launch_demo(args, model, tokenizer):
136
  query = [{'image': chat_query[0]}]
137
  else:
138
  query = [{'text': _parse_text(chat_query)}]
139
-
140
  inputs = tokenizer.from_list_format(query)
141
  tokenized_inputs = tokenizer(inputs, return_tensors='pt')
142
  tokenized_inputs = tokenized_inputs.to(model.device)
143
-
144
  pred = model.generate(**tokenized_inputs)
145
  response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False)
146
-
147
  if 'image' in query[0]:
148
  image = tokenizer.draw_bbox_on_latest_picture(response)
149
  if image is not None:
 
105
 
106
def save_image(image_file, upload_dir):
    """Persist an uploaded image under *upload_dir* with a random filename.

    The directory is created if missing. Returns the saved file's path as a
    string, or ``None`` when no file was provided.
    """
    target_dir = Path(upload_dir)
    target_dir.mkdir(parents=True, exist_ok=True)
    if image_file is None:
        return None
    # Random hex stem avoids collisions; keep the upload's original extension.
    # NOTE(review): assumes image_file exposes .name and .read() — a bare
    # path string (e.g. gradio NamedString) would fail here; confirm callers.
    destination = target_dir / (secrets.token_hex(10) + Path(image_file.name).suffix)
    with open(destination, "wb") as fh:
        fh.write(image_file.read())
    return str(destination)
116
 
 
117
  def add_file(history, task_history, file):
118
  if file is None:
119
  return history, task_history
 
135
  query = [{'image': chat_query[0]}]
136
  else:
137
  query = [{'text': _parse_text(chat_query)}]
 
138
  inputs = tokenizer.from_list_format(query)
139
  tokenized_inputs = tokenizer(inputs, return_tensors='pt')
140
  tokenized_inputs = tokenized_inputs.to(model.device)
 
141
  pred = model.generate(**tokenized_inputs)
142
  response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False)
 
143
  if 'image' in query[0]:
144
  image = tokenizer.draw_bbox_on_latest_picture(response)
145
  if image is not None: