Harold-lkk committed on
Commit ac1ea7e · 1 Parent(s): 0f4b503

beautiful memory show

Files changed (1)
  1. app.py +3 -6
app.py CHANGED
@@ -8,7 +8,6 @@ from mmgpt.models.builder import create_model_and_transforms

 TEMPLATE = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
 response_split = "### Response:"
-Prompt_Tutorial = "Model Inputs = {Prompt}({seperator}Image:\n<image> if image upload){seperator}{user_prefix}:\n{instruction}({sepertator}+Input:\n{history}){seperator}{ai_prefix}:\n"


 class Inferencer:
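The deleted Prompt_Tutorial string (typos included: "seperator", "sepertator") documented how the app assembles model inputs from the prompt, an optional image token, the instruction, and optional history. A minimal sketch of that assembly; build_prompt, its parameters, and the "\n\n### " separator are hypothetical illustrations, not app.py code:

from typing import Optional


def build_prompt(
    prompt: str,
    instruction: str,
    user_prefix: str = "Instruction",
    ai_prefix: str = "Response",
    history: Optional[str] = None,
    has_image: bool = False,
    separator: str = "\n\n### ",
) -> str:
    # Hypothetical re-creation of the layout the tutorial string described:
    # {Prompt} (+ Image:\n<image> if an image is uploaded)
    # + {user_prefix}:\n{instruction} (+ Input:\n{history}) + {ai_prefix}:\n
    parts = [prompt]
    if has_image:
        parts.append("Image:\n<image>")
    parts.append(f"{user_prefix}:\n{instruction}")
    if history:
        parts.append(f"Input:\n{history}")
    parts.append(f"{ai_prefix}:\n")
    return separator.join(parts)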
@@ -229,7 +228,8 @@ def bot(
         num_beams, temperature, top_k, top_p,
         do_sample)
     state.all_history[-1][-1] = inference_results
-    memory_allocated = str(torch.cuda.memory_allocated() / 1024**3) + 'GB'
+    memory_allocated = str(round(torch.cuda.memory_allocated() / 1024**3,
+                                 2)) + 'GB'
     return state, to_gradio_chatbot(state), "", None, inputs, memory_allocated


@@ -271,9 +271,6 @@ def build_conversation_demo():
             "Prompt",
             open=False,
     ):
-        with gr.Accordion(
-                "Click to hide the tutorial", open=False):
-            gr.Markdown(Prompt_Tutorial)
         with gr.Row():
            ai_prefix = gr.Text("Response", label="AI Prefix")
            user_prefix = gr.Text(
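The removed block nested one gr.Accordion inside another to make the tutorial collapsible inside the existing "Prompt" section. A standalone sketch of that pattern, assuming a recent gradio release; the Markdown text is a placeholder, not the original tutorial:

import gradio as gr

with gr.Blocks() as demo:
    # Outer accordion mirrors the app's existing "Prompt" section.
    with gr.Accordion("Prompt", open=False):
        # Inner accordion is the collapsible tutorial this commit removes.
        with gr.Accordion("Click to hide the tutorial", open=False):
            gr.Markdown("Tutorial text goes here.")

if __name__ == "__main__":
    demo.launch()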
@@ -367,7 +364,7 @@ if __name__ == "__main__":
         llama_path=llama_path,
         open_flamingo_path=open_flamingo_path,
         finetune_path=finetune_path)
-    init_memory = str(torch.cuda.memory_allocated() / 1024**3) + 'GB'
+    init_memory = str(round(torch.cuda.memory_allocated() / 1024**3, 2)) + 'GB'
     demo = build_conversation_demo()
     demo.queue(concurrency_count=3)
     IP = "0.0.0.0"
 
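Both memory hunks make the same cosmetic change: the GPU readout is rounded to two decimal places before display, which is what the commit message refers to. A minimal standalone sketch of that formatting, assuming a CUDA build of PyTorch; format_gpu_memory is a hypothetical helper, not part of app.py:

import torch


def format_gpu_memory() -> str:
    # Hypothetical helper mirroring the commit: bytes -> GiB, rounded to
    # two decimal places, rendered as e.g. "13.42GB".
    gb = torch.cuda.memory_allocated() / 1024**3
    return str(round(gb, 2)) + 'GB'


if __name__ == "__main__":
    if torch.cuda.is_available():
        print(format_gpu_memory())
    else:
        print("no CUDA device; nothing allocated")

Note that round() drops trailing zeros ("13.4GB", not "13.40GB"); an f-string format spec such as f"{gb:.2f}GB" would keep a fixed width instead.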