davanstrien HF Staff committed on
Commit dea96c1 · 1 Parent(s): 6c47666

chore: Refactor generate_instruction_response function and optimize Gradio demo description

Files changed (1): app.py +8 -3
app.py CHANGED
@@ -35,6 +35,10 @@ terminators = [
 
 @spaces.GPU
 def generate_instruction_response():
+    prompt_info = (
+        f"Generating a user prompt from the LLM via the template `{extract_input}`\n\n"
+    )
+    yield prompt_info
     instruction = pipeline(
         extract_input,
         max_new_tokens=2048,
@@ -48,8 +52,9 @@ def generate_instruction_response():
         len(extract_input) :
     ].split("\n")[0]
 
-    first_rep = "## LLm generated instruction:\n\n" + sanitized_instruction
-    yield first_rep + "\n\n generating LLM response..."
+    first_step = "## LLM generated instruction:\n\n" + sanitized_instruction
+    first_step = prompt_info + first_step
+    yield first_step + "\n\n generating LLM response..."
 
     response_template = f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{sanitized_instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"""
 
@@ -64,7 +69,7 @@ def generate_instruction_response():
 
     assistant_response = response[0]["generated_text"][len(response_template) :]
 
-    yield (first_rep + "\n\n## LLM Generated response:\n\n" + assistant_response)
+    yield (first_step + "\n\n## LLM Generated response:\n\n" + assistant_response)
 
 
 title = "Magpie demo"
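For context, the function being refactored implements Magpie-style self-generation: the model first completes a bare user header into an instruction, then answers that instruction as a normal chat turn. Below is a minimal, self-contained sketch of that two-step flow. The model name and the exact value of `extract_input` are assumptions (the hunks never show them), while the slicing, the `.split("\n")[0]` sanitization, and the `response_template` mirror the diff above.

```python
from transformers import pipeline

# Assumed value: extract_input is presumably the Llama-3 chat template cut off
# right after the user header, so the model's continuation reads as a user
# instruction. The diff never shows it; this is illustrative only.
extract_input = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"

# Assumed model; the Space builds its own pipeline (and a `terminators` list,
# visible in the first hunk's context, presumably passed as eos_token_id).
generator = pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct")

# Step 1: sample an instruction and keep only its first line, as the diff does.
instruction = generator(extract_input, max_new_tokens=2048)
sanitized_instruction = instruction[0]["generated_text"][
    len(extract_input):
].split("\n")[0]

# Step 2: wrap the instruction as a full user turn and generate the answer.
response_template = (
    "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
    f"{sanitized_instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
)
response = generator(response_template, max_new_tokens=2048)
assistant_response = response[0]["generated_text"][len(response_template):]
print(assistant_response)
```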
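The refactor's visible effect is on streaming: the function now yields three times (template notice, generated instruction, full response), and Gradio treats a generator `fn` as a stream, re-rendering the output component on every yield. Here is a minimal sketch of that wiring, with `time.sleep` standing in for the two `pipeline()` calls; the `gr.Interface` setup and output component are assumptions, not the Space's actual code.

```python
import time

import gradio as gr


def generate_instruction_response():
    # Each yield replaces the output component's contents, so the user sees
    # progress instead of a blank screen while the GPU calls run.
    prompt_info = "Generating a user prompt from the LLM via the template `...`\n\n"
    yield prompt_info
    time.sleep(2)  # placeholder for the first pipeline() call
    first_step = prompt_info + "## LLM generated instruction:\n\n(instruction here)"
    yield first_step + "\n\n generating LLM response..."
    time.sleep(2)  # placeholder for the second pipeline() call
    yield first_step + "\n\n## LLM Generated response:\n\n(response here)"


demo = gr.Interface(
    fn=generate_instruction_response,
    inputs=None,
    outputs=gr.Markdown(),  # assumed component; the Space may use another
    title="Magpie demo",
)

if __name__ == "__main__":
    demo.launch()
```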