dondoesstuff committed on
Commit bc6741e · 1 Parent(s): 84e069a

Update app.py

Files changed (1)
  app.py +32 -36
app.py CHANGED
@@ -10,19 +10,19 @@ prompt_template = 'USER: {0}\nASSISTANT: '
 # Function to generate responses using the GPT-4 model with custom settings
 def generate_response(prompt, settings):
     # Extract settings from the input
-    max_tokens = settings["max_tokens"]
-    temp = settings["temp"]
-    top_k = settings["top_k"]
-    top_p = settings["top_p"]
-    repeat_penalty = settings["repeat_penalty"]
-    repeat_last_n = settings["repeat_last_n"]
-    n_batch = settings["n_batch"]
-    n_predict = settings["n_predict"]
-    streaming = settings["streaming"]
-
+    max_tokens = settings[0]
+    temp = settings[1]
+    top_k = settings[2]
+    top_p = settings[3]
+    repeat_penalty = settings[4]
+    repeat_last_n = settings[5]
+    n_batch = settings[6]
+    n_predict = settings[7]
+    streaming = settings[8]
+
     # Generate chat history and input prompt
     chat_history_with_prompt = prompt_template.format(prompt)
-
+
     # Generate response with custom settings
     response = model.generate(
         chat_history_with_prompt,
@@ -36,33 +36,29 @@ def generate_response(prompt, settings):
         n_predict=n_predict,
         streaming=streaming
     )
-
+
     return response
 
 # Initialize Gradio Interface
-with gr.Blocks() as chatbot_demo:
-    with gr.Tab("Chat"):
-        gr.Interface(
-            fn=generate_response,
-            inputs=[
-                gr.inputs.Textbox(label="Chat Input", placeholder="Start the conversation..."),
-                gr.inputs.Number(default=200, label="Max Tokens"),
-                gr.inputs.Number(default=0.7, label="Temperature"),
-                gr.inputs.Number(default=40, label="Top-k"),
-                gr.inputs.Number(default=0.4, label="Top-p"),
-                gr.inputs.Number(default=1.18, label="Repeat Penalty"),
-                gr.inputs.Number(default=64, label="Repeat Last n"),
-                gr.inputs.Number(default=8, label="Batch Size"),
-                gr.inputs.Textbox(default="Auto", label="Number of Predictions"),
-                gr.inputs.Checkbox(default=False, label="Streaming"),
-            ],
-            outputs=gr.outputs.Textbox(),
-            title="GPT-4 Chatbot",
-            description="Chat with the GPT-4 based chatbot. Configure generation settings and see the chat history for this session.",
-        )
-
-    with gr.Tab("Settings"):
-        gr.Text("Settings tab content")
+interface = gr.Interface(
+    fn=generate_response,
+    inputs=[
+        gr.inputs.Textbox(label="Chat Input", placeholder="Start the conversation..."),
+        gr.inputs.Number(default=200, label="Max Tokens"),
+        gr.inputs.Number(default=0.7, label="Temperature"),
+        gr.inputs.Number(default=40, label="Top-k"),
+        gr.inputs.Number(default=0.4, label="Top-p"),
+        gr.inputs.Number(default=1.18, label="Repeat Penalty"),
+        gr.inputs.Number(default=64, label="Repeat Last n"),
+        gr.inputs.Number(default=8, label="Batch Size"),
+        gr.inputs.Textbox(default="Auto", label="Number of Predictions"),
+        gr.inputs.Checkbox(default=False, label="Streaming"),
+    ],
+    outputs=gr.outputs.Textbox(),
+    title="GPT-4 Chatbot",
+    description="Chat with the GPT-4 based chatbot. Configure generation settings and see the chat history for this session.",
+)
 
 # Launch Gradio Interface
-chatbot_demo.queue(concurrency_count=75).launch(debug=True)
+if __name__ == "__main__":
+    interface.launch(debug=True)
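
Note on the new wiring: gr.Interface passes each input component's value to fn as a separate positional argument, in the order the components are listed, so with the ten inputs above generate_response receives ten arguments rather than a (prompt, settings) pair. The following is a minimal sketch of a signature that matches that calling convention, not the committed code: the model call is stubbed out with an echo response, and it uses the current gr.Textbox / gr.Number / gr.Checkbox component names (with value= defaults) instead of the deprecated gr.inputs.* API seen in the diff.

import gradio as gr

def generate_response(prompt, max_tokens, temp, top_k, top_p,
                      repeat_penalty, repeat_last_n, n_batch,
                      n_predict, streaming):
    # One parameter per input component, in listing order.
    chat_history_with_prompt = 'USER: {0}\nASSISTANT: '.format(prompt)
    # The real app would call the loaded model here, e.g.
    # model.generate(chat_history_with_prompt, max_tokens=int(max_tokens), ...);
    # this stub just echoes the settings so the sketch runs on its own.
    return (f"(stub) prompt={chat_history_with_prompt!r}, "
            f"max_tokens={int(max_tokens)}, temp={temp}, top_k={int(top_k)}, "
            f"top_p={top_p}, repeat_penalty={repeat_penalty}, "
            f"repeat_last_n={int(repeat_last_n)}, n_batch={int(n_batch)}, "
            f"n_predict={n_predict}, streaming={streaming}")

interface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Chat Input", placeholder="Start the conversation..."),
        gr.Number(value=200, label="Max Tokens"),
        gr.Number(value=0.7, label="Temperature"),
        gr.Number(value=40, label="Top-k"),
        gr.Number(value=0.4, label="Top-p"),
        gr.Number(value=1.18, label="Repeat Penalty"),
        gr.Number(value=64, label="Repeat Last n"),
        gr.Number(value=8, label="Batch Size"),
        gr.Textbox(value="Auto", label="Number of Predictions"),
        gr.Checkbox(value=False, label="Streaming"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="GPT-4 Chatbot",
)

if __name__ == "__main__":
    interface.launch(debug=True)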