Bhaskar2611 committed on
Commit
2088584
·
verified ·
1 Parent(s): 53cff8f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -17
app.py CHANGED
@@ -1,3 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
@@ -15,21 +112,6 @@ def format_prompt(message, history):
15
  prompt += f"[INST] {message} [/INST]"
16
  return prompt
17
 
18
- # def format_prompt(message, history):
19
- # prompt = "<s>"
20
- # # Add the system instruction
21
- # prompt += "[INST] You are an AI Dermatologist chatbot designed to assist users with skin and hair care by only providing text. "
22
- # prompt += "If the user hasn't provided sufficient information, ask them what they want to know related to skin and hair. [/INST]"
23
-
24
- # # Optionally include history (if relevant for the conversation context)
25
- # for user_input, bot_response in history:
26
- # prompt += f" {bot_response}" # Only include bot responses, not user inputs
27
-
28
- # # Append the user's latest message to generate a response, but do not include it in the final prompt
29
- # prompt += f" {message}"
30
-
31
- # return prompt
32
-
33
  # Function to generate responses with the AI Dermatologist context
34
  def generate(
35
  prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
@@ -61,7 +143,7 @@ def generate(
61
  return output
62
 
63
  # Customizable input controls for the chatbot interface
64
- additional_inputs = [
65
  gr.Slider(
66
  label="Temperature",
67
  value=0.9,
@@ -104,7 +186,7 @@ additional_inputs = [
104
  gr.ChatInterface(
105
  fn=generate,
106
  chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel"),
107
- additional_inputs=additional_inputs,
108
  title="AI Dermatologist"
109
  ).launch(show_api=False)
110
 
 
1
+ # import gradio as gr
2
+ # from huggingface_hub import InferenceClient
3
+
4
+ # # Initialize the client with your desired model
5
+ # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
+
7
+ # # Define the system prompt as an AI Dermatologist
8
+ # def format_prompt(message, history):
9
+ # prompt = "<s>"
10
+ # # Start the conversation with a system message
11
+ # prompt += "[INST] You are an AI Dermatologist chatbot designed to assist users with skin and hair care by only providing text and if user information is not provided related to skin or hair then ask what they want to know related to skin and hair.[/INST]"
12
+ # for user_prompt, bot_response in history:
13
+ # prompt += f"[INST] {user_prompt} [/INST]"
14
+ # prompt += f" {bot_response}</s> "
15
+ # prompt += f"[INST] {message} [/INST]"
16
+ # return prompt
17
+
18
+ # # Function to generate responses with the AI Dermatologist context
19
+ # def generate(
20
+ # prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
21
+ # ):
22
+ # temperature = float(temperature)
23
+ # if temperature < 1e-2:
24
+ # temperature = 1e-2
25
+ # top_p = float(top_p)
26
+
27
+ # generate_kwargs = dict(
28
+ # temperature=temperature,
29
+ # max_new_tokens=max_new_tokens,
30
+ # top_p=top_p,
31
+ # repetition_penalty=repetition_penalty,
32
+ # do_sample=True,
33
+ # seed=42,
34
+ # )
35
+
36
+ # formatted_prompt = format_prompt(prompt, history)
37
+
38
+ # stream = client.text_generation(
39
+ # formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
40
+ # )
41
+ # output = ""
42
+
43
+ # for response in stream:
44
+ # output += response.token.text
45
+ # yield output
46
+ # return output
47
+
48
+ # # Customizable input controls for the chatbot interface
49
+ # additional_inputs = [
50
+ # gr.Slider(
51
+ # label="Temperature",
52
+ # value=0.9,
53
+ # minimum=0.0,
54
+ # maximum=1.0,
55
+ # step=0.05,
56
+ # interactive=True,
57
+ # info="Higher values produce more diverse outputs",
58
+ # ),
59
+ # gr.Slider(
60
+ # label="Max new tokens",
61
+ # value=256,
62
+ # minimum=0,
63
+ # maximum=1048,
64
+ # step=64,
65
+ # interactive=True,
66
+ # info="The maximum numbers of new tokens",
67
+ # ),
68
+ # gr.Slider(
69
+ # label="Top-p (nucleus sampling)",
70
+ # value=0.90,
71
+ # minimum=0.0,
72
+ # maximum=1,
73
+ # step=0.05,
74
+ # interactive=True,
75
+ # info="Higher values sample more low-probability tokens",
76
+ # ),
77
+ # gr.Slider(
78
+ # label="Repetition penalty",
79
+ # value=1.2,
80
+ # minimum=1.0,
81
+ # maximum=2.0,
82
+ # step=0.05,
83
+ # interactive=True,
84
+ # info="Penalize repeated tokens",
85
+ # )
86
+ # ]
87
+
88
+ # # Define the chatbot interface with the starting system message as AI Dermatologist
89
+ # gr.ChatInterface(
90
+ # fn=generate,
91
+ # chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel"),
92
+ # additional_inputs=additional_inputs,
93
+ # title="AI Dermatologist"
94
+ # ).launch(show_api=False)
95
+
96
+ # # Load your model after launching the interface
97
+ # gr.load("models/Bhaskar2611/Capstone").launch()
98
  import gradio as gr
99
  from huggingface_hub import InferenceClient
100
 
 
112
  prompt += f"[INST] {message} [/INST]"
113
  return prompt
114
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  # Function to generate responses with the AI Dermatologist context
116
  def generate(
117
  prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
 
143
  return output
144
 
145
  # Customizable input controls for the chatbot interface
146
+ Settings = [
147
  gr.Slider(
148
  label="Temperature",
149
  value=0.9,
 
186
  gr.ChatInterface(
187
  fn=generate,
188
  chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel"),
189
+ additional_inputs= Settings,
190
  title="AI Dermatologist"
191
  ).launch(show_api=False)
192