import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the client with your desired model
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
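# Note: zephyr-7b-beta was fine-tuned with a ChatML-style chat template
# (<|system|>, <|user|>, <|assistant|>), while format_prompt() below builds
# Mistral-style [INST] tags. The model still responds to this prompt, but
# switching to Zephyr's native template may improve output quality.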
# Define the system prompt as an AI Dermatologist
def format_prompt(message, history):
    prompt = "<s>"
    # Start the conversation with a system message
    prompt += "[INST] You are an AI Dermatologist designed to assist users with skin and hair care.[/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
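# Illustrative example (hypothetical input): with an empty history,
#   format_prompt("Is daily SPF 30 sunscreen enough?", [])
# returns:
#   '<s>[INST] You are an AI Dermatologist designed to assist users with skin '
#   'and hair care.[/INST][INST] Is daily SPF 30 sunscreen enough? [/INST]'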
# Function to generate responses with the AI Dermatologist context
def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
):
    temperature = float(temperature)
    # Clamp temperature away from zero so sampling stays numerically stable
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    # Accumulate streamed tokens and yield the running text to the UI
    output = ""
    for response in stream:
        output += response.token.text
        yield output
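# Because generate() yields the accumulated text on each token, Gradio
# re-renders the in-progress message, giving the user a live streaming
# ("typing") effect instead of waiting for the full completion.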
# Customizable input controls for the chatbot interface
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
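# ChatInterface passes these slider values to generate() as extra positional
# arguments after (message, history), in the order listed above:
# temperature, max_new_tokens, top_p, repetition_penalty.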
# Define the chatbot interface with the starting system message as AI Dermatologist
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="AI Dermatologist",
).launch(show_api=False)

# Note: launch() blocks while the server runs, so any code below it only
# executes after the interface is closed. To serve the fine-tuned model
# directly instead of the custom interface above, this one-liner could
# replace everything:
# gr.load("models/Bhaskar2611/Capstone").launch()
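# To run locally (assuming this file is saved as app.py and dependencies are
# installed):
#   pip install gradio huggingface_hub
#   python app.py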