Bhaskar2611 committed on
Commit
4f02b1c
·
verified ·
1 Parent(s): 79a62a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -27
app.py CHANGED
@@ -87,37 +87,100 @@
87
  # if __name__ == "__main__":
88
  # demo.launch()
89
 
90
- import os # Import the os module
91
- import time
92
- import gradio as gr
93
- from langchain_community.chat_models import ChatOpenAI # Updated import based on deprecation warning
94
- from langchain.schema import AIMessage, HumanMessage
95
- import openai
96
-
97
- # Set your OpenAI API key
98
- os.environ["OPENAI_API_KEY"] = "sk-REDACTED"  # SECURITY: a real key was committed here — revoke it and load the key from an environment secret instead
99
-
100
- # Initialize ChatOpenAI
101
- llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
102
-
103
- def predict(message, history):
104
- # Reformat history for LangChain
105
- history_langchain_format = []
106
- for human, ai in history:
107
- history_langchain_format.append(HumanMessage(content=human))
108
- history_langchain_format.append(AIMessage(content=ai))
109
 
110
- # Add latest human message
111
- history_langchain_format.append(HumanMessage(content=message))
112
 
113
- # Get response from the model
114
- gpt_response = llm(history_langchain_format)
115
 
116
- # Return response
117
- return gpt_response.content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
 
119
- # Using ChatInterface to create a chat-style UI
120
- demo = gr.Interface(fn=predict, type="messages")
121
 
122
  if __name__ == "__main__":
123
  demo.launch()
@@ -134,3 +197,4 @@ if __name__ == "__main__":
134
 
135
 
136
 
 
 
87
  # if __name__ == "__main__":
88
  # demo.launch()
89
 
90
+ # import os # Import the os module
91
+ # import time
92
+ # import gradio as gr
93
+ # from langchain_community.chat_models import ChatOpenAI # Updated import based on deprecation warning
94
+ # from langchain.schema import AIMessage, HumanMessage
95
+ # import openai
96
+
97
+ # # Set your OpenAI API key
98
+ # os.environ["OPENAI_API_KEY"] = "sk-REDACTED"  # SECURITY: a real key was committed here — revoke it and load the key from an environment secret instead
99
+
100
+ # # Initialize ChatOpenAI
101
+ # llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
102
+
103
+ # def predict(message, history):
104
+ # # Reformat history for LangChain
105
+ # history_langchain_format = []
106
+ # for human, ai in history:
107
+ # history_langchain_format.append(HumanMessage(content=human))
108
+ # history_langchain_format.append(AIMessage(content=ai))
109
 
110
+ # # Add latest human message
111
+ # history_langchain_format.append(HumanMessage(content=message))
112
 
113
+ # # Get response from the model
114
+ # gpt_response = llm(history_langchain_format)
115
 
116
+ # # Return response
117
+ # return gpt_response.content
118
+
119
+ # # Using ChatInterface to create a chat-style UI
120
+ # demo = gr.ChatInterface(fn=predict, type="messages")
121
+ # if __name__ == "__main__":
122
+ # demo.launch()
123
import gradio as gr
from huggingface_hub import InferenceClient

# `huggingface_hub` Inference API guide:
# https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference

# Single shared client for the hosted Zephyr-7B chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
130
+
131
+
132
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion from the shared `client`.

    Args:
        message: The latest user message.
        history: Prior (user, assistant) turn pairs; empty/None turns are skipped.
        system_message: System prompt placed first in the conversation.
        max_tokens: Cap on newly generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.

    Yields:
        str: The accumulated response text after each streamed chunk.
    """
    messages = [{"role": "system", "content": system_message}]

    # Rebuild the OpenAI-style message list from Gradio's tuple-format history.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Loop variable renamed from `message` — the original shadowed the parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Fix: some stream chunks (role-only or final frames) carry
        # delta.content = None; the original `response += token` then
        # raised TypeError. Only concatenate real text.
        if token:
            response += token
        yield response
163
+
164
+
165
+ """
166
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
167
+ """
168
# Chat UI wiring; customization options: https://www.gradio.app/docs/chatinterface
_controls = [
    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
    ),
]

demo = gr.ChatInterface(respond, additional_inputs=_controls)


if __name__ == "__main__":
    demo.launch()
 
197
 
198
 
199
 
200
+