Spaces:
Runtime error
Runtime error
Commit
·
6ba5e8d
0
Parent(s):
Duplicate from leifive/chatgpt
Browse files
Co-authored-by: Yuren Wu <[email protected]>
- .gitattributes +34 -0
- README.md +14 -0
- app.py +45 -0
- chat_completion.py +62 -0
- requirements.txt +1 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: AI ChatGPT
|
3 |
+
emoji: 📚
|
4 |
+
colorFrom: green
|
5 |
+
colorTo: pink
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.23.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: mit
|
11 |
+
duplicated_from: leifive/chatgpt
|
12 |
+
---
|
13 |
+
|
14 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Gradio front-end for a minimal ChatGPT-style chat app.

Reads the OpenAI key from the OPENAI_API_KEY environment variable and
wires a Chatbot widget to the ChatCompletion wrapper.
"""
import os
import argparse

import gradio as gr

from chat_completion import ChatCompletion

# Key comes from the environment; ChatCompletion falls back to a key
# file when this is None.
api_key = os.environ.get("OPENAI_API_KEY")

parser = argparse.ArgumentParser()
parser.add_argument('--share', action='store_true', default=False)
parser.add_argument('--welcome', type=str, default='Say something to ChatGPT here ...')
parser.add_argument('--title', type=str, default='ChatGPT')
parser.add_argument('--setting', type=str, default=None)
args = parser.parse_args()

bot = ChatCompletion(api_key=api_key)

with gr.Blocks(title=args.title) as demo:
    chatbot = gr.Chatbot(show_label=False)
    msg = gr.TextArea(show_label=False, placeholder=args.welcome)
    send_btn = gr.Button('Send')
    retry_btn = gr.Button('Retry')
    reset_btn = gr.Button('Reset')

    def send(user_message, history):
        """Submit *user_message* and append the exchange to *history*."""
        # Ignore empty submissions instead of calling the API.
        if not user_message:
            return '', history
        response = bot(user_message, setting=args.setting)
        return '', history + [[user_message, response]]

    def reset():
        """Clear both the bot's context and the Chatbot widget."""
        bot.reset()
        # An empty list fully clears the widget; the original
        # [[None, None]] left a blank message pair behind.
        return None, []

    def retry(history):
        """Re-ask the last question and append the new answer.

        The original routed retries through send() with the literal
        sentinel string 'retry', which both misfired when a user typed
        "retry" and recorded 'retry' as the user message.
        """
        if not history:
            return '', history
        response = bot.retry()
        return '', history + [[history[-1][0], response]]

    send_btn.click(send, inputs=[msg, chatbot], outputs=[msg, chatbot], show_progress=True)
    reset_btn.click(reset, inputs=None, outputs=[msg, chatbot])
    retry_btn.click(retry, inputs=chatbot, outputs=[msg, chatbot])


demo.launch(share=args.share)
chat_completion.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import linecache
|
2 |
+
import re
|
3 |
+
from typing import Dict, List, Optional
|
4 |
+
|
5 |
+
import openai
|
6 |
+
|
7 |
+
|
8 |
+
class ChatCompletion:
|
9 |
+
def __init__(self, model: str = 'gpt-3.5-turbo',
|
10 |
+
api_key: Optional[str] = None, api_key_path: str = './openai_api_key'):
|
11 |
+
if api_key is None:
|
12 |
+
openai.api_key = api_key
|
13 |
+
api_key = linecache.getline(api_key_path, 2).strip('\n')
|
14 |
+
if len(api_key) == 0:
|
15 |
+
raise EnvironmentError
|
16 |
+
openai.api_key = api_key
|
17 |
+
|
18 |
+
self.model = model
|
19 |
+
self.system_messages = []
|
20 |
+
self.user_messages = []
|
21 |
+
|
22 |
+
def chat(self, msg: str, setting: Optional[str] = None, model: Optional[str] = None) -> str:
|
23 |
+
if self._context_length() > 2048:
|
24 |
+
self.reset()
|
25 |
+
if setting is not None:
|
26 |
+
if setting not in self.system_messages:
|
27 |
+
self.system_messages.append(setting)
|
28 |
+
if not self.user_messages or msg != self.user_messages[-1]:
|
29 |
+
self.user_messages.append(msg)
|
30 |
+
|
31 |
+
return self._run(model)
|
32 |
+
|
33 |
+
def retry(self, model: Optional[str] = None) -> str:
|
34 |
+
return self._run(model)
|
35 |
+
|
36 |
+
def reset(self):
|
37 |
+
self.system_messages.clear()
|
38 |
+
self.user_messages.clear()
|
39 |
+
|
40 |
+
def _make_message(self) -> List[Dict]:
|
41 |
+
sys_messages = [{'role': 'system', 'content': msg} for msg in self.system_messages]
|
42 |
+
user_messages = [{'role': 'user', 'content': msg} for msg in self.user_messages]
|
43 |
+
return sys_messages + user_messages
|
44 |
+
|
45 |
+
def _context_length(self) -> int:
|
46 |
+
return len(''.join(self.system_messages)) + len(''.join(self.user_messages))
|
47 |
+
|
48 |
+
def _run(self, model: Optional[str] = None) -> str:
|
49 |
+
if model is None:
|
50 |
+
model = self.model
|
51 |
+
try:
|
52 |
+
response = openai.ChatCompletion.create(model=model, messages=self._make_message())
|
53 |
+
ans = response['choices'][0]['message']['content']
|
54 |
+
ans = re.sub(r'^\n+', '', ans)
|
55 |
+
except openai.error.OpenAIError as e:
|
56 |
+
ans = e
|
57 |
+
except Exception as e:
|
58 |
+
print(e)
|
59 |
+
return ans
|
60 |
+
|
61 |
+
def __call__(self, msg: str, setting: Optional[str] = None, model: Optional[str] = None) -> str:
|
62 |
+
return self.chat(msg, setting, model)
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
openai
|