zuleleee committed on
Commit
bf78b42
·
1 Parent(s): b6dea63
Files changed (2) hide show
  1. main.py +127 -0
  2. requirements.txt +0 -0
main.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
import openai
import pinecone
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Pinecone

# SECURITY FIX: the original commit hard-coded live Pinecone and OpenAI API
# keys directly in this file.  Secrets must never be committed to source
# control -- the leaked keys must be revoked, and fresh ones supplied through
# the environment (e.g. the Space/deployment secret settings).  The rest of
# the script reads these values from os.environ, so here we only validate
# that they are present and fail fast with a clear message if not.
for _required in ("PINECONE_API_KEY", "PINECONE_ENV", "OPENAI_API_KEY"):
    if not os.environ.get(_required):
        raise RuntimeError(
            f"Environment variable {_required} is not set; "
            "configure it before starting the app."
        )

# NOTE: the Pinecone index was originally populated from
# "products_231022 - Products.csv" via CSVLoader + CharacterTextSplitter
# (chunk_size=1000, chunk_overlap=0); this app only queries the existing
# index, so the ingestion step stays disabled.

# Embedding model -- must match the one used to build the "chatbot" index.
embeddings = OpenAIEmbeddings(openai_api_key=os.environ["OPENAI_API_KEY"])

# Initialize the Pinecone client (credentials come from the environment).
pinecone.init(
    api_key=os.environ["PINECONE_API_KEY"],    # find at app.pinecone.io
    environment=os.environ["PINECONE_ENV"],    # next to the API key in console
)

index_name = "chatbot"

# Vector store backed by the pre-built Pinecone index.
vectordb = Pinecone.from_existing_index(index_name, embeddings)
40
+
41
# NOTE(review): an earlier draft of the chatbot (a plain gr.Interface wired
# to a chatbot(input_text) function calling chain.run) was left here fully
# commented out; it has been removed -- recover it from git history if ever
# needed again.

import gradio as gr  # duplicate of the top-of-file import; harmless, kept as-is
import openai
import os

# BUG FIX: the original line was
#     openai.api_key = os.getenv('sk-UWm...')
# which passed the (leaked!) API key itself as the *name* of an environment
# variable, so os.getenv always returned None and openai.api_key was never
# actually set.  Read the standard variable instead; the leaked key must be
# revoked and replaced via the environment.
openai.api_key = os.environ.get("OPENAI_API_KEY")
78
+
79
class Conversation:
    """Rolling-window chat session answered by a Pinecone-backed
    ConversationalRetrievalChain.

    Parameters
    ----------
    num_of_round : int
        Maximum number of question/answer rounds retained in ``self.messages``.
    """

    def __init__(self, num_of_round):
        self.num_of_round = num_of_round
        # Flat transcript: alternating user/assistant dicts (OpenAI chat format).
        self.messages = []
        # BUG FIX: the original rebuilt the LLM, retriever, memory and chain on
        # EVERY ask() call, so the fresh ConversationBufferMemory was discarded
        # each turn and the "conversational" chain had no memory at all.  Build
        # the chain lazily once and reuse it across calls.
        self._chain = None

    def _get_chain(self):
        """Build (once) and return the retrieval chain over the module-level vectordb."""
        if self._chain is None:
            retriever = vectordb.as_retriever()
            llm = ChatOpenAI(
                model_name="gpt-3.5-turbo",
                temperature=0,
                openai_api_key=os.environ["OPENAI_API_KEY"],
            )
            memory = ConversationBufferMemory(
                memory_key="chat_history", return_messages=True
            )
            self._chain = ConversationalRetrievalChain.from_llm(
                llm, retriever=retriever, memory=memory
            )
        return self._chain

    def ask(self, question):
        """Answer *question*, record the turn, and trim old rounds.

        Returns the model's answer as a string.  On failure it returns the
        error message text -- the original returned the raw Exception object,
        which the Gradio chat component cannot render.
        """
        self.messages.append({"role": "user", "content": question})
        try:
            response = self._get_chain().run({"question": question})
        except Exception as e:  # surface the error to the UI instead of crashing
            print(e)
            return str(e)

        # Append the latest answer to the transcript.
        self.messages.append({"role": "assistant", "content": response})

        # Keep at most num_of_round rounds (plus one slot of slack, matching
        # the original bound).  BUG FIX: the original deleted messages[1:3],
        # which assumes a system prompt sits at index 0; there is none here,
        # so that corrupted user/assistant alternation.  Drop the oldest full
        # round instead.
        if len(self.messages) > self.num_of_round * 2 + 1:
            del self.messages[0:2]
        return response


# Single shared conversation; Gradio state holds only the display history.
conv = Conversation(10)
108
+
109
+
110
def answer(question, history=None):
    """Gradio callback: record the new turn and refresh the chat display.

    Parameters
    ----------
    question : str
        The user's new message from the textbox.
    history : list | None
        Flat alternating [user, bot, user, bot, ...] transcript kept in
        gr.State.  BUG FIX: the original declared ``history=[]`` -- a mutable
        default argument shared across every call that omits it.

    Returns
    -------
    tuple
        (list of (user, bot) pairs for the Chatbot component,
         updated flat history for gr.State).
    """
    if history is None:
        history = []
    history.append(question)
    response = conv.ask(question)
    history.append(response)
    # Pair consecutive entries: even indices are user turns, odd are bot turns.
    responses = [(u, b) for u, b in zip(history[::2], history[1::2])]
    return responses, history
116
+
117
+
118
# Assemble the chat UI: a Chatbot display, hidden per-session state, and a
# textbox that submits on Enter.
_css = "#chatbot{height:300px} .overflow-y-auto{height:500px}"

with gr.Blocks(css=_css) as demo:
    chat_window = gr.Chatbot(elem_id="chatbot")
    session_history = gr.State([])

    with gr.Row():
        question_box = gr.Textbox(
            show_label=False, placeholder="Enter question and press enter"
        )

    # On Enter: answer(question, history) -> (pairs for the chat, new history).
    question_box.submit(
        answer, [question_box, session_history], [chat_window, session_history]
    )

demo.launch()
requirements.txt ADDED
Binary file (3.26 kB). View file