rodrigomasini commited on
Commit
04e7d93
·
1 Parent(s): 4eca6b5

Create utils.py

Browse files
Files changed (1) hide show
  1. utils.py +121 -0
utils.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from langchain.chat_models import ChatOpenAI
3
+ from langchain.schema import (
4
+ AIMessage,
5
+ HumanMessage,
6
+ SystemMessage
7
+ )
8
+
9
def get_api_response(content: str, max_tokens=None):
    """Send *content* to the chat model and return the reply text.

    Parameters
    ----------
    content : str
        User prompt, forwarded to the model as a HumanMessage.
    max_tokens : int | None
        Accepted for interface compatibility; not forwarded to the model
        (same as the original implementation).

    Returns
    -------
    str
        The model's reply text, or the string "Error: response not found"
        when the API call failed.
    """
    # NOTE(review): OPENAI_API_KEY and OPENAI_Proxy are not defined in this
    # file — presumably injected as module-level globals elsewhere; confirm.
    chat = ChatOpenAI(
        openai_api_key=OPENAI_API_KEY,
        model='gpt-3.5-turbo-16k-0613',
        openai_proxy=OPENAI_Proxy,
        temperature=0.5)

    messages = [
        SystemMessage(content="You are a helpful and creative assistant for writing novel."),
        HumanMessage(content=content)
    ]

    # Initialise before the try so the post-call check cannot raise
    # NameError — the original left `response` unbound when chat() raised.
    response = None
    try:
        response = chat(messages)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        # NOTE(review): `st` (streamlit?) is not imported in this file —
        # this line raises NameError if reached; confirm the import exists
        # in the calling module or replace with logging.
        st.error("OpenAI Error")

    if response is not None:
        return response.content
    else:
        return "Error: response not found"
51
+
52
def get_content_between_a_b(a, b, text):
    """Return the text between marker *a* and a newline followed by marker *b*.

    The matched span is taken non-greedily across newlines (DOTALL) and
    stripped of surrounding whitespace.

    Raises
    ------
    AttributeError
        When no match is found (`re.search` returns None and `.group`
        fails) — same behaviour as the original.
    """
    # re.escape guards against regex metacharacters in the markers; the
    # literal "\n" requires *b* to begin on a new line. Current callers
    # pass plain literals, so behaviour is unchanged for them.
    pattern = f"{re.escape(a)}(.*?)\n{re.escape(b)}"
    return re.search(pattern, text, re.DOTALL).group(1).strip()
54
+
55
+
56
def get_init(init_text=None, text=None, response_file=None):
    """Produce the initial novel state as a dict of named sections.

    Parameters
    ----------
    init_text : str | None
        Path to a .txt file that already contains the title, outline and
        the first 3 paragraphs; when given it is read directly.
    text : str | None
        Init prompt sent to the model when no .txt file is given.
    response_file : str | None
        Optional path; when generating, the raw model output is appended
        there for logging.

    Returns
    -------
    dict
        Keys: "name", "Outline", "Paragraph 1..3", "Summary",
        "Instruction 1..3" — parsed out of the model output by its
        section headers.
    """
    if not init_text:
        # No cached file: ask the model with the init prompt.
        response = get_api_response(text)
        print(response)

        if response_file:
            with open(response_file, 'a', encoding='utf-8') as f:
                f.write(f"Init output here:\n{response}\n\n")
    else:
        # Cached run: read the previously saved model output.
        # (`with` already closes the file — the original's extra
        # f.close() inside the block was redundant and is removed.)
        with open(init_text, 'r', encoding='utf-8') as f:
            response = f.read()
    paragraphs = {
        "name": "",
        "Outline": "",
        "Paragraph 1": "",
        "Paragraph 2": "",
        "Paragraph 3": "",
        "Summary": "",
        "Instruction 1": "",
        "Instruction 2": "",
        "Instruction 3": ""
    }
    # Each section is delimited by the next section's header in the raw text.
    paragraphs['name'] = get_content_between_a_b('Name:', 'Outline', response)

    paragraphs['Paragraph 1'] = get_content_between_a_b('Paragraph 1:', 'Paragraph 2:', response)
    paragraphs['Paragraph 2'] = get_content_between_a_b('Paragraph 2:', 'Paragraph 3:', response)
    paragraphs['Paragraph 3'] = get_content_between_a_b('Paragraph 3:', 'Summary', response)
    paragraphs['Summary'] = get_content_between_a_b('Summary:', 'Instruction 1', response)
    paragraphs['Instruction 1'] = get_content_between_a_b('Instruction 1:', 'Instruction 2', response)
    paragraphs['Instruction 2'] = get_content_between_a_b('Instruction 2:', 'Instruction 3', response)
    lines = response.splitlines()
    # Content of Instruction 3 may be on the same line as its header or on
    # the following line, so it is taken from the last line of the output.
    if lines[-1] != '\n' and lines[-1].startswith('Instruction 3'):
        paragraphs['Instruction 3'] = lines[-1][len("Instruction 3:"):]
    elif lines[-1] != '\n':
        paragraphs['Instruction 3'] = lines[-1]
    # The model sometimes emits a chapter-level outline ("Chapter ...")
    # and sometimes goes straight to "Paragraph 1" — handle both layouts.
    for line in lines:
        if line.startswith('Chapter'):
            paragraphs['Outline'] = get_content_between_a_b('Outline:', 'Chapter', response)
            break
    if paragraphs['Outline'] == '':
        paragraphs['Outline'] = get_content_between_a_b('Outline:', 'Paragraph', response)

    return paragraphs
107
+
108
def get_chatgpt_response(model, prompt):
    """Stream *prompt* through *model* and return the last message text.

    `model.ask` yields incremental payload dicts; only the "message"
    field of the final chunk is kept. The conversation is then deleted
    and the chat state reset before returning.
    """
    final_message = ""
    # Drain the stream; each chunk overwrites the previous one, so the
    # loop ends holding the complete final message.
    for chunk in model.ask(prompt):
        final_message = chunk["message"]
    # Clean up the server-side conversation state.
    model.delete_conversation(model.conversation_id)
    model.reset_chat()
    return final_message
115
+
116
+
117
def parse_instructions(instructions):
    """Format a sequence of instruction strings as a numbered list.

    Each entry becomes "N. <instruction>\\n", numbered from 1; an empty
    sequence yields the empty string.
    """
    numbered = (f"{idx}. {item}\n" for idx, item in enumerate(instructions, start=1))
    return "".join(numbered)