Wedyan2023 committed
Commit d9f4633 · verified · 1 Parent(s): b9ca69f

Delete app10.py

Files changed (1)
  1. app10.py +0 -208
app10.py DELETED
@@ -1,208 +0,0 @@
- ## update of aap7.py
-
- import os
- import streamlit as st
- from openai import OpenAI
- from dotenv import load_dotenv
- from langchain_core.prompts import PromptTemplate
-
- # Load environment variables
- load_dotenv()
- ##openai_api_key = os.getenv("OPENAI_API_KEY")
-
- # Initialize the client
- client = OpenAI(
-     base_url="https://api-inference.huggingface.co/v1",
-     api_key=os.environ.get('TOKEN2') # Add your Huggingface token here
- )
-
-
- # Initialize the OpenAI client
- ##client = OpenAI(
- ##base_url="https://api-inference.huggingface.co/v1",
- ##api_key=openai_api_key
- ##)
-
- # Define reset function for the conversation
- def reset_conversation():
-     st.session_state.conversation = []
-     st.session_state.messages = []
-
- # Streamlit interface setup
- st.title("🤖 Text Data Generation & Labeling App")
- st.sidebar.title("Settings")
-
- # Sidebar settings
- selected_model = st.sidebar.selectbox("Select Model", ["meta-llama/Meta-Llama-3-8B-Instruct"])
- temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.5)
- st.sidebar.button("Reset Conversation", on_click=reset_conversation)
- st.sidebar.write(f"You're now chatting with **{selected_model}**")
- st.sidebar.markdown("*Note: Generated content may be inaccurate or false.*")
-
- # Initialize conversation state
- if "messages" not in st.session_state:
-     st.session_state.messages = []
-
- # Display conversation
- for message in st.session_state.messages:
-     with st.chat_message(message["role"]):
-         st.markdown(message["content"])
-
- # Main logic: choose between Data Generation and Data Labeling
- task_choice = st.selectbox("Choose Task", ["Data Generation", "Data Labeling"])
-
- if task_choice == "Data Generation":
-     classification_type = st.selectbox(
-         "Choose Classification Type",
-         ["Sentiment Analysis", "Binary Classification", "Multi-Class Classification"]
-     )
-
-     if classification_type == "Sentiment Analysis":
-         labels = ["Positive", "Negative", "Neutral"]
-     elif classification_type == "Binary Classification":
-         label_1 = st.text_input("Enter first class")
-         label_2 = st.text_input("Enter second class")
-         labels = [label_1, label_2]
-     else: # Multi-Class Classification
-         num_classes = st.slider("How many classes?", 3, 10, 3)
-         labels = [st.text_input(f"Class {i+1}") for i in range(num_classes)]
-
-     domain = st.selectbox("Choose Domain", ["Restaurant reviews", "E-commerce reviews", "Custom"])
-     if domain == "Custom":
-         domain = st.text_input("Specify custom domain")
-
-     min_words = st.number_input("Minimum words per example", min_value=10, max_value=90, value=10)
-     max_words = st.number_input("Maximum words per example", min_value=10, max_value=90, value=90)
-
-     use_few_shot = st.radio("Use few-shot examples?", ["Yes", "No"])
-     few_shot_examples = []
-     if use_few_shot == "Yes":
-         num_examples = st.slider("Number of few-shot examples", 1, 5, 1)
-         for i in range(num_examples):
-             content = st.text_area(f"Example {i+1} Content")
-             label = st.selectbox(f"Example {i+1} Label", labels)
-             few_shot_examples.append({"content": content, "label": label})
-
-     num_to_generate = st.number_input("Number of examples to generate", 1, 100, 10)
-     user_prompt = st.text_area("Enter additional instructions", "")
-
-     # Construct the LangChain prompt
-     prompt_template = PromptTemplate(
-         input_variables=["classification_type", "domain", "num_examples", "min_words", "max_words", "labels", "user_prompt"],
-         template=(
-             "You are a professional {classification_type} expert tasked with generating examples for {domain}.\n"
-             "Use the following parameters:\n"
-             "- Number of examples: {num_examples}\n"
-             "- Word range: {min_words}-{max_words}\n"
-             "- Labels: {labels}\n"
-             "{user_prompt}"
-         )
-     )
-     system_prompt = prompt_template.format(
-         classification_type=classification_type,
-         domain=domain,
-         num_examples=num_to_generate,
-         min_words=min_words,
-         max_words=max_words,
-         labels=", ".join(labels),
-         user_prompt=user_prompt
-     )
-
-     st.write("System Prompt:")
-     st.code(system_prompt)
-
-     if st.button("Generate Examples"):
-         with st.spinner("Generating..."):
-             st.session_state.messages.append({"role": "system", "content": system_prompt})
-             try:
-                 stream = client.chat.completions.create(
-                     model=selected_model,
-                     messages=[{"role": "system", "content": system_prompt}],
-                     temperature=temperature,
-                     stream=True,
-                     max_tokens=3000,
-                 )
-                 response = st.write_stream(stream)
-                 st.session_state.messages.append({"role": "assistant", "content": response})
-             except Exception as e:
-                 st.error("An error occurred during generation.")
-                 st.error(f"Details: {e}")
-
-
- elif task_choice == "Data Labeling":
-     # Labeling logic
-     labeling_type = st.selectbox(
-         "Classification Type for Labeling",
-         ["Sentiment Analysis", "Binary Classification", "Multi-Class Classification"]
-     )
-
-     if labeling_type == "Sentiment Analysis":
-         labels = ["Positive", "Negative", "Neutral"]
-     elif labeling_type == "Binary Classification":
-         label_1 = st.text_input("First label for classification")
-         label_2 = st.text_input("Second label for classification")
-         labels = [label_1, label_2]
-     else: # Multi-Class Classification
-         num_classes = st.slider("Number of labels", 3, 10, 3)
-         labels = [st.text_input(f"Label {i+1}") for i in range(num_classes)]
-
-     use_few_shot_labeling = st.radio("Add few-shot examples for labeling?", ["Yes", "No"])
-     few_shot_labeling_examples = []
-     if use_few_shot_labeling == "Yes":
-         num_labeling_examples = st.slider("Number of few-shot labeling examples", 1, 5, 1)
-         for i in range(num_labeling_examples):
-             content = st.text_area(f"Labeling Example {i+1} Content")
-             label = st.selectbox(f"Label for Example {i+1}", labels)
-             few_shot_labeling_examples.append({"content": content, "label": label})
-
-     text_to_classify = st.text_area("Enter text to classify")
-
-     if st.button("Classify Text"):
-         if text_to_classify:
-             # Construct the labeling prompt
-             labeling_prompt_template = PromptTemplate(
-                 input_variables=["labeling_type", "labels", "few_shot_examples", "text_to_classify"],
-                 template=(
-                     "You are an expert in {labeling_type} classification. "
-                     "Classify the following text using: {labels}.\n\n"
-                     "Do NOT write additional information or commentary.\n"
-                     "Use {few_shot_examples} as guidance in the labeling process.\n"
-                     "Write the classification as: {text_to_classify} Label: [Label]\n"
-                     "Classify this: {text_to_classify}"
-                 )
-             )
-
-             # Prepare few-shot examples for the prompt
-             few_shot_examples_text = ""
-             if few_shot_labeling_examples:
-                 few_shot_examples_text += "Example classifications:\n"
-                 for ex in few_shot_labeling_examples:
-                     few_shot_examples_text += f"Text: {ex['content']} - Label: {ex['label']}\n"
-
-             # Format the prompt with the user's input
-             labeling_prompt = labeling_prompt_template.format(
-                 labeling_type=labeling_type.lower(),
-                 labels=", ".join(labels),
-                 few_shot_examples=few_shot_examples_text.strip(),
-                 text_to_classify=text_to_classify
-             )
-
-             with st.spinner("Classifying..."):
-                 st.session_state.messages.append({"role": "system", "content": labeling_prompt})
-                 try:
-                     stream = client.chat.completions.create(
-                         model=selected_model,
-                         messages=[{"role": "system", "content": labeling_prompt}],
-                         temperature=temperature,
-                         stream=True,
-                         max_tokens=3000,
-                     )
-                     labeling_response = st.write_stream(stream)
-                     # Format response to match desired output
-                     formatted_response = f"Label: {labeling_response}"
-                     st.write(formatted_response)
-                 except Exception as e:
-                     st.error("An error occurred during classification.")
-                     st.error(f"Details: {e}")
-         else:
-             st.warning("Please enter text to classify.")