peterciank committed
Commit a7b6dea · verified
1 Parent(s): 39377c0

Delete pages

Files changed (1)
  1. pages/Comparision.py +0 -89
pages/Comparision.py DELETED
@@ -1,89 +0,0 @@
- import streamlit as st
- from openai import OpenAI
- import os
- import requests
- from dotenv import load_dotenv
-
- # Load environment variables
- load_dotenv()
-
- # Initialize the client with HuggingFace
- client = OpenAI(
-     base_url="https://api-inference.huggingface.co/v1",
-     api_key=os.environ.get('HFSecret')  # Replace with your HuggingFace token
- )
-
- # Define the Llama 3 8B model
- repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
-
- # Title of the App
- st.title("Text Analysis with Llama 3: Sentiment, Summarization, and Keyword Extraction")
-
- # Dropdown options to choose a text file
- options = ['None', 'Appreciation Letter', 'Regret Letter', 'Kindness Tale', 'Lost Melody Tale', 'Twitter Example 1', 'Twitter Example 2']
-
- # Create a dropdown menu to select options
- selected_option = st.selectbox("Select a preset option", options)
-
- # Define URLs for different text options
- url_dict = {
-     'Appreciation Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt",
-     'Regret Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt",
-     'Kindness Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt",
-     'Lost Melody Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt",
-     'Twitter Example 1': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt",
-     'Twitter Example 2': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
- }
-
- # Function to fetch text content
- def fetch_text_content(option):
-     if option in url_dict:
-         response = requests.get(url_dict[option])
-         return response.text if response.status_code == 200 else "Error fetching the text"
-     return ""
-
- # Fetch the selected text
- jd = fetch_text_content(selected_option)
-
- # Display fetched text
- text = st.text_area('Enter the text to analyze', jd)
-
- # Function to call Llama 3 for analysis
- def call_llama_analysis(task, text):
-     prompt = f"Perform {task} on the following text:\n\n{text}"
-
-     # Call Llama 3 for the task
-     response = client.completions.create(
-         model=repo_id,
-         prompt=prompt,
-         max_tokens=3000,
-         temperature=0.5
-     )
-
-     return response.choices[0].text  # openai>=1.0 returns a Completion object; dict-style indexing would raise a TypeError
-
- # Start analysis on button click
- if st.button("Start Analysis"):
-     with st.spinner("Analyzing Sentiment..."):
-         try:
-             sentiment_result = call_llama_analysis("sentiment analysis", text)
-             with st.expander("Sentiment Analysis - ✅ Completed", expanded=True):
-                 st.write(sentiment_result)
-         except Exception as e:
-             st.error(f"Error in Sentiment Analysis: {str(e)}")
-
-     with st.spinner("Summarizing..."):
-         try:
-             summary_result = call_llama_analysis("summarization", text)
-             with st.expander("Summarization - ✅ Completed", expanded=True):
-                 st.write(summary_result)
-         except Exception as e:
-             st.error(f"Error in Summarization: {str(e)}")
-
-     with st.spinner("Extracting Keywords..."):
-         try:
-             keywords_result = call_llama_analysis("keyword extraction", text)
-             with st.expander("Keywords Extraction - ✅ Completed", expanded=True):
-                 st.write(keywords_result)
-         except Exception as e:
-             st.error(f"Error in Keyword Extraction: {str(e)}")