Update app.py
app.py CHANGED
@@ -1,10 +1,9 @@
import os
import requests
-
-from bs4 import BeautifulSoup
+from collections import Counter

api_token = os.environ.get("TOKEN")
-API_URL = "https://api-inference.huggingface.co/models/meta-llama/
+API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
headers = {"Authorization": f"Bearer {api_token}"}

def query(payload):
@@ -13,8 +12,7 @@ def query(payload):

def analyze_sentiment(pl7_texts):
    output = query({
-        "inputs": f'''
-<|start_header_id|>system<|end_header_id|>
+        "inputs": f'''<s>[INST] <<SYS>>
You're going to deeply analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories:
For posts that talk about chat models/LLM, return "Chatmodel/LLM"
For posts that talk about image generation models, return "image_generation"
@@ -26,46 +24,34 @@ For posts about tools and libraries, return "tools_libraries"
For posts containing tutorials and guides, return "tutorials_guides"
For posts about debugging and problem-solving, return "debugging"
Respond only with the category name, without any additional explanation or text.
-
-
+<</SYS>>
+
+Analyze the following text:
{pl7_texts}
-
-<|start_header_id|>assistant<|end_header_id|>
+[/INST]
'''
    })
-
-
-
-
-        'chatmodel/llm': 'Chat Model/LLM',
-        'image_generation': 'Image Generation',
-        'fine_tuning': 'Fine-tuning',
-        'ethics_bias': 'Ethics and Bias',
-        'datasets': 'Datasets',
-        'tools_libraries': 'Tools and Libraries',
-        'tutorials_guides': 'Tutorials and Guides',
-        'debugging': 'Debugging'
-    }
-    return categories.get(response, f"Error: Ambiguous response - '{response}'")
-    return "Error: No valid response received"
+
+    if isinstance(output, list) and len(output) > 0 and 'generated_text' in output[0]:
+        return output[0]['generated_text'].strip().lower()
+    return "unknown"

url = 'https://huggingface.co/posts'
response = requests.get(url)

if response.status_code == 200:
+    from bs4 import BeautifulSoup
    soup = BeautifulSoup(response.content, 'html.parser')
    pl7_elements = soup.find_all(class_='pl-7')
    pl7_texts = [element.text.strip() for element in pl7_elements]

-
-
+    sentiment_counter = Counter()
+
+    for text in pl7_texts:
        sentiment = analyze_sentiment(text)
-
+        sentiment_counter[sentiment] += 1

-
-        print(f"
-        print(f"Content of pl7_text_2: {pl7_texts[1]}")
-    else:
-        print("Not enough pl-7 elements found")
+    for category, count in sentiment_counter.items():
+        print(f"{category} = {count}")
else:
    print(f"Error {response.status_code} when retrieving {url}")
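
The body of query() is unchanged by this commit, so the diff collapses it. Against the hosted Inference API it is usually a thin wrapper around requests.post; the following is only a minimal sketch under that assumption (the real helper is not shown in this diff), reusing the module-level API_URL and headers:

def query(payload):
    # POST the JSON payload to the Inference API endpoint, authenticating
    # with the bearer token read from the TOKEN environment variable.
    response = requests.post(API_URL, headers=headers, json=payload)
    # The text-generation task normally replies with a JSON list such as
    # [{"generated_text": "..."}], which analyze_sentiment() unpacks.
    return response.json()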
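
The prompt is also rewritten from the Llama 3 header-token format (<|start_header_id|>system<|end_header_id|> ...) to the Llama 2 chat template (<s>[INST] <<SYS>> ... <</SYS>> ... [/INST]), matching the new Llama-2-7b-chat-hf endpoint. One caveat: by default the text-generation task echoes the prompt inside generated_text, so the lower-cased string returned by analyze_sentiment() can still contain the whole instruction block rather than only the category name. A hedged variant of the call using the standard text-generation parameters of the Inference API (not part of this commit; prompt stands for the f-string literal built in analyze_sentiment):

output = query({
    "inputs": prompt,
    "parameters": {
        "return_full_text": False,   # return only the completion, not the echoed prompt
        "max_new_tokens": 10,        # a category name is short
        "temperature": 0.1,          # keep the answer close to the listed labels
    },
    "options": {"wait_for_model": True},  # wait for a cold model instead of failing fast
})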
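
Finally, the new guard in analyze_sentiment() only covers the happy path. When the call fails (cold model, invalid token, rate limiting) the endpoint typically returns a JSON object with an "error" field instead of a list, and the function silently maps that to "unknown". A small sketch of how the failure could be surfaced, assuming that usual error shape, placed just before the isinstance(output, list) check:

    if isinstance(output, dict) and "error" in output:
        # e.g. "Model meta-llama/Llama-2-7b-chat-hf is currently loading"
        print(f"Inference API error: {output['error']}")
        return "unknown"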