Divyansh Kushwaha committed
Commit 5011037 · 1 Parent(s): b83c08b
Files changed (2)
  1. Dockerfile +10 -8
  2. api.py +176 -6
Dockerfile CHANGED
@@ -1,17 +1,19 @@
 FROM python:3.9-slim
 
+# Create a non-root user
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
 # Set the working directory
 WORKDIR /app
 
 # Copy requirements and install dependencies
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy the application code
-COPY . .
+COPY --chown=user requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
-# Expose the port FastAPI will run on
-EXPOSE 8000
+# Copy application code
+COPY --chown=user . /app
 
 # Command to run the FastAPI app
-CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "8000"]
+CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]
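
Both changes follow Hugging Face Spaces conventions: Docker Spaces route traffic to port 7860 by default (configurable via app_port in the Space README), and running as a non-root user with UID 1000 is the documented recommendation, which is also why pip now installs into /home/user/.local and PATH is extended. A quick local smoke test, assuming the image is running via docker run -p 7860:7860 (FastAPI serves its interactive docs at /docs by default):

import requests

# Hit FastAPI's built-in docs page to confirm uvicorn is listening on 7860.
resp = requests.get("http://localhost:7860/docs", timeout=5)
print(resp.status_code)  # expect 200 once the container is up
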
api.py CHANGED
@@ -238,16 +238,12 @@
 from fastapi import FastAPI, Query, HTTPException
 from fastapi.responses import JSONResponse, FileResponse
 from elevenlabs import ElevenLabs
+from bs4 import BeautifulSoup
+import requests
 from langchain.schema import HumanMessage
 from langchain_groq import ChatGroq
 import json
 from dotenv import load_dotenv
-from utils import (
-    extract_titles_and_summaries,
-    perform_sentiment_analysis,
-    extract_topics_with_hf,
-    compare_articles
-)
 import os
 load_dotenv()
 GROQ_API_KEY = os.getenv('GROQ_API_KEY')
@@ -261,6 +257,180 @@ llm=ChatGroq(api_key=GROQ_API_KEY, model="llama-3.1-8b-instant")
 JSON_FILE_PATH = "final_summary.json"
 AUDIO_FILE_PATH = "hindi_summary.mp3"
 
+def extract_titles_and_summaries(company_name, num_articles=10):
+    url = f"https://economictimes.indiatimes.com/topic/{company_name}/news"
+    try:
+        response = requests.get(url, timeout=10)
+        if response.status_code != 200:
+            print(f"Failed to fetch the webpage. Status code: {response.status_code}")
+            # Return the same shape as the success path so callers can .get("Articles")
+            return {"Company": company_name, "Articles": []}
+
+        soup = BeautifulSoup(response.content, "html.parser")
+        articles = soup.find_all('div', class_='clr flt topicstry story_list', limit=num_articles)
+        extracted_articles = []
+
+        for article in articles:
+            title_tag = article.find('h2')
+            if title_tag:
+                link_tag = title_tag.find('a')
+                title = link_tag.get_text(strip=True) if link_tag else "No Title Found"
+            else:
+                title = "No Title Found"
+
+            summary_tag = article.find('p')
+            summary = summary_tag.get_text(strip=True) if summary_tag else "No Summary Found"
+
+            extracted_articles.append({
+                "Title": title,
+                "Summary": summary
+            })
+
+        return {
+            "Company": company_name,
+            "Articles": extracted_articles
+        }
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        return {"Company": company_name, "Articles": []}
+
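
The four helpers previously imported from utils are now inlined in api.py. A minimal way to exercise the scraper from a REPL ("Tesla" is a placeholder; the 'clr flt topicstry story_list' selector is tied to Economic Times' current markup and will silently return no articles if the site changes):

data = extract_titles_and_summaries("Tesla", num_articles=3)
for art in data.get("Articles", []):
    print(art["Title"], "->", art["Summary"][:80])
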
+def perform_sentiment_analysis(news_data):
+    from transformers import pipeline  # lazy import: load the heavy dependency only when needed
+    articles = news_data.get("Articles", [])
+    pipe = pipeline("text-classification", model="tabularisai/multilingual-sentiment-analysis", device=-1)  # -1 = CPU; use 0 for the first GPU
+    sentiment_counts = {"Positive": 0, "Negative": 0, "Neutral": 0}
+
+    # Collapse the model's five-way labels into three buckets
+    sentiment_map = {
+        "positive": "Positive",
+        "negative": "Negative",
+        "neutral": "Neutral",
+        "very positive": "Positive",
+        "very negative": "Negative"
+    }
+
+    for article in articles:
+        content = f"{article['Title']} {article['Summary']}"
+        sentiment_result = pipe(content)[0]
+
+        sentiment = sentiment_map.get(sentiment_result["label"].lower(), "Unknown")
+        score = float(sentiment_result["score"])
+
+        article["Sentiment"] = sentiment
+        article["Score"] = score
+
+        if sentiment in sentiment_counts:
+            sentiment_counts[sentiment] += 1
+
+    return news_data, sentiment_counts
+
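
Note that the transformers pipeline is rebuilt on every call, so the model reloads on each request. A cached loader is a cheap fix; a sketch (get_sentiment_pipe is not part of this commit):

from functools import lru_cache
from transformers import pipeline

@lru_cache(maxsize=1)
def get_sentiment_pipe():
    # Load the model once per process and reuse it across requests.
    return pipeline("text-classification",
                    model="tabularisai/multilingual-sentiment-analysis",
                    device=-1)  # -1 = CPU; use 0 for the first GPU
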
+def extract_topics_with_hf(news_data):
+    structured_data = {
+        "Company": news_data.get("Company", "Unknown"),
+        "Articles": []
+    }
+    articles = news_data.get("Articles", [])
+    for article in articles:
+        content = f"{article['Title']} {article['Summary']}"
+        # Prompt the Groq-hosted model for exactly three one-word topics
+        prompt = f"""
+        Analyze the following content: "{content}"
+        Extract and return **exactly three key topics** most relevant to this content.
+        Each topic should be a single word.
+        Respond in JSON format like this:
+        {{"Topics": ["topic1", "topic2", "topic3"]}}
+        """
+        try:
+            response = llm.invoke([HumanMessage(content=prompt)]).content
+            topics_result = json.loads(response).get("Topics", ["Unknown"])  # parse the JSON response
+        except Exception as e:
+            print(f"Error while extracting topics: {e}")
+            topics_result = ["Unknown"]
+
+        structured_data["Articles"].append({
+            "Title": article["Title"],
+            "Summary": article["Summary"],
+            "Sentiment": article.get("Sentiment", "Unknown"),
+            "Score": article.get("Score", 0.0),
+            "Topics": topics_result
+        })
+    return structured_data
+
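
Calling json.loads on raw model output is fragile: chat models often wrap JSON in prose or code fences. A more tolerant parser would pull out the first {...} block before decoding; a sketch (parse_topics is hypothetical, not part of this commit):

import json
import re

def parse_topics(response_text):
    # Extract the first {...} block so surrounding prose doesn't break decoding.
    match = re.search(r"\{.*\}", response_text, re.DOTALL)
    if not match:
        return ["Unknown"]
    try:
        return json.loads(match.group(0)).get("Topics", ["Unknown"])
    except json.JSONDecodeError:
        return ["Unknown"]
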
+def generate_final_sentiment(news_data, sentiment_counts):
+    company_name = news_data["Company"]
+    total_articles = sum(sentiment_counts.values())
+    combined_summaries = " ".join([article["Summary"] for article in news_data["Articles"]])
+    prompt = f"""
+    Based on the analysis of {total_articles} articles about the company "{company_name}":
+    - Positive articles: {sentiment_counts['Positive']}
+    - Negative articles: {sentiment_counts['Negative']}
+    - Neutral articles: {sentiment_counts['Neutral']}
+    The following are the summarized key points from the articles: "{combined_summaries}".
+    Provide a single, concise summary that integrates the overall sentiment analysis and key news highlights while maintaining a natural flow. Explain its implications for the company's reputation, stock potential, and public perception.
+    Respond **ONLY** with a well-structured, concise, short paragraph in plain text, focusing on the overall sentiment.
+    """
+    response = llm.invoke([HumanMessage(content=prompt)], max_tokens=200)
+    # response is an AIMessage; fall back to a plain string when it is empty
+    return response.content if response else "Sentiment analysis summary not available."
+
+def extract_json(response):
+    # Return {} rather than raising when the model's reply isn't valid JSON
+    try:
+        return json.loads(response)
+    except json.JSONDecodeError:
+        return {}
+
+def compare_articles(news_data, sentiment_counts):
+    articles = news_data.get("Articles", [])
+    all_topics = [set(article["Topics"]) for article in articles]
+    common_topics = set.intersection(*all_topics) if all_topics else set()
+    topics_prompt = f"""
+    Analyze the following article topics and identify **only three** key themes that are common across multiple articles,
+    even if they are phrased differently. The topics from each article are:
+    {all_topics}
+
+    Respond **ONLY** with a JSON format:
+    {{"CommonTopics": ["topic1", "topic2", "topic3"]}}
+    """
+    response = llm.invoke([HumanMessage(content=topics_prompt)]).content
+    contextual_common_topics = extract_json(response).get("CommonTopics", list(common_topics))[:3]  # limit to three topics
+
+    total_articles = sum(sentiment_counts.values())
+    comparison_prompt = f"""
+    Provide a high-level summary comparing {total_articles} news articles about "{news_data['Company']}":
+    - Sentiment distribution: {sentiment_counts}
+    - Commonly discussed topics across articles: {contextual_common_topics}
+
+    Consider the following:
+    1. Notable contrasts between articles (e.g., major differences in topics and perspectives).
+    2. Overall implications for the company's reputation, stock potential, and public perception.
+    3. How sentiment varies across articles and its impact.
+
+    Respond **ONLY** with a concise and insightful summary in this JSON format:
+    {{
+        "Coverage Differences": [
+            {{"Comparison": "Brief contrast between Articles 1 & 2", "Impact": "Concise impact statement"}},
+            {{"Comparison": "Brief contrast between Articles 3 & 4", "Impact": "Concise impact statement"}}
+        ]
+    }}
+    """
+    response = llm.invoke([HumanMessage(content=comparison_prompt)]).content
+    coverage_differences = extract_json(response).get("Coverage Differences", [])
+    final_sentiment = generate_final_sentiment(news_data, sentiment_counts)  # llm is a module-level global
+    return {
+        "Company": news_data["Company"],
+        "Articles": articles,
+        "Comparative Sentiment Score": {
+            "Sentiment Distribution": sentiment_counts,
+            "Coverage Differences": coverage_differences,
+            "Topic Overlap": {
+                "Common Topics": contextual_common_topics,
+                "Unique Topics": {
+                    f"Article {i+1}": list(topics - set(contextual_common_topics))
+                    for i, topics in enumerate(all_topics)
+                }
+            }
+        },
+        "Final Sentiment Analysis": final_sentiment
+    }
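
Taken together, the new helpers suggest a scrape -> sentiment -> topics -> compare pipeline, which is presumably what generate_summary orchestrates below. A minimal end-to-end sketch ("Tesla" is a placeholder):

news = extract_titles_and_summaries("Tesla")
news, counts = perform_sentiment_analysis(news)
news = extract_topics_with_hf(news)
report = compare_articles(news, counts)
print(json.dumps(report, indent=2))
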
 
 def generate_summary(company_name):
     news_articles = extract_titles_and_summaries(company_name)