CCockrum committed on
Commit b9e2074 · verified · 1 Parent(s): 6351a04

Update app.py

Files changed (1)
  1. app.py +33 -3
app.py CHANGED
@@ -1,13 +1,17 @@
 import os
-from langchain_huggingface import HuggingFaceEndpoint
+import requests
 import streamlit as st
+from langchain_huggingface import HuggingFaceEndpoint
 from langchain_core.prompts import PromptTemplate
 from langchain_core.output_parsers import StrOutputParser
-import requests
+from transformers import pipeline  # for Sentiment Analysis
 from config import NASA_API_KEY  # Import the NASA API key from the configuration file
 
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 
+# Initialize sentiment analysis pipeline
+sentiment_analyzer = pipeline("sentiment-analysis")
+
 def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1):
     llm = HuggingFaceEndpoint(
         repo_id=model_id,
@@ -29,9 +33,30 @@ def get_nasa_apod():
     else:
         return "I couldn't fetch data from NASA right now. Please try again later."
 
+def analyze_sentiment(user_text):
+    """
+    Analyzes the sentiment of the user's input to adjust responses.
+    """
+    result = sentiment_analyzer(user_text)[0]
+    sentiment = result['label']
+    return sentiment
+
+def predict_action(user_text):
+    """
+    Predicts actions based on user input (e.g., fetch space info or general knowledge).
+    """
+    if "NASA" in user_text or "space" in user_text:
+        return "nasa_info"
+    if "weather" in user_text:
+        return "weather_info"
+    return "general_query"
+
 def get_response(system_message, chat_history, user_text,
                  eos_token_id=['User'], max_new_tokens=256, get_llm_hf_kws={}):
-    if "NASA" in user_text or "space" in user_text:
+    sentiment = analyze_sentiment(user_text)
+    action = predict_action(user_text)
+
+    if action == "nasa_info":
         nasa_response = get_nasa_apod()
         chat_history.append({'role': 'user', 'content': user_text})
         chat_history.append({'role': 'assistant', 'content': nasa_response})
@@ -53,6 +78,11 @@ def get_response(system_message, chat_history, user_text,
 
     chat_history.append({'role': 'user', 'content': user_text})
     chat_history.append({'role': 'assistant', 'content': response})
+
+    # Modify response based on sentiment analysis (e.g., offer help for negative sentiments)
+    if sentiment == "NEGATIVE":
+        response = "I'm sorry to hear that. How can I assist you further?"
+
     return response, chat_history
 
 # Streamlit setup
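
Below is a minimal, standalone sketch (not part of the commit) of how the new keyword routing and the NEGATIVE-sentiment override could be exercised in isolation. The fake_sentiment_analyzer stub is an assumption that only mimics the output shape of the transformers "sentiment-analysis" pipeline (a list with one dict carrying 'label' and 'score'), so the snippet runs without downloading a model; predict_action repeats the routing added in this commit.

# Hypothetical test sketch; fake_sentiment_analyzer is a stand-in, not the real pipeline.
def fake_sentiment_analyzer(text):
    # Mimic the pipeline's return shape: [{'label': ..., 'score': ...}]
    negative_words = {"bad", "terrible", "broken", "sad"}
    label = "NEGATIVE" if any(w in text.lower() for w in negative_words) else "POSITIVE"
    return [{"label": label, "score": 0.99}]

def analyze_sentiment(user_text, analyzer=fake_sentiment_analyzer):
    # Same access pattern as the commit: take the first result's label.
    return analyzer(user_text)[0]["label"]

def predict_action(user_text):
    # Keyword routing as introduced in app.py.
    if "NASA" in user_text or "space" in user_text:
        return "nasa_info"
    if "weather" in user_text:
        return "weather_info"
    return "general_query"

if __name__ == "__main__":
    for text in ["Show me a NASA picture", "What's the weather like?", "This app is terrible"]:
        print(text, "->", predict_action(text), analyze_sentiment(text))

Note that the routing checks are case-sensitive: "nasa" or "Space" in lowercase/other casing falls through to "general_query", which may or may not be the intended behavior.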