Sujana85 committed on
Commit 627a646 · verified · parent: a24e7dc

Update app.py

Files changed (1): app.py (+25 -27)
app.py CHANGED
```diff
@@ -1,12 +1,11 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import matplotlib.pyplot as plt
-import seaborn as sns
 import pandas as pd
 import torch
 
-# Load model
-model_id = "ibm-granite/granite-3b-code-instruct"  # Replace with actual granite model if different
+# Load your model (adjust if needed)
+model_id = "ibm-granite/granite-3b-code-instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
@@ -14,40 +13,40 @@ model = AutoModelForCausalLM.from_pretrained(
     torch_dtype=torch.float16
 )
 
-# Load sentiment analysis model
+# Sentiment analysis pipeline
 sentiment_analyzer = pipeline("sentiment-analysis")
 
-# Simulated citizen profiles
+# In-memory storage for feedback
+submitted_data = []
+
+# Dummy user profiles
 user_profiles = {
     "1001": {"location": "Hyderabad", "issues": ["traffic", "air pollution"]},
     "1002": {"location": "Delhi", "issues": ["waste management", "noise"]},
 }
 
-# Store submitted feedback during session
-submitted_data = []
-
-# Chat Function (ChatGPT-style)
+# Chat function
 def chat_fn(message, history):
-    full_prompt = tokenizer.apply_chat_template(
+    prompt = tokenizer.apply_chat_template(
         [{"role": "user", "content": message}],
         tokenize=False,
         add_generation_prompt=True
     )
-    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(**inputs, max_new_tokens=200)
-    reply = tokenizer.decode(outputs[0], skip_special_tokens=True).split("assistant")[-1].strip()
-    return reply
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True).split("assistant")[-1].strip()
+    return response
 
-# Sentiment Analysis
+# Sentiment analysis
 def analyze_sentiment(text):
     result = sentiment_analyzer(text)[0]
     return f"{result['label']} ({result['score']*100:.2f}%)"
 
-# Live Feedback → Dashboard
+# Feedback form + live dashboard
 def collect_and_plot_feedback(comment, category):
     sentiment = sentiment_analyzer(comment)[0]["label"]
     submitted_data.append({"Category": category, "Sentiment": sentiment})
-
+
     df = pd.DataFrame(submitted_data)
     summary = df.groupby(['Category', 'Sentiment']).size().unstack(fill_value=0)
 
@@ -56,32 +55,31 @@ def collect_and_plot_feedback(comment, category):
     plt.title("Live Citizen Sentiment by Category")
     plt.ylabel("Count")
     plt.tight_layout()
+
     return f"Recorded sentiment: {sentiment}", fig
 
-# Personalized Contextual Assistant
+# Personalized assistant
 def personalized_response(user_id, query):
     profile = user_profiles.get(user_id)
     if not profile:
         return "User profile not found. Please check your user ID."
+
     context = f"User from {profile['location']} concerned with: {', '.join(profile['issues'])}. Question: {query}"
-    input_tokens = tokenizer(context, return_tensors="pt").to(model.device)
-    output = model.generate(**input_tokens, max_new_tokens=150)
-    return tokenizer.decode(output[0], skip_special_tokens=True)
+    inputs = tokenizer(context, return_tensors="pt").to(model.device)
+    outputs = model.generate(**inputs, max_new_tokens=150)
+    reply = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return reply
 
-# Build Gradio App
+# Build app
 with gr.Blocks(title="Citizen AI – Intelligent Citizen Engagement Platform") as demo:
     gr.Markdown("## 🧠 Citizen AI – Intelligent Citizen Engagement Platform")
 
     with gr.Tab("🤖 Chat Assistant"):
-        gr.ChatInterface(
+        chat = gr.ChatInterface(
             fn=chat_fn,
             title="🧠 Ask Citizen AI",
-            theme="soft",
             chatbot=gr.Chatbot(label="Citizen Chat"),
-            textbox=gr.Textbox(placeholder="Type your question here...", show_label=False),
-            retry_btn="🔁 Retry",
-            clear_btn="🗑️ Clear",
-            submit_btn="➤ Send"
+            textbox=gr.Textbox(placeholder="Type your question here...", show_label=False)
         )
 
     with gr.Tab("📊 Sentiment Analysis"):
```
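The substantive change is the slimmed-down `gr.ChatInterface` call (plus dropping the unused `seaborn` import): `theme="soft"`, `retry_btn`, `clear_btn`, and `submit_btn` are removed. Newer Gradio releases dropped the per-button keyword arguments from `ChatInterface`, so the old call can fail with a `TypeError` at startup, and `theme` likely has no effect on an interface nested inside an existing `gr.Blocks` anyway. If the app has to run across Gradio versions, one option, sketched below with a hypothetical `build_chat_interface` helper, is to pass the optional kwargs only when the installed version still accepts them:

```python
# Sketch, not part of the commit: keep the optional ChatInterface kwargs only
# if the installed Gradio version still accepts them. `build_chat_interface`
# is a hypothetical helper; chat_fn is the function defined in app.py.
import inspect
import gradio as gr

def build_chat_interface(chat_fn):
    optional = {
        "retry_btn": "🔁 Retry",
        "clear_btn": "🗑️ Clear",
        "submit_btn": "➤ Send",
    }
    accepted = inspect.signature(gr.ChatInterface.__init__).parameters
    supported = {k: v for k, v in optional.items() if k in accepted}
    return gr.ChatInterface(
        fn=chat_fn,
        title="🧠 Ask Citizen AI",
        chatbot=gr.Chatbot(label="Citizen Chat"),
        textbox=gr.Textbox(placeholder="Type your question here...", show_label=False),
        **supported,
    )
```

Filtering against the constructor signature keeps the custom button labels where they are supported without pinning a Gradio version.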
 
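One thing the commit keeps in `chat_fn` is recovering the reply with `.split("assistant")[-1]`, which silently breaks if the question or the answer itself contains the word "assistant". A more robust pattern, a sketch rather than what the app does, is to decode only the tokens generated after the prompt:

```python
# Sketch: decode only the newly generated tokens instead of splitting the
# full decoded string on the literal word "assistant". Assumes the same
# module-level `tokenizer` and `model` as app.py.
def chat_fn(message, history):
    prompt = tokenizer.apply_chat_template(
        [{"role": "user", "content": message}],
        tokenize=False,
        add_generation_prompt=True,
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200)
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]  # strip the prompt
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
```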
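`collect_and_plot_feedback` returns `fig`, which is created in the two collapsed lines between the hunks (old lines 54-55), so the commit never shows it. The snippet below is only a plausible reconstruction of that step; the `fig, ax` names and the bar-chart call are assumptions:

```python
# Assumed plotting step (the commit collapses these lines): one common way
# to turn `summary` into the `fig` that the function returns.
import matplotlib.pyplot as plt
import pandas as pd

submitted_data = [
    {"Category": "traffic", "Sentiment": "NEGATIVE"},
    {"Category": "traffic", "Sentiment": "POSITIVE"},
    {"Category": "waste management", "Sentiment": "NEGATIVE"},
]
df = pd.DataFrame(submitted_data)
summary = df.groupby(['Category', 'Sentiment']).size().unstack(fill_value=0)

fig, ax = plt.subplots()         # assumption: how `fig` is created
summary.plot(kind="bar", ax=ax)  # one bar per sentiment within each category
plt.title("Live Citizen Sentiment by Category")
plt.ylabel("Count")
plt.tight_layout()
```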
 
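Since both generation paths follow the same tokenize, `model.generate`, decode pattern, they are easy to smoke-test without launching the UI. A minimal sketch, assuming app.py's module-level definitions (`tokenizer`, `model`, `sentiment_analyzer`, and the three functions) have already been executed in the current Python session:

```python
# Run in the same session as app.py's definitions; importing app.py instead
# would also trigger the model download and UI construction.
print(analyze_sentiment("The new bus routes are great!"))
# -> e.g. "POSITIVE (99.87%)"; the exact score depends on the pipeline's
#    default sentiment model

print(chat_fn("How can I report a pothole in Hyderabad?", history=[]))
print(personalized_response("1001", "What can be done about traffic?"))
print(personalized_response("9999", "anything"))  # unknown ID -> "User profile not found..."
```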