Sephfox committed on
Commit
88945ca
·
verified ·
1 Parent(s): f81068b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -10
app.py CHANGED
@@ -38,22 +38,28 @@ data = {
38
  }
39
  df = pd.DataFrame(data)
40
 
 
 
 
 
41
  # Encoding emotions
42
  emotions_target = pd.Categorical(df['emotion']).codes
43
  emotion_classes = pd.Categorical(df['emotion']).categories
44
 
45
  # Load pre-trained BERT model for emotion prediction
46
  emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
 
47
 
48
  # Lazy loading for the fine-tuned language model
49
def predict_emotion(context):
    """Classify *context* with the DistilBERT emotion model and return the top label.

    Parameters
    ----------
    context : str
        Text to classify.

    Returns
    -------
    str
        Label of the highest-scoring emotion.
    """
    tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
    emotion_prediction_pipeline = pipeline(
        'text-classification',
        model=emotion_prediction_model,
        tokenizer=tokenizer,
        return_all_scores=True,
    )
    predictions = emotion_prediction_pipeline(context)
    # With return_all_scores=True the pipeline yields, per input, a LIST of
    # {'label': ..., 'score': ...} dicts — not a mapping. The original
    # `max(emotion_scores, key=emotion_scores.get)` therefore raised
    # AttributeError (list has no .get); pick the entry with the best score.
    emotion_scores = predictions[0]
    best = max(emotion_scores, key=lambda entry: entry['score'])
    return best['label']
 
57
 
58
  # Enhanced Emotional States
59
  emotions = {
@@ -122,7 +128,8 @@ def evolve_emotions():
122
  toolbox.register("attr_float", random.uniform, 0, 20)
123
  toolbox.register("attr_intensity", random.uniform, 0, 10)
124
  toolbox.register("individual", tools.initCycle, creator.Individual,
125
- (toolbox.attr_float,) * (len(emotions) - 1) +
 
126
  (toolbox.attr_intensity,) * len(emotions) +
127
  (lambda: 100,), n=1)
128
  toolbox.register("population", tools.initRepeat, list, toolbox.individual)
@@ -147,7 +154,7 @@ def evolve_emotions():
147
  emotions['ideal_state']['percentage'] = ideal_state
148
 
149
  def predict_emotion(context):
150
- emotion_prediction_pipeline = pipeline('text-classification', model=emotion_prediction_model, return_all_scores=True)
151
  predictions = emotion_prediction_pipeline(context)
152
  emotion_scores = predictions[0]
153
  emotion_pred = max(emotion_scores, key=emotion_scores.get)
 
38
  }
39
  df = pd.DataFrame(data)
40
 
41
# Encoding the contexts using One-Hot Encoding (sparse output keeps memory low).
# NOTE: the `sparse=` keyword was renamed `sparse_output=` in scikit-learn 1.2
# and removed entirely in 1.4, so `sparse=True` raises TypeError on current
# releases; sparse output is already the default, but we pass it explicitly.
encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=True)
contexts_encoded = encoder.fit_transform(df[['context']])
44
+
45
  # Encoding emotions
46
  emotions_target = pd.Categorical(df['emotion']).codes
47
  emotion_classes = pd.Categorical(df['emotion']).categories
48
 
49
  # Load pre-trained BERT model for emotion prediction
50
  emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
51
+ emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
52
 
53
# Lazily-constructed fine-tuned language model; both caches start unset.
_finetuned_lm_tokenizer = None
_finetuned_lm_model = None


def get_finetuned_lm_model():
    """Return the (tokenizer, model) pair for the fine-tuned language model.

    The pair is built on first call and memoized in the module-level globals
    above, so the large checkpoint is only downloaded/loaded when actually
    needed and subsequent calls are cheap.
    """
    global _finetuned_lm_tokenizer, _finetuned_lm_model
    not_loaded = _finetuned_lm_tokenizer is None or _finetuned_lm_model is None
    if not_loaded:
        model_name = "microsoft/DialoGPT-large"  # Replace with your fine-tuned language model name
        _finetuned_lm_tokenizer = AutoTokenizer.from_pretrained(model_name)
        _finetuned_lm_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto",
            low_cpu_mem_usage=True,
        )
    return _finetuned_lm_tokenizer, _finetuned_lm_model
63
 
64
  # Enhanced Emotional States
65
  emotions = {
 
128
  toolbox.register("attr_float", random.uniform, 0, 20)
129
  toolbox.register("attr_intensity", random.uniform, 0, 10)
130
  toolbox.register("individual", tools.initCycle, creator.Individual,
131
+ (toolbox.attr_float,) * (len(emotions) - 1)
132
+ +
133
  (toolbox.attr_intensity,) * len(emotions) +
134
  (lambda: 100,), n=1)
135
  toolbox.register("population", tools.initRepeat, list, toolbox.individual)
 
154
  emotions['ideal_state']['percentage'] = ideal_state
155
 
156
  def predict_emotion(context):
157
+ emotion_prediction_pipeline = pipeline('text-classification', model=emotion_prediction_model, tokenizer=emotion_prediction_tokenizer, return_all_scores=True)
158
  predictions = emotion_prediction_pipeline(context)
159
  emotion_scores = predictions[0]
160
  emotion_pred = max(emotion_scores, key=emotion_scores.get)