KevSun committed
Commit 0a8c4f3 · verified · 1 Parent(s): c5863a6

Update app.py

Files changed (1): app.py (+23, -11)
app.py CHANGED
@@ -18,7 +18,7 @@ user_input = st.text_area("Your text here:")
 if st.button("Predict"):
     if user_input:
         # Tokenize input text
-        inputs = tokenizer(user_input, return_tensors="pt")
+        inputs = tokenizer(user_input, return_tensors="pt", padding=True, truncation=True, max_length=512)
 
         # Get predictions from the model
         with torch.no_grad():
@@ -29,19 +29,31 @@ if st.button("Predict"):
 
         # Convert to numpy array if necessary
         predicted_scores = predictions.numpy()
-        #predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
-        #predictions = predictions[0].tolist()
-
-        # Convert predictions to a NumPy array for the calculations
-        #predictions_np = np.array(predictions)
-
-        # Scale the predictions
-        normalized_scores = (predicted_scores / predicted_scores.max()) * 9  # Scale to 9
-        rounded_scores = np.round(normalized_scores * 2) / 2
+
+        # Apply a significant reduction to lower the scores
+        target_average_score = 6.0
+        current_average_score = np.mean(predicted_scores)
+        reduction_amount = current_average_score - target_average_score
+
+        adjusted_scores = predicted_scores - reduction_amount
+
+        # Ensure scores do not go below zero
+        adjusted_scores = np.maximum(adjusted_scores, 0)
+
+        # Normalize the scores to ensure they fall within the 0-9 range
+        normalized_scores = (adjusted_scores / adjusted_scores.max()) * 9  # Scale to 9
+
+        # Apply an additional reduction to the overall score if needed
+        overall_score_index = len(normalized_scores) - 1
+        additional_reduction = 2.0  # Adjust this value as needed
+        normalized_scores[overall_score_index] = max(normalized_scores[overall_score_index] - additional_reduction, 0)
+
+        # Round the scores
+        rounded_scores = np.round(normalized_scores * 2) / 2
 
         # Display the predictions
         labels = ["Task Achievement", "Coherence and Cohesion", "Vocabulary", "Grammar", "Overall"]
         for label, score in zip(labels, rounded_scores):
-            st.write(f"{label}: {score:}")
+            st.write(f"{label}: {score:.1f}")
     else:
         st.write("Please enter some text to get scores.")