Spaces: Runtime error

Commit 1cfeb58 · iamspruce committed
Parent(s): 6eff95d

added new routes
Files changed:
- app/core/security.py +7 -1
- app/main.py +35 -8
- app/prompts.py +24 -0
- app/routers/analyze.py +0 -153
- app/routers/conciseness.py +42 -0
- app/routers/grammar.py +61 -0
- app/routers/inclusive_language.py +42 -0
- app/routers/paraphrase.py +20 -11
- app/routers/punctuation.py +67 -0
- app/routers/readability.py +55 -0
- app/routers/sentence_correctness.py +62 -0
- app/routers/summarize.py +20 -11
- app/routers/tone.py +56 -0
- app/routers/translate.py +20 -10
- app/routers/vocabulary.py +42 -0
- app/routers/voice.py +72 -0
- requirements.txt +1 -0
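The new routers below all follow the same pattern: a POST endpoint guarded by verify_api_key that takes a JSON body with a 'text' field (translate additionally takes 'target_lang'). As a quick orientation, here is a minimal sketch (not part of the commit) of exercising one of the new routes in-process with FastAPI's TestClient; it assumes the default development key "12345" from app/core/security.py, that httpx (which TestClient depends on) is installed, and a made-up sample sentence:

# Minimal sketch: call the new /grammar_check route in-process.
from fastapi.testclient import TestClient

from app.main import app

client = TestClient(app)
resp = client.post(
    "/grammar_check",
    json={"text": "She go to school yesterday."},  # sample input, made up
    headers={"x-api-key": "12345"},  # Header(...) maps x_api_key to the "x-api-key" header
)
print(resp.status_code)  # 200 on success; 500 if a model call fails
print(resp.json())       # {"grammar": {"corrected": ..., "changes": [...]}}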
app/core/security.py CHANGED
@@ -1,8 +1,14 @@
 from fastapi import Header, HTTPException
+import os
 
 # Define a simple API key for authentication.
 # In a production environment, this should be a more robust and securely managed key.
-API_KEY …
+# Load API_KEY from an environment variable for security.
+# Fallback to a default for local development if not set, but warn about it.
+API_KEY = os.getenv("GRAMMAFREE_API_KEY", "12345")  # Use a strong default for production!
+
+if API_KEY == "12345":
+    print("WARNING: Using default API_KEY. Set GRAMMAFREE_API_KEY environment variable in production!")
 
 def verify_api_key(x_api_key: str = Header(...)):
     """
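Note that API_KEY is evaluated once at module import time, so the environment variable must be set before app.core.security is first imported (e.g., in the Space's settings or the shell that launches the server). A small sketch of the override; the secrets-generated value is purely illustrative:

# Sketch only: overriding the default key before the app imports it.
# GRAMMAFREE_API_KEY is the variable name read in the diff above.
import os
import secrets

os.environ["GRAMMAFREE_API_KEY"] = secrets.token_urlsafe(32)  # illustrative value

from app.core import security
print(security.API_KEY != "12345")  # True; the startup warning is not printed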
app/main.py CHANGED
@@ -1,6 +1,25 @@
 from fastapi import FastAPI
-# Import …
-…
+import logging  # Import logging module
+
+# Configure basic logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+logger = logging.getLogger(__name__)
+
+# Import all individual routers for different functionalities
+from app.routers import (
+    grammar,
+    punctuation,
+    sentence_correctness,
+    tone,
+    voice,
+    inclusive_language,
+    vocabulary,
+    conciseness,
+    readability,
+    paraphrase,
+    translate,
+    summarize
+)
 
 # Initialize the FastAPI application
 app = FastAPI()
@@ -11,11 +30,19 @@ def root():
     Root endpoint for the API.
     Returns a welcome message.
     """
+    logger.info("Root endpoint accessed.")
     return {"message": "Welcome to Grammafree API"}
 
-# Include the routers for …
-# …
-app.include_router(…
-app.include_router(…
-app.include_router(…
-app.include_router(…
+# Include all the individual routers for modular API structure.
+app.include_router(grammar.router)               # Grammar correction and diffs
+app.include_router(punctuation.router)           # Punctuation fixes
+app.include_router(sentence_correctness.router)  # Sentence correctness feedback
+app.include_router(tone.router)                  # Tone detection and suggestions
+app.include_router(voice.router)                 # Active/Passive voice detection
+app.include_router(inclusive_language.router)    # Inclusive language rewriting
+app.include_router(vocabulary.router)            # Vocabulary enhancement
+app.include_router(conciseness.router)           # Conciseness suggestions
+app.include_router(readability.router)           # Readability scores
+app.include_router(paraphrase.router)            # Existing paraphrasing functionality
+app.include_router(translate.router)             # Existing translation functionality
+app.include_router(summarize.router)             # Existing summarization functionality
app/prompts.py CHANGED
@@ -95,3 +95,27 @@ def tone_analysis_prompt(text: str) -> str:
         str: The generated prompt.
     """
     return f"Analyze the tone of the following text and suggest improvements if needed: {text}"
+
+def vocabulary_prompt(text: str) -> str:
+    """
+    Generates a prompt to suggest vocabulary improvements for the given text.
+
+    Args:
+        text (str): The original text.
+
+    Returns:
+        str: The generated prompt.
+    """
+    return f"For the following text, identify any weak or overused words and suggest stronger, more precise synonyms or alternative phrasing. Provide suggestions as a list of 'original word/phrase -> suggested word/phrase': {text}"
+
+def conciseness_prompt(text: str) -> str:
+    """
+    Generates a prompt to make the given text more concise.
+
+    Args:
+        text (str): The original text.
+
+    Returns:
+        str: The generated prompt.
+    """
+    return f"Rewrite the following text to be more concise, removing any unnecessary words or phrases while retaining the original meaning: {text}"
app/routers/analyze.py DELETED
@@ -1,153 +0,0 @@
-from fastapi import APIRouter, Depends, Request
-from pydantic import BaseModel
-from app import models, prompts
-from app.core.security import verify_api_key
-import language_tool_python
-import spacy
-import difflib  # Import the difflib module for text comparisons
-
-router = APIRouter()
-
-# Load the spaCy English language model for natural language processing tasks,
-# such as dependency parsing for active/passive voice detection.
-# IMPORTANT: If you get an OSError: [E050] Can't find model 'en_core_web_sm',
-# you need to run: python -m spacy download en_core_web_sm in your terminal.
-try:
-    nlp = spacy.load("en_core_web_sm")
-except OSError:
-    print("SpaCy model 'en_core_web_sm' not found. Please run: python -m spacy download en_core_web_sm")
-    # You might want to raise an HTTPException or provide a dummy NLP object
-    # if the model is crucial for the application to function.
-    # For now, we'll let it fail if not installed, as it's a critical dependency.
-    raise
-
-# Initialize LanguageTool for grammar, spelling, and style checking.
-# 'en-US' specifies the English (United States) language.
-tool = language_tool_python.LanguageTool('en-US')
-
-class AnalyzeInput(BaseModel):
-    """
-    Pydantic BaseModel for validating the input request body for the /analyze endpoint.
-    It expects a single field: 'text' (string).
-    """
-    text: str
-
-# Apply the verify_api_key dependency at the router level for this endpoint.
-# The Request object is now correctly handled without being wrapped by Depends.
-@router.post("/analyze", dependencies=[Depends(verify_api_key)])
-def analyze_text(payload: AnalyzeInput):
-    """
-    Analyzes the provided text for grammar, punctuation, sentence correctness,
-    tone, active/passive voice, and inclusive pronoun suggestions.
-
-    Args:
-        payload (AnalyzeInput): The request body containing the text to be analyzed.
-        (dependencies=[Depends(verify_api_key)]): Ensures the API key is verified before execution.
-
-    Returns:
-        dict: A dictionary containing various analysis results structured as per requirements.
-    """
-    text = payload.text
-
-    # --- 1. Grammar Suggestions with Diffs ---
-    # Get the grammatically corrected version of the original text.
-    # Note: The 'vennify/t5-base-grammar-correction' model's performance
-    # can vary. For more robust corrections, especially for subtle spelling
-    # and grammar errors, consider a larger or fine-tuned model if needed.
-    corrected_grammar = models.run_grammar_correction(text)
-
-    grammar_changes = []
-    # Use difflib to find differences between the original and corrected text.
-    # We split by words to get word-level diffs, which are easier to interpret.
-    s = difflib.SequenceMatcher(None, text.split(), corrected_grammar.split())
-
-    # Iterate through the operations (opcodes) generated by SequenceMatcher.
-    # 'equal', 'replace', 'delete', 'insert' are the types of operations.
-    for opcode, i1, i2, j1, j2 in s.get_opcodes():
-        if opcode == 'replace':
-            # If words are replaced, format as "'original_word' -> 'corrected_word'"
-            original_part = ' '.join(text.split()[i1:i2])
-            corrected_part = ' '.join(corrected_grammar.split()[j1:j2])
-            grammar_changes.append(f"'{original_part}' \u2192 '{corrected_part}'")  # Using Unicode arrow
-        elif opcode == 'delete':
-            # If words are deleted, format as "'deleted_word' removed"
-            deleted_part = ' '.join(text.split()[i1:i2])
-            grammar_changes.append(f"'{deleted_part}' removed")
-        elif opcode == 'insert':
-            # If words are inserted, format as "'inserted_word' added"
-            inserted_part = ' '.join(corrected_grammar.split()[j1:j2])
-            grammar_changes.append(f"'{inserted_part}' added")
-
-    # --- 2. Punctuation Fixes and 3. Sentence Correctness Feedback ---
-    # LanguageTool checks the original text for various issues including punctuation.
-    matches = tool.check(text)
-
-    punctuation_issues = []
-    sentence_correctness_feedback = []
-
-    for m in matches:
-        # Check if the rule ID (from LanguageTool) contains "PUNCTUATION" to categorize it.
-        if 'PUNCTUATION' in m.ruleId.upper():
-            punctuation_issues.append(m.message)
-        else:
-            # All other issues are considered general sentence correctness feedback.
-            sentence_correctness_feedback.append(m.message)
-
-    # --- 4. Tone Detection and Suggestion ---
-    # Classify the tone of the original text using the fine-tuned emotion classifier.
-    detected_tone = models.classify_tone(text)
-
-    tone_suggestion_text = ""
-    # Provide a simple tone suggestion based on the detected tone.
-    # This logic can be expanded for more sophisticated suggestions based on context or user goals.
-    if detected_tone in ["neutral", "joy", "sadness", "anger", "fear", "disgust", "surprise"]:
-        # For simplicity, we'll try to make neutral/joy more formal, and other strong emotions more neutral/calm.
-        if detected_tone in ["neutral", "joy"]:
-            tone_suggestion_text = models.run_flan_prompt(prompts.tone_prompt(text, "formal"))
-        else:  # For emotions like anger, sadness, fear, etc., suggest a more neutral/calm tone
-            tone_suggestion_text = models.run_flan_prompt(prompts.tone_prompt(text, "neutral and calm"))
-    else:
-        # If no specific suggestion, indicate that the detected tone is generally fine.
-        tone_suggestion_text = f"The detected tone '{detected_tone}' seems appropriate for general communication."
-
-
-    # --- 5. Active/Passive Voice Detection and Suggestion ---
-    doc = nlp(text)  # Process the text with spaCy for linguistic analysis
-    voice_detected = "active"
-    voice_suggestion = "None \u2014 active voice is fine here."  # Using Unicode em dash for better readability
-
-    # Iterate through tokens to find passive auxiliary verbs (e.g., "is", "was", "been" when followed by a past participle).
-    # A simple heuristic: if any token's dependency is 'auxpass', it's likely part of a passive construction.
-    for token in doc:
-        if token.dep_ == "auxpass":
-            voice_detected = "passive"
-            # If passive voice is detected, ask FLAN-T5 to rewrite it in active voice.
-            better_voice_prompt = prompts.active_voice_prompt(text)
-            voice_suggestion = models.run_flan_prompt(better_voice_prompt)
-            break  # Exit loop once passive voice is detected, no need to check further
-
-    # --- 6. Inclusive Pronoun Suggestion ---
-    # Use FLAN-T5 with a specific prompt to suggest more inclusive language.
-    inclusive_pronouns_suggestion = models.run_flan_prompt(prompts.pronoun_friendly_prompt(text))
-
-    # --- Construct the final response matching the example output structure ---
-    return {
-        "grammar": {
-            "corrected": corrected_grammar,
-            "changes": grammar_changes
-        },
-        "punctuation": {
-            "issues": punctuation_issues,
-            "suggestions": []  # The grammar correction and diffs implicitly handle suggestions here
-        },
-        "sentence_correctness": sentence_correctness_feedback,
-        "tone_analysis": {
-            "detected": detected_tone,
-            "suggestion": tone_suggestion_text
-        },
-        "voice": {
-            "detected": voice_detected,
-            "suggestion": voice_suggestion
-        },
-        "inclusive_pronouns": inclusive_pronouns_suggestion
-    }
app/routers/conciseness.py ADDED
@@ -0,0 +1,42 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app import models, prompts
+from app.core.security import verify_api_key
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+class ConcisenessInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /conciseness_check endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/conciseness_check", dependencies=[Depends(verify_api_key)])
+def conciseness_check(payload: ConcisenessInput):
+    """
+    Provides suggestions for making text more concise.
+
+    Args:
+        payload (ConcisenessInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing the concise version of the text.
+    """
+    text = payload.text
+
+    try:
+        concise_text = models.run_flan_prompt(prompts.conciseness_prompt(text))
+
+        return {
+            "concise_version": concise_text
+        }
+    except Exception as e:
+        logger.error(f"Error in conciseness_check: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during conciseness checking: {e}"
+        )
app/routers/grammar.py ADDED
@@ -0,0 +1,61 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app import models
+from app.core.security import verify_api_key
+import difflib
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+class GrammarInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /grammar_check endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/grammar_check", dependencies=[Depends(verify_api_key)])
+def grammar_check(payload: GrammarInput):
+    """
+    Corrects the grammar of the provided text and shows changes.
+
+    Args:
+        payload (GrammarInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing the corrected text and a list of changes.
+    """
+    original_text = payload.text
+
+    try:
+        corrected_text = models.run_grammar_correction(original_text)
+
+        grammar_changes = []
+        s = difflib.SequenceMatcher(None, original_text.split(), corrected_text.split())
+
+        for opcode, i1, i2, j1, j2 in s.get_opcodes():
+            if opcode == 'replace':
+                original_part = ' '.join(original_text.split()[i1:i2])
+                corrected_part = ' '.join(corrected_text.split()[j1:j2])
+                grammar_changes.append(f"'{original_part}' \u2192 '{corrected_part}'")
+            elif opcode == 'delete':
+                deleted_part = ' '.join(original_text.split()[i1:i2])
+                grammar_changes.append(f"'{deleted_part}' removed")
+            elif opcode == 'insert':
+                inserted_part = ' '.join(corrected_text.split()[j1:j2])
+                grammar_changes.append(f"'{inserted_part}' added")
+
+        return {
+            "grammar": {
+                "corrected": corrected_text,
+                "changes": grammar_changes
+            }
+        }
+    except Exception as e:
+        logger.error(f"Error in grammar_check: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during grammar checking: {e}"
+        )
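The "changes" list above is produced purely by difflib over word tokens, independently of the correction model. A self-contained illustration of that diffing step (standard library only; the sentences are made up):

import difflib

original = "She go to school yesterday"
corrected = "She went to school yesterday"

# Word-level diff, the same step grammar_check performs above.
s = difflib.SequenceMatcher(None, original.split(), corrected.split())
for opcode, i1, i2, j1, j2 in s.get_opcodes():
    if opcode == "replace":
        print(f"'{' '.join(original.split()[i1:i2])}' -> '{' '.join(corrected.split()[j1:j2])}'")
# prints: 'go' -> 'went'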
app/routers/inclusive_language.py ADDED
@@ -0,0 +1,42 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app import models, prompts
+from app.core.security import verify_api_key
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+class InclusiveLanguageInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /inclusive_language endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/inclusive_language", dependencies=[Depends(verify_api_key)])
+def inclusive_language_check(payload: InclusiveLanguageInput):
+    """
+    Provides suggestions for rewriting text using inclusive language.
+
+    Args:
+        payload (InclusiveLanguageInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing the rewritten text with inclusive language.
+    """
+    text = payload.text
+
+    try:
+        inclusive_text = models.run_flan_prompt(prompts.pronoun_friendly_prompt(text))
+
+        return {
+            "inclusive_pronouns": inclusive_text
+        }
+    except Exception as e:
+        logger.error(f"Error in inclusive_language_check: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during inclusive language check: {e}"
+        )
app/routers/paraphrase.py CHANGED
@@ -1,12 +1,14 @@
-from fastapi import APIRouter, Depends
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
 from pydantic import BaseModel
 from app import models, prompts
 from app.core.security import verify_api_key
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
 
-# Create an APIRouter instance. This will handle routes specific to paraphrasing.
 router = APIRouter()
 
-class Input(BaseModel):
+class ParaphraseInput(BaseModel):  # Renamed Input to ParaphraseInput for clarity
     """
     Pydantic BaseModel for validating the input request body for the /paraphrase endpoint.
     It expects a single field: 'text' (string).
@@ -14,17 +16,24 @@ class Input(BaseModel):
     text: str
 
 @router.post("/paraphrase", dependencies=[Depends(verify_api_key)])
-def paraphrase(…
+def paraphrase(payload: ParaphraseInput):  # Renamed input to payload for consistency
     """
-    …
+    Paraphrases the provided text.
 
     Args:
-        …
-        (dependencies=[Depends(verify_api_key)]): Ensures the API key is verified before execution.
+        payload (ParaphraseInput): The request body containing the text to be paraphrased.
 
     Returns:
-        dict: A dictionary containing the paraphrased …
+        dict: A dictionary containing the paraphrased text.
     """
-    …
-    …
-    …
+    text = payload.text
+
+    try:
+        paraphrased_text = models.run_flan_prompt(prompts.paraphrase_prompt(text))
+        return {"result": paraphrased_text}
+    except Exception as e:
+        logger.error(f"Error in paraphrase: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during paraphrasing: {e}"
+        )
app/routers/punctuation.py ADDED
@@ -0,0 +1,67 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app.core.security import verify_api_key
+import language_tool_python
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+# Initialize LanguageTool. This will be the same instance as used for punctuation.
+# Ensure Java is installed in your environment.
+try:
+    tool = language_tool_python.LanguageTool('en-US')
+except Exception as e:
+    logger.error(f"Failed to initialize LanguageTool: {e}", exc_info=True)
+    # If LanguageTool cannot be initialized, raise an error or handle gracefully.
+    # For an MVP, we might let the app start but fail on requests that use it.
+    # A more robust solution might mark the endpoint as unavailable.
+    tool = None  # Set to None if initialization fails
+
+class PunctuationInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /punctuation_check endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/punctuation_check", dependencies=[Depends(verify_api_key)])
+def punctuation_check(payload: PunctuationInput):
+    """
+    Checks the provided text for punctuation errors.
+
+    Args:
+        payload (PunctuationInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing a list of punctuation issues.
+    """
+    if tool is None:
+        raise HTTPException(
+            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+            detail="Punctuation check service is not available (LanguageTool failed to initialize)."
+        )
+
+    text = payload.text
+
+    try:
+        matches = tool.check(text)
+
+        punctuation_issues = []
+        for m in matches:
+            if 'PUNCTUATION' in m.ruleId.upper():
+                punctuation_issues.append(m.message)
+
+        return {
+            "punctuation": {
+                "issues": punctuation_issues,
+                "suggestions": []  # Suggestions might be handled by overall grammar correction
+            }
+        }
+    except Exception as e:
+        logger.error(f"Error in punctuation_check: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during punctuation checking: {e}"
+        )
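Worth noting: the substring test on ruleId is a heuristic. LanguageTool rule IDs are uppercase strings such as COMMA_PARENTHESIS_WHITESPACE, and not every punctuation-related rule carries the word PUNCTUATION in its ID. A standalone sketch of the same check step, useful for seeing which rule IDs fire on a given input (requires language-tool-python and a Java runtime; the sample text is made up):

import language_tool_python

tool = language_tool_python.LanguageTool('en-US')
matches = tool.check("this is a test it has no punctuation")
for m in matches:
    # Print each rule ID alongside its message to see what the
    # 'PUNCTUATION' substring filter above would and would not catch.
    print(m.ruleId, "->", m.message)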
app/routers/readability.py ADDED
@@ -0,0 +1,55 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app.core.security import verify_api_key
+import textstat  # Import the textstat library
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+class ReadabilityInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /readability_score endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/readability_score", dependencies=[Depends(verify_api_key)])
+def readability_score(payload: ReadabilityInput):
+    """
+    Calculates various readability scores for the provided text.
+
+    Args:
+        payload (ReadabilityInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing various readability scores.
+    """
+    text = payload.text
+
+    try:
+        # Calculate different readability scores using textstat
+        flesch_reading_ease = textstat.flesch_reading_ease(text)
+        flesch_kincaid_grade = textstat.flesch_kincaid_grade(text)
+        gunning_fog = textstat.gunning_fog(text)
+        smog_index = textstat.smog_index(text)
+        coleman_liau_index = textstat.coleman_liau_index(text)
+        automated_readability_index = textstat.automated_readability_index(text)
+
+        return {
+            "readability_scores": {
+                "flesch_reading_ease": flesch_reading_ease,
+                "flesch_kincaid_grade": flesch_kincaid_grade,
+                "gunning_fog_index": gunning_fog,
+                "smog_index": smog_index,
+                "coleman_liau_index": coleman_liau_index,
+                "automated_readability_index": automated_readability_index
+            }
+        }
+    except Exception as e:
+        logger.error(f"Error in readability_score: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during readability score calculation: {e}"
+        )
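textstat (the new entry in requirements.txt) computes all six of these scores from raw text with no model downloads. A quick standalone check; the sample text is made up, the scores are floats, and flesch_reading_ease is the only one where higher means easier:

import textstat

sample = "The quick brown fox jumps over the lazy dog. It was a sunny day."
print(textstat.flesch_reading_ease(sample))   # higher = easier to read
print(textstat.flesch_kincaid_grade(sample))  # approximate US school grade level
print(textstat.gunning_fog(sample))           # years of formal education needed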
app/routers/sentence_correctness.py ADDED
@@ -0,0 +1,62 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app.core.security import verify_api_key
+import language_tool_python
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+# Initialize LanguageTool. This will be the same instance as used for punctuation.
+# Ensure Java is installed in your environment.
+try:
+    tool = language_tool_python.LanguageTool('en-US')
+except Exception as e:
+    logger.error(f"Failed to initialize LanguageTool for sentence correctness: {e}", exc_info=True)
+    tool = None
+
+class SentenceCorrectnessInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /sentence_correctness endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/sentence_correctness", dependencies=[Depends(verify_api_key)])
+def sentence_correctness_check(payload: SentenceCorrectnessInput):
+    """
+    Provides feedback on sentence-level correctness (e.g., fragments, subject-verb agreement).
+
+    Args:
+        payload (SentenceCorrectnessInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing a list of sentence correctness feedback.
+    """
+    if tool is None:
+        raise HTTPException(
+            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+            detail="Sentence correctness service is not available (LanguageTool failed to initialize)."
+        )
+
+    text = payload.text
+
+    try:
+        matches = tool.check(text)
+
+        sentence_correctness_feedback = []
+        for m in matches:
+            # Exclude punctuation issues, as they are handled in a separate endpoint
+            if 'PUNCTUATION' not in m.ruleId.upper():
+                sentence_correctness_feedback.append(m.message)
+
+        return {
+            "sentence_correctness": sentence_correctness_feedback
+        }
+    except Exception as e:
+        logger.error(f"Error in sentence_correctness_check: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during sentence correctness checking: {e}"
+        )
app/routers/summarize.py CHANGED
@@ -1,12 +1,14 @@
-from fastapi import APIRouter, Depends
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
 from pydantic import BaseModel
 from app import models, prompts
 from app.core.security import verify_api_key
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
 
-# Create an APIRouter instance. This will handle routes specific to summarization.
 router = APIRouter()
 
-class Input(BaseModel):
+class SummarizeInput(BaseModel):  # Renamed Input to SummarizeInput for clarity
    """
     Pydantic BaseModel for validating the input request body for the /summarize endpoint.
     It expects a single field: 'text' (string).
@@ -14,17 +16,24 @@ class Input(BaseModel):
     text: str
 
 @router.post("/summarize", dependencies=[Depends(verify_api_key)])
-def summarize(…
+def summarize(payload: SummarizeInput):  # Renamed input to payload for consistency
     """
-    …
+    Summarizes the provided text.
 
     Args:
-        …
-        (dependencies=[Depends(verify_api_key)]): Ensures the API key is verified before execution.
+        payload (SummarizeInput): The request body containing the text to be summarized.
 
     Returns:
-        dict: A dictionary containing the summarized …
+        dict: A dictionary containing the summarized text.
     """
-    …
-    …
-    …
+    text = payload.text
+
+    try:
+        summarized_text = models.run_flan_prompt(prompts.summarize_prompt(text))
+        return {"result": summarized_text}
+    except Exception as e:
+        logger.error(f"Error in summarize: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during summarization: {e}"
+        )
app/routers/tone.py ADDED
@@ -0,0 +1,56 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app import models, prompts
+from app.core.security import verify_api_key
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+class ToneInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /tone_analysis endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/tone_analysis", dependencies=[Depends(verify_api_key)])
+def tone_analysis(payload: ToneInput):
+    """
+    Analyzes the tone of the provided text and suggests improvements.
+
+    Args:
+        payload (ToneInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing the detected tone and a suggestion.
+    """
+    text = payload.text
+
+    try:
+        detected_tone = models.classify_tone(text)
+
+        tone_suggestion_text = ""
+        # Provide a simple tone suggestion based on the detected tone.
+        # This logic can be expanded for more sophisticated suggestions based on context or user goals.
+        if detected_tone in ["neutral", "joy", "sadness", "anger", "fear", "disgust", "surprise"]:
+            if detected_tone in ["neutral", "joy"]:
+                tone_suggestion_text = models.run_flan_prompt(prompts.tone_prompt(text, "formal"))
+            else:  # For emotions like anger, sadness, fear, etc., suggest a more neutral/calm tone
+                tone_suggestion_text = models.run_flan_prompt(prompts.tone_prompt(text, "neutral and calm"))
+        else:
+            tone_suggestion_text = f"The detected tone '{detected_tone}' seems appropriate for general communication."
+
+        return {
+            "tone_analysis": {
+                "detected": detected_tone,
+                "suggestion": tone_suggestion_text
+            }
+        }
+    except Exception as e:
+        logger.error(f"Error in tone_analysis: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during tone analysis: {e}"
+        )
app/routers/translate.py CHANGED
@@ -1,9 +1,11 @@
-from fastapi import APIRouter, Depends
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
 from pydantic import BaseModel
 from app import models
 from app.core.security import verify_api_key
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
 
-# Create an APIRouter instance. This will handle routes specific to translation.
 router = APIRouter()
 
 class TranslateInput(BaseModel):
@@ -15,17 +17,25 @@ class TranslateInput(BaseModel):
     target_lang: str
 
 @router.post("/translate", dependencies=[Depends(verify_api_key)])
-def translate(…
+def translate(payload: TranslateInput):  # Renamed input to payload for consistency
     """
-    …
+    Translates the provided text to a target language.
 
     Args:
-        …
-        (dependencies=[Depends(verify_api_key)]): Ensures the API key is verified before execution.
+        payload (TranslateInput): The request body containing the text and target language.
 
     Returns:
-        dict: A dictionary containing the translated …
+        dict: A dictionary containing the translated text.
     """
-    …
-    …
-    …
+    text = payload.text
+    target_lang = payload.target_lang
+
+    try:
+        translated_text = models.run_translation(text, target_lang)
+        return {"result": translated_text}
+    except Exception as e:
+        logger.error(f"Error in translate: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during translation: {e}"
+        )
app/routers/vocabulary.py ADDED
@@ -0,0 +1,42 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app import models, prompts
+from app.core.security import verify_api_key
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+class VocabularyInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /vocabulary_suggestions endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/vocabulary_suggestions", dependencies=[Depends(verify_api_key)])
+def vocabulary_suggestions(payload: VocabularyInput):
+    """
+    Provides suggestions for vocabulary improvement (e.g., stronger synonyms).
+
+    Args:
+        payload (VocabularyInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing vocabulary suggestions.
+    """
+    text = payload.text
+
+    try:
+        suggestions_raw = models.run_flan_prompt(prompts.vocabulary_prompt(text))
+
+        return {
+            "vocabulary_suggestions": suggestions_raw
+        }
+    except Exception as e:
+        logger.error(f"Error in vocabulary_suggestions: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during vocabulary suggestion: {e}"
+        )
app/routers/voice.py ADDED
@@ -0,0 +1,72 @@
+from fastapi import APIRouter, Depends, HTTPException, status  # Import HTTPException and status
+from pydantic import BaseModel
+from app import models, prompts
+from app.core.security import verify_api_key
+import spacy
+import logging  # Import logging
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+# Load the spaCy English language model.
+try:
+    nlp = spacy.load("en_core_web_sm")
+except OSError as e:
+    logger.error(f"SpaCy model 'en_core_web_sm' not found. Please run: python -m spacy download en_core_web_sm. Error: {e}", exc_info=True)
+    nlp = None  # Set to None if initialization fails
+    # Re-raising here to prevent server startup if critical dependency is missing
+    raise RuntimeError("SpaCy model 'en_core_web_sm' not loaded. Please install it.")
+
+
+class VoiceInput(BaseModel):
+    """
+    Pydantic BaseModel for validating the input request body for the /voice_analysis endpoint.
+    It expects a single field: 'text' (string).
+    """
+    text: str
+
+@router.post("/voice_analysis", dependencies=[Depends(verify_api_key)])
+def voice_analysis(payload: VoiceInput):
+    """
+    Detects active/passive voice and suggests improvements.
+
+    Args:
+        payload (VoiceInput): The request body containing the text to be analyzed.
+
+    Returns:
+        dict: A dictionary containing the detected voice and a suggestion.
+    """
+    if nlp is None:
+        raise HTTPException(
+            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+            detail="Voice analysis service is not available (SpaCy model failed to load)."
+        )
+
+    text = payload.text
+
+    try:
+        doc = nlp(text)
+
+        voice_detected = "active"
+        voice_suggestion = "None \u2014 active voice is fine here."
+
+        for token in doc:
+            if token.dep_ == "auxpass":
+                voice_detected = "passive"
+                better_voice_prompt = prompts.active_voice_prompt(text)
+                voice_suggestion = models.run_flan_prompt(better_voice_prompt)
+                break
+
+        return {
+            "voice": {
+                "detected": voice_detected,
+                "suggestion": voice_suggestion
+            }
+        }
+    except Exception as e:
+        logger.error(f"Error in voice_analysis: {e}", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"An error occurred during voice analysis: {e}"
+        )
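The passive-voice check above leans entirely on spaCy's dependency labels: in the English models, passive auxiliaries ("was", "is", "been") are typically labelled auxpass. A standalone sketch of the heuristic (requires python -m spacy download en_core_web_sm; the sentence is made up):

import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("The report was written by the intern.")
# The model typically labels "was" as a passive auxiliary here.
print(any(token.dep_ == "auxpass" for token in doc))  # expected: True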
requirements.txt CHANGED
@@ -8,3 +8,4 @@ spacy
 nltk
 language-tool-python
 scikit-learn
+textstat