import os
import json
import requests

# Pipeline stages (local modules of this Space).
from lexer import lexer
from parser import Parser
from semantico import AnalizadorSemantico
from codigo_intermedio import GeneradorIntermedio
from sugerencias_nlp import procesar_comentarios

# Hugging Face Inference API configuration. The token is optional, but
# unauthenticated requests may be rate-limited.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
# NOTE: CodeBERTa-language-id is a language-identification (classification)
# model, while the payload and response parsing below assume a
# text-generation endpoint; pointing API_URL at a generation model is
# likely the intended setup.
API_URL = "https://api-inference.huggingface.co/models/huggingface/CodeBERTa-language-id"
def sugerencia_nlp_error(error_msg):
    """Ask the remote model for a short fix suggestion for one error message."""
    payload = {
        "inputs": f"ERROR: {error_msg}\nSUGERENCIA:",
        "parameters": {
            "max_new_tokens": 40,
            "temperature": 0.7,
            "return_full_text": False,
        },
    }
    try:
        response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=10)
        if response.status_code == 200:
            # Generation endpoints answer with [{"generated_text": "..."}].
            return response.json()[0]["generated_text"].strip()
        return f"(sin sugerencia: {response.status_code})"
    except Exception as e:
        print("Failed to fetch NLP suggestion:", e)
        return "(sugerencia no disponible por error de conexión)"
def main():
    print("Starting analysis")
    with open("entrada.txt", "r", encoding="utf-8") as f:
        codigo = f.read()

    # Accumulators for the final report; they keep sensible defaults even
    # when an earlier stage fails.
    errores_parser = []
    errores_semanticos = []
    variables = {}
    comentarios_ext = []
    anotaciones = []
    ast = []

    try:
        print("Lexical analysis...")
        tokens = lexer(codigo)
        parser = Parser(tokens)
        try:
            print("Syntactic analysis...")
            ast = parser.parse()
            print("Parser finished successfully")
        except SyntaxError as e:
            print("Error caught in parser:", e)
            errores_parser.append(str(e))
            ast = []
    except Exception as e:
        print("Error in lexer or parser:", e)
        errores_parser.append(f"Error crítico: {str(e)}")
        ast = []
    if ast:
        try:
            print("Semantic analysis...")
            semantico = AnalizadorSemantico(ast)
            resultado = semantico.analizar()
            # Pair every semantic error with an NLP-generated suggestion.
            errores_semanticos = [
                {"mensaje": err, "sugerencia": sugerencia_nlp_error(err)}
                for err in resultado["errores_semanticos"]
            ]
            variables = resultado["variables_declaradas"]
            anotaciones = resultado.get("anotaciones", [])
            print("Semantic analysis finished")

            print("Generating intermediate code...")
            generador = GeneradorIntermedio()
            intermedio = generador.generar(ast)
            with open("codigo_intermedio.txt", "w", encoding="utf-8") as f:
                for linea in intermedio:
                    f.write(linea + "\n")
        except Exception as e:
            print("Error in semantic stage:", e)
            errores_semanticos.append({"mensaje": str(e), "sugerencia": "(no procesado)"})
    try:
        print("Processing comments...")
        comentarios_ext = [
            {"comentario": c, "sugerencia": s}
            for c, s in procesar_comentarios(codigo)
        ]
    except Exception as e:
        print("Error while processing comments:", e)
        comentarios_ext = []

    print("Writing analisis.json...")
    analisis = {
        "variables_declaradas": variables,
        "errores_parser": errores_parser,
        "errores_semanticos": errores_semanticos,
        "comentarios": comentarios_ext,
        "anotaciones": anotaciones,
    }
    # ensure_ascii=False keeps the accented Spanish messages readable in the JSON.
    with open("analisis.json", "w", encoding="utf-8") as f:
        json.dump(analisis, f, indent=2, ensure_ascii=False)
    print("Analysis complete.")
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Last-resort handler: still emit a JSON report so downstream
        # consumers always find analisis.json.
        print("Fatal error in main execution:", e)
        with open("analisis.json", "w", encoding="utf-8") as f:
            json.dump({"error_fatal": str(e)}, f, indent=2, ensure_ascii=False)