from fastapi import FastAPI, HTTPException, Header
from pydantic import BaseModel
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
import base64
import os
import logging
from datetime import datetime
from fastapi.responses import HTMLResponse
from simple_salesforce import Salesforce
import requests
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

app = FastAPI()

# Environment variables
SF_USERNAME = os.getenv("SF_USERNAME")
SF_PASSWORD = os.getenv("SF_PASSWORD")
SF_SECURITY_TOKEN = os.getenv("SF_SECURITY_TOKEN")
SF_DOMAIN = os.getenv("SF_DOMAIN", "login")
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
HUGGINGFACE_API_URL = os.getenv("HUGGINGFACE_API_URL")

# Validate environment variables
required_env_vars = ["SF_USERNAME", "SF_PASSWORD", "SF_SECURITY_TOKEN"]
for var in required_env_vars:
    if not os.getenv(var):
        logger.error(f"Environment variable {var} is not set")
        raise ValueError(f"Environment variable {var} is not set")
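# Example .env file read by load_dotenv() above. The values are illustrative
# placeholders, not real credentials:
#
#   SF_USERNAME=integration.user@example.com
#   SF_PASSWORD=********
#   SF_SECURITY_TOKEN=********
#   SF_DOMAIN=login                     # "test" for a sandbox org
#   HUGGINGFACE_API_KEY=hf_xxxxxxxx     # optional; enables remote scoring
#   HUGGINGFACE_API_URL=https://...     # optional; custom scoring endpoint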
# Hugging Face configuration
USE_HUGGINGFACE = bool(HUGGINGFACE_API_KEY and HUGGINGFACE_API_URL)
logger.info(f"Hugging Face integration {'enabled' if USE_HUGGINGFACE else 'disabled'}")

# Salesforce connection
sf = None
try:
    sf = Salesforce(
        username=SF_USERNAME,
        password=SF_PASSWORD,
        security_token=SF_SECURITY_TOKEN,
        domain=SF_DOMAIN
    )
    logger.info("Successfully connected to Salesforce")
except Exception as e:
    logger.error(f"Failed to connect to Salesforce: {str(e)}")
    raise RuntimeError(f"Cannot connect to Salesforce: {str(e)}")

# VendorLog model
class VendorLog(BaseModel):
    vendorLogId: str
    vendorId: str
    vendorRecordId: str
    workDetails: str
    qualityReport: str
    incidentLog: str
    workCompletionDate: str
    actualCompletionDate: str
    vendorLogName: str
    delayDays: int
    project: str

# Store vendor logs
vendor_logs = []

def validate_salesforce_fields():
    """Validate required Salesforce fields"""
    try:
        vendor_log_fields = [f['name'] for f in sf.Vendor_Log__c.describe()['fields']]
        required_fields = [
            'Vendor__c', 'Work_Completion_Percentage__c', 'Quality_Percentage__c',
            'Incident_Severity__c', 'Work_Completion_Date__c', 'Actual_Completion_Date__c',
            'Delay_Days__c', 'Project__c'
        ]
        for field in required_fields:
            if field not in vendor_log_fields:
                logger.error(f"Field {field} not found in Vendor_Log__c")
                raise ValueError(f"Field {field} not found in Vendor_Log__c")

        score_fields = [f['name'] for f in sf.Subcontractor_Performance_Score__c.describe()['fields']]
        required_score_fields = [
            'Vendor__c', 'Month__c', 'Quality_Score__c', 'Timeliness_Score__c',
            'Safety_Score__c', 'Communication_Score__c', 'Alert_Flag__c', 'Certification_URL__c'
        ]
        for field in required_score_fields:
            if field not in score_fields:
                logger.error(f"Field {field} not found in Subcontractor_Performance_Score__c")
                raise ValueError(f"Field {field} not found in Subcontractor_Performance_Score__c")

        logger.info("Salesforce fields validated successfully")
    except Exception as e:
        logger.error(f"Error validating Salesforce fields: {str(e)}")
        raise

# Validate schema on startup
validate_salesforce_fields()

def fetch_vendor_logs_from_salesforce():
    try:
        query = """
            SELECT Id, Name, Vendor__c, Work_Completion_Percentage__c, Quality_Percentage__c,
                   Incident_Severity__c, Work_Completion_Date__c, Actual_Completion_Date__c,
                   Delay_Days__c, Project__c
            FROM Vendor_Log__c
            WHERE Vendor__c != null
        """
        result = sf.query_all(query)
        logs = []
        for record in result['records']:
            try:
                # `or` fallbacks guard against fields that are present but null
                log = VendorLog(
                    vendorLogId=record.get('Id') or 'Unknown',
                    vendorId=record.get('Name') or 'Unknown',
                    vendorRecordId=record.get('Vendor__c') or 'Unknown',
                    workDetails=str(record.get('Work_Completion_Percentage__c') or 0.0),
                    qualityReport=str(record.get('Quality_Percentage__c') or 0.0),
                    incidentLog=record.get('Incident_Severity__c') or 'None',
                    workCompletionDate=record.get('Work_Completion_Date__c') or 'N/A',
                    actualCompletionDate=record.get('Actual_Completion_Date__c') or 'N/A',
                    vendorLogName=record.get('Name') or 'Unknown',
                    delayDays=int(record.get('Delay_Days__c') or 0),
                    project=record.get('Project__c') or 'Unknown'
                )
                logs.append(log)
            except Exception as e:
                logger.warning(f"Skipping invalid Vendor_Log__c record {record.get('Id')}: {str(e)}")
        logger.info(f"Fetched {len(logs)} vendor logs")
        return logs
    except Exception as e:
        logger.error(f"Error fetching vendor logs: {str(e)}")
        return []

def calculate_scores_local(log: VendorLog):
    try:
        work_completion_percentage = float(log.workDetails or 0.0)
        quality_percentage = float(log.qualityReport or 0.0)
        quality_score = quality_percentage
        timeliness_score = 100.0 if log.delayDays <= 0 else 80.0 if log.delayDays <= 3 else 60.0 if log.delayDays <= 7 else 40.0
        severity_map = {'None': 100.0, 'Low': 80.0, 'Minor': 80.0, 'Medium': 50.0, 'High': 20.0}
        safety_score = severity_map.get(log.incidentLog, 100.0)
        communication_score = (quality_score * 0.33 + timeliness_score * 0.33 + safety_score * 0.33)
        return {
            'qualityScore': round(quality_score, 2),
            'timelinessScore': round(timeliness_score, 2),
            'safetyScore': round(safety_score, 2),
            'communicationScore': round(communication_score, 2)
        }
    except Exception as e:
        logger.error(f"Error calculating local scores: {str(e)}")
        return {'qualityScore': 0.0, 'timelinessScore': 0.0, 'safetyScore': 0.0, 'communicationScore': 0.0}
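# Worked example for the local scoring above (illustrative input): a log with
# qualityReport='85', delayDays=2 and incidentLog='Low' yields
# quality 85.0, timeliness 80.0, safety 80.0, and
# communication = 85*0.33 + 80*0.33 + 80*0.33 = 80.85.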
def calculate_scores_huggingface(log: VendorLog):
    try:
        payload = {
            'vendor_id': log.vendorId,
            'delay_days': log.delayDays,
            'work_completion_percentage': float(log.workDetails or 0.0),
            'quality_percentage': float(log.qualityReport or 0.0),
            'incident_severity': log.incidentLog,
            'communication_frequency': 5
        }
        headers = {'Authorization': f'Bearer {HUGGINGFACE_API_KEY}', 'Content-Type': 'application/json'}
        response = requests.post(HUGGINGFACE_API_URL, json=payload, headers=headers, timeout=10)
        response.raise_for_status()
        result = response.json()
        return {
            'qualityScore': round(result.get('quality_score', 0.0), 2),
            'timelinessScore': round(result.get('timeliness_score', 0.0), 2),
            'safetyScore': round(result.get('safety_score', 0.0), 2),
            'communicationScore': round(result.get('communication_score', 0.0), 2)
        }
    except Exception as e:
        logger.error(f"Hugging Face API error: {str(e)}")
        return calculate_scores_local(log)  # Fallback to local scoring

def calculate_scores(log: VendorLog):
    return calculate_scores_huggingface(log) if USE_HUGGINGFACE else calculate_scores_local(log)

def get_feedback(score: float, metric: str) -> str:
    try:
        if score >= 90:
            return "Excellent: Maintain this standard"
        elif score >= 70:
            return "Good: Keep up the good work"
        elif score >= 50:
            return f"Needs Improvement: {'Maintain schedules' if metric == 'Timeliness' else 'Improve quality' if metric == 'Quality' else 'Enhance safety' if metric == 'Safety' else 'Better communication'}"
        else:
            return f"Poor: {'Significant delays' if metric == 'Timeliness' else 'Quality issues' if metric == 'Quality' else 'Safety issues' if metric == 'Safety' else 'Communication issues'}"
    except Exception:
        return "Feedback unavailable"

def generate_pdf(vendor_id: str, vendor_log_name: str, scores: dict):
    try:
        filename = f'report_{vendor_id}_{datetime.now().strftime("%Y%m%d%H%M%S")}.pdf'
        c = canvas.Canvas(filename, pagesize=letter)
        c.setFont('Helvetica', 12)
        c.drawString(100, 750, 'Subcontractor Performance Report')
        c.drawString(100, 730, f'Vendor ID: {vendor_id}')
        c.drawString(100, 710, f'Vendor Log Name: {vendor_log_name}')
        c.drawString(100, 690, f'Quality Score: {scores["qualityScore"]}% ({get_feedback(scores["qualityScore"], "Quality")})')
        c.drawString(100, 670, f'Timeliness Score: {scores["timelinessScore"]}% ({get_feedback(scores["timelinessScore"], "Timeliness")})')
        c.drawString(100, 650, f'Safety Score: {scores["safetyScore"]}% ({get_feedback(scores["safetyScore"], "Safety")})')
        c.drawString(100, 630, f'Communication Score: {scores["communicationScore"]}% ({get_feedback(scores["communicationScore"], "Communication")})')
        c.save()
        with open(filename, 'rb') as f:
            pdf_content = f.read()
        os.remove(filename)
        return pdf_content
    except Exception as e:
        logger.error(f"Error generating PDF: {str(e)}")
        raise HTTPException(status_code=500, detail="Failed to generate PDF")

def determine_alert_flag(scores: dict, all_logs: list):
    try:
        if not all_logs:
            return False
        avg_score = sum(scores.values()) / 4
        if avg_score < 50:
            return True
        lowest_avg = min([sum(log['scores'].values()) / 4 for log in all_logs], default=avg_score)
        return avg_score == lowest_avg
    except Exception as e:
        logger.error(f"Error determining alert flag: {str(e)}")
        return False

def store_scores_in_salesforce(log: VendorLog, scores: dict, pdf_content: bytes, alert_flag: bool):
    try:
        score_record = sf.Subcontractor_Performance_Score__c.create({
            'Vendor__c': log.vendorRecordId,
            'Month__c': datetime.today().replace(day=1).strftime('%Y-%m-%d'),
            'Quality_Score__c': scores['qualityScore'],
            'Timeliness_Score__c': scores['timelinessScore'],
            'Safety_Score__c': scores['safetyScore'],
            'Communication_Score__c': scores['communicationScore'],
            'Alert_Flag__c': alert_flag
        })
        score_record_id = score_record['id']
        logger.info(f"Created score record: {score_record_id}")

        pdf_base64 = base64.b64encode(pdf_content).decode('utf-8')
        content_version = sf.ContentVersion.create({
            'Title': f'Performance_Report_{log.vendorId}',
            'PathOnClient': f'report_{log.vendorId}.pdf',
            'VersionData': pdf_base64,
            'FirstPublishLocationId': score_record_id
        })
        content_version_id = content_version['id']

        content_version_record = sf.query(f"SELECT ContentDocumentId FROM ContentVersion WHERE Id = '{content_version_id}'")
        if content_version_record['totalSize'] == 0:
            logger.error(f"No ContentVersion for ID: {content_version_id}")
            raise ValueError("Failed to retrieve ContentDocumentId")
        content_document_id = content_version_record['records'][0]['ContentDocumentId']

        pdf_url = f"https://{sf.sf_instance}/sfc/servlet.shepherd/document/download/{content_document_id}"
        sf.Subcontractor_Performance_Score__c.update(score_record_id, {'Certification_URL__c': pdf_url})
        logger.info(f"Updated score record with PDF URL: {pdf_url}")
    except Exception as e:
        logger.error(f"Error storing scores in Salesforce: {str(e)}")
        raise HTTPException(status_code=500, detail="Failed to store scores")
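# Illustrative request to the /score endpoint defined below. Field names follow
# the VendorLog model; the values and record IDs are made up:
#
#   import requests
#   payload = {
#       "vendorLogId": "a0X000000000001", "vendorId": "VEND-001",
#       "vendorRecordId": "001000000000001", "workDetails": "92.5",
#       "qualityReport": "88.0", "incidentLog": "Low",
#       "workCompletionDate": "2024-05-31", "actualCompletionDate": "2024-06-02",
#       "vendorLogName": "VL-00042", "delayDays": 2, "project": "Plant Upgrade"
#   }
#   requests.post("http://localhost:7860/score", json=payload, timeout=30)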
@app.post('/score')
async def score_vendor(log: VendorLog):
    try:
        scores = calculate_scores(log)
        pdf_content = generate_pdf(log.vendorId, log.vendorLogName, scores)
        pdf_base64 = base64.b64encode(pdf_content).decode('utf-8')
        alert_flag = determine_alert_flag(scores, vendor_logs)
        store_scores_in_salesforce(log, scores, pdf_content, alert_flag)
        vendor_logs.append({
            'vendorLogId': log.vendorLogId,
            'vendorId': log.vendorId,
            'vendorLogName': log.vendorLogName,
            'workDetails': log.workDetails,
            'qualityReport': log.qualityReport,
            'incidentLog': log.incidentLog,
            'workCompletionDate': log.workCompletionDate,
            'actualCompletionDate': log.actualCompletionDate,
            'delayDays': log.delayDays,
            'project': log.project,
            'scores': scores,
            'extracted': True
        })
        return {
            'vendorLogId': log.vendorLogId,
            'vendorId': log.vendorId,
            'vendorLogName': log.vendorLogName,
            'qualityScore': scores['qualityScore'],
            'timelinessScore': scores['timelinessScore'],
            'safetyScore': scores['safetyScore'],
            'communicationScore': scores['communicationScore'],
            'pdfContent': pdf_base64,
            'alert': alert_flag
        }
    except HTTPException as e:
        raise e
    except Exception as e:
        logger.error(f"Error in /score: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Error processing vendor log: {str(e)}")
@app.get('/', response_class=HTMLResponse)
async def get_dashboard():
    try:
        global vendor_logs
        fetched_logs = fetch_vendor_logs_from_salesforce()
        for log in fetched_logs:
            if not any(existing_log['vendorLogId'] == log.vendorLogId for existing_log in vendor_logs):
                scores = calculate_scores(log)
                pdf_content = generate_pdf(log.vendorId, log.vendorLogName, scores)
                alert_flag = determine_alert_flag(scores, vendor_logs)
                store_scores_in_salesforce(log, scores, pdf_content, alert_flag)
                vendor_logs.append({
                    'vendorLogId': log.vendorLogId,
                    'vendorId': log.vendorId,
                    'vendorLogName': log.vendorLogName,
                    'workDetails': log.workDetails,
                    'qualityReport': log.qualityReport,
                    'incidentLog': log.incidentLog,
                    'workCompletionDate': log.workCompletionDate,
                    'actualCompletionDate': log.actualCompletionDate,
                    'delayDays': log.delayDays,
                    'project': log.project,
                    'scores': scores,
                    'extracted': True
                })

        html_content = """
        <html>
        <head><title>Subcontractor Performance Dashboard</title></head>
        <body>
            <h2>Vendor Logs</h2>
            <table border="1">
                <tr>
                    <th>Vendor ID</th><th>Vendor Log Name</th><th>Project</th>
                    <th>Work Completion %</th><th>Quality %</th><th>Incident Severity</th>
                    <th>Work Completion Date</th><th>Actual Completion Date</th><th>Delay Days</th>
                </tr>
                {% if not vendor_logs %}
                <tr><td colspan="9">No vendor logs available</td></tr>
                {% endif %}
                {% for log in vendor_logs %}
                <tr>
                    <td>{{ log['vendorId'] }}</td>
                    <td>{{ log['vendorLogName'] }}</td>
                    <td>{{ log['project'] }}</td>
                    <td>{{ log['workDetails'] }}</td>
                    <td>{{ log['qualityReport'] }}</td>
                    <td>{{ log['incidentLog'] }}</td>
                    <td>{{ log['workCompletionDate'] }}</td>
                    <td>{{ log['actualCompletionDate'] }}</td>
                    <td>{{ log['delayDays'] }}</td>
                </tr>
                {% endfor %}
            </table>

            <h2>Performance Scores</h2>
            <table border="1">
                <tr>
                    <th>Vendor ID</th><th>Vendor Log Name</th><th>Project</th>
                    <th>Quality Score</th><th>Timeliness Score</th><th>Safety Score</th>
                    <th>Communication Score</th><th>Alert Flag</th>
                </tr>
                {% if not vendor_logs %}
                <tr><td colspan="8">No scores available</td></tr>
                {% endif %}
                {% for log in vendor_logs %}
                <tr>
                    <td>{{ log['vendorId'] }}</td>
                    <td>{{ log['vendorLogName'] }}</td>
                    <td>{{ log['project'] }}</td>
                    <td>{{ log['scores']['qualityScore'] }}%</td>
                    <td>{{ log['scores']['timelinessScore'] }}%</td>
                    <td>{{ log['scores']['safetyScore'] }}%</td>
                    <td>{{ log['scores']['communicationScore'] }}%</td>
                    <td>{{ 'Checked' if determine_alert_flag(log['scores'], vendor_logs) else 'Unchecked' }}</td>
                </tr>
                {% endfor %}
            </table>
        </body>
        </html>
        """
        # Render the {{ ... }} placeholders in the template with Jinja2
        from jinja2 import Template
        rendered = Template(html_content).render(vendor_logs=vendor_logs, determine_alert_flag=determine_alert_flag)
        return HTMLResponse(content=rendered)
    except Exception as e:
        logger.error(f"Error loading dashboard: {str(e)}")
        return HTMLResponse(content="<html><body>Failed to load dashboard. Check logs for details.</body></html>", status_code=500)
", status_code=500) @app.post('/generate') async def generate_scores(): try: global vendor_logs vendor_logs = [] fetched_logs = fetch_vendor_logs_from_salesforce() for log in fetched_logs: scores = calculate_scores(log) pdf_content = generate_pdf(log.vendorId, log.vendorLogName, scores) alert_flag = determine_alert_flag(scores, vendor_logs) store_scores_in_salesforce(log, scores, pdf_content, alert_flag) vendor_logs.append({ 'vendorLogId': log.vendorLogId, 'vendorId': log.vendorId, 'vendorLogName': log.vendorLogName, 'workDetails': log.workDetails, 'qualityReport': log.qualityReport, 'incidentLog': log.incidentLog, 'workCompletionDate': log.workCompletionDate, 'actualCompletionDate': log.actualCompletionDate, 'delayDays': log.delayDays, 'project': log.project, 'scores': scores, 'extracted': True }) logger.info(f"Generated scores for {len(vendor_logs)} logs") return {"status": "success"} except Exception as e: logger.error(f"Error in /generate: {str(e)}") raise HTTPException(status_code=500, detail="Failed to generate scores") @app.get('/debug') async def debug_info(): try: log_count = sf.query("SELECT COUNT() FROM Vendor_Log__c")['totalSize'] fields = [f['name'] for f in sf.Vendor_Log__c.describe()['fields']] score_fields = [f['name'] for f in sf.Subcontractor_Performance_Score__c.describe()['fields']] return { "salesforce_connected": True, "vendor_log_count": log_count, "vendor_log_fields": fields, "score_fields": score_fields, "huggingface_enabled": USE_HUGGINGFACE } except Exception as e: logger.error(f"Debug error: {str(e)}") return {"salesforce_connected": False, "error": str(e)} if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=7860)