VrundavGamit committed on
Commit bff60bc · verified · 1 Parent(s): 5ac9888

Upload folder using huggingface_hub

Files changed (3)
  1. Dockerfile +16 -0
  2. app.py +70 -0
  3. requirements.txt +11 -0
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ FROM python:3.9-slim
+
+ # Set the working directory inside the container
+ WORKDIR /app
+
+ # Copy all files from the current directory to the container's working directory
+ COPY . .
+
+ # Install dependencies from the requirements file without using cache to reduce image size
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ # Define the command to start the application using Gunicorn with 4 worker processes
+ # - `-w 4`: Uses 4 worker processes for handling requests
+ # - `-b 0.0.0.0:7860`: Binds the server to port 7860 on all network interfaces
+ # - `app:churn_predictor_api`: Runs the Flask instance named `churn_predictor_api` defined in `app.py`
+ CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "app:churn_predictor_api"]
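For local testing, the image can be built and run with the standard Docker CLI, e.g. `docker build -t churn-api .` followed by `docker run -p 7860:7860 churn-api`; the tag `churn-api` is only an illustrative name, and 7860 is the port Gunicorn binds to in the CMD above.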
app.py ADDED
@@ -0,0 +1,70 @@
+ import joblib
+ import pandas as pd
+ from flask import Flask, request, jsonify
+
+ # Initialize the Flask app with a name
+ churn_predictor_api = Flask("Customer Churn Predictor")
+
+ # Load the trained churn prediction model
+ model = joblib.load("churn_prediction_model_v1_0.joblib")
+
+ # Define a route for the home page
+ @churn_predictor_api.get('/')
+ def home():
+     return "Welcome to the Customer Churn Prediction API!"
+
+ # Define an endpoint to predict churn for a single customer
+ @churn_predictor_api.post('/v1/customer')
+ def predict_churn():
+     # Get JSON data from the request
+     customer_data = request.get_json()
+
+     # Extract relevant customer features from the input data
+     sample = {
+         'CreditScore': customer_data['CreditScore'],
+         'Geography': customer_data['Geography'],
+         'Age': customer_data['Age'],
+         'Tenure': customer_data['Tenure'],
+         'Balance': customer_data['Balance'],
+         'NumOfProducts': customer_data['NumOfProducts'],
+         'HasCrCard': customer_data['HasCrCard'],
+         'IsActiveMember': customer_data['IsActiveMember'],
+         'EstimatedSalary': customer_data['EstimatedSalary']
+     }
+
+     # Convert the extracted data into a DataFrame
+     input_data = pd.DataFrame([sample])
+
+     # Make a churn prediction using the trained model
+     prediction = model.predict(input_data).tolist()[0]
+
+     # Map the prediction result to a human-readable label
+     prediction_label = "churn" if prediction == 1 else "not churn"
+
+     # Return the prediction as a JSON response
+     return jsonify({'Prediction': prediction_label})
+
+ # Define an endpoint to predict churn for a batch of customers
+ @churn_predictor_api.post('/v1/customerbatch')
+ def predict_churn_batch():
+     # Get the uploaded CSV file from the request
+     file = request.files['file']
+
+     # Read the file into a DataFrame
+     input_data = pd.read_csv(file)
+
+     # Make predictions for the batch data and convert raw predictions into a readable format
+     predictions = [
+         'Churn' if x == 1
+         else "Not Churn"
+         for x in model.predict(input_data.drop("CustomerId", axis=1)).tolist()
+     ]
+
+     cust_id_list = input_data.CustomerId.values.tolist()
+     output_dict = dict(zip(cust_id_list, predictions))
+
+     return output_dict
+
+ # Run the Flask app in debug mode when executed directly
+ if __name__ == '__main__':
+     churn_predictor_api.run(debug=True)
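Once the container is running, the single-customer endpoint can be exercised with a small client script. This is only a sketch: the base URL assumes the service is reachable locally on port 7860, and the feature values are illustrative placeholders rather than data from the repository.

import requests

# Hypothetical base URL; adjust to wherever the container is reachable
BASE_URL = "http://localhost:7860"

# Illustrative customer record with the nine features expected by /v1/customer
sample_customer = {
    "CreditScore": 650,
    "Geography": "France",
    "Age": 42,
    "Tenure": 3,
    "Balance": 75000.0,
    "NumOfProducts": 2,
    "HasCrCard": 1,
    "IsActiveMember": 1,
    "EstimatedSalary": 60000.0,
}

# POST the record as JSON and print the predicted label
response = requests.post(f"{BASE_URL}/v1/customer", json=sample_customer)
print(response.json())  # {"Prediction": "churn"} or {"Prediction": "not churn"}

The batch endpoint /v1/customerbatch instead expects a CSV upload (including a CustomerId column) under the form field name 'file', which requests can send via its files= argument.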
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ pandas==2.2.2
+ numpy==2.0.2
+ scikit-learn==1.6.1
+ xgboost==2.1.4
+ joblib==1.4.2
+ Werkzeug==2.2.2
+ flask==2.2.2
+ gunicorn==20.1.0
+ requests==2.28.1
+ uvicorn[standard]
+ streamlit==1.43.2