# This file wires together all classes and functions of the project into a single flow.
import logging
import os
from pathlib import Path

from dotenv import load_dotenv

from utils.zoom_in import croped_images, image_enhancements
from utils.distance import get_distances
from utils.generate_result import get_json_data

from object_detection.object_detection import ObjectDetection
from activity_detection.activity_detection import ActivityDetection

env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)

path = {
    'ACTIVITY_DET_MODEL_PATH': str(os.getenv('ACTIVITY_DET_MODEL_PATH')),
    'OBJECT_DET_MODEL_PATH': str(os.getenv('OBJECT_DET_MODEL_PATH')),
}
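
# Illustrative .env layout (an assumption; the actual file and model filenames
# are not part of this listing):
#   OBJECT_DET_MODEL_PATH=models/yolo_object_det.pt
#   ACTIVITY_DET_MODEL_PATH=models/activity_classifier.pt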

# Configure the logger
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(levelname)s - %(message)s",
    filename="logs.log",
)
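
# With the format above, a line in logs.log looks roughly like
# (timestamp is illustrative):
#   2024-01-01 12:00:00,000 - INFO - object detection done successfully...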

# Create a logger
logger = logging.getLogger("pipeline")

def pipeline(image):
    """this function takes input as image from streamlit application then performs object detection,cropping image, 
    image enhancement, activity detection, distance estimation and get final results in json format and returns to 
    streamlit application. 

    Args:
        image (numpy array): get numpy array of image which has 3 channels

    Returns:
        final_results: JSON Array which has below object
        {
            'zoomed_img':np.array([]) ,
            'actual_boxes':[],
            'merged_boundries':{},
        }
    """
    # Detect objects in the given image using YOLO and get JSON data for each detection.
    try:
        object_detection = ObjectDetection()
        logger.info("object detection object is created...")
        object_detection.set_trained_model_path(path['OBJECT_DET_MODEL_PATH'])
        logger.info("object detection model path is set...")
        object_json_data = object_detection.inference(image)
        logger.info("object detection done successfully...")

        # Get the list of cropped images with overlapping bounding boxes, plus crops of single objects.
        croped_images_list, single_object_images = croped_images(image, object_json_data)
        logger.info("cropping of image is done successfully...")

        # Enhance both the cropped images and the single-object images.
        enhanced_images, single_object_images = image_enhancements(croped_images_list, single_object_images)
        logger.info("enhancement of image is done successfully...")

        # Detect the activity of each person object using image classification.
        activity_detection = ActivityDetection()
        logger.info('activity detection object is created successfully...')
        activity_detection.set_trained_model_path(path['ACTIVITY_DET_MODEL_PATH'])
        logger.info('activity detection model is set')
        detected_activity = activity_detection.inference(single_object_images)
        logger.info("detection of activity is done successfully...")

        # Calculate distances of all objects
        distances_list = get_distances(object_json_data)
        logger.info("distance of object is calculated successfully...")

        # Assemble the final JSON array.
        final_results = get_json_data(object_json_data, enhanced_images, detected_activity, distances_list)
        logger.info('final result of given image is created successfully...')

        return final_results
    except Exception as e:
        # Log the failure instead of swallowing it silently; the caller receives None on error.
        logger.exception("pipeline failed: %s", e)
        return None
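
# --- Illustrative usage sketch (an assumption, not part of the Streamlit flow) ---
# The Streamlit app passes a 3-channel numpy array; the snippet below exercises the
# same contract with an image read from disk. "sample.jpg" is a placeholder path.
if __name__ == "__main__":
    import cv2

    sample_image = cv2.imread("sample.jpg")  # BGR numpy array with 3 channels
    if sample_image is None:
        raise FileNotFoundError("sample.jpg not found; point this at any test image")
    results = pipeline(sample_image)
    logger.info("pipeline returned an object of type: %s", type(results).__name__)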