import streamlit as st
import cv2
import numpy as np
from PIL import Image, ImageDraw
from transformers import DetrImageProcessor, DetrForObjectDetection, TrOCRProcessor, VisionEncoderDecoderModel
import torch

# Load models once and cache them so Streamlit reruns don't reload the weights
@st.cache_resource
def load_models():
    detr_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    detr_model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
    trocr_processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-stage1")
    trocr_model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-stage1")
    return detr_processor, detr_model, trocr_processor, trocr_model

detr_processor, detr_model, trocr_processor, trocr_model = load_models()
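# Note: "facebook/detr-resnet-50" is a generic COCO-trained detector (it has no dedicated
# "license plate" class), and "microsoft/trocr-base-stage1" is a pre-training checkpoint.
# For more reliable plate reading, a plate-specific detector and a fine-tuned OCR checkpoint
# (e.g. "microsoft/trocr-base-printed") would typically be substituted here.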

# Streamlit App Configuration
st.title("Real-Time Car Number Plate Recognition")
st.write("This app uses Hugging Face Transformers, OpenCV, and Streamlit for detecting and recognizing car number plates in real-time.")

# Authorized Car Database
authorized_cars = {"KA01AB1234", "MH12XY5678", "DL8CAF9090"}  # Dummy data for verification

# Detect License Plates
def detect_license_plate(frame):
    pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    inputs = detr_processor(images=pil_image, return_tensors="pt")
    with torch.no_grad():  # inference only; no gradients needed
        outputs = detr_model(**inputs)

    # Post-process outputs to get bounding boxes (keep detections above 0.9 confidence)
    target_sizes = torch.tensor([pil_image.size[::-1]])
    results = detr_processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)
    return results[0]["boxes"], pil_image

# Recognize Text from Plates
def recognize_text_from_plate(cropped_plate):
    inputs = trocr_processor(images=cropped_plate, return_tensors="pt")
    outputs = trocr_model.generate(**inputs)
    return trocr_processor.batch_decode(outputs, skip_special_tokens=True)[0]

# Verify Plate Text
def verify_plate(plate_text):
    # Normalize OCR output (drop spaces, uppercase) before matching against the database
    normalized = plate_text.replace(" ", "").upper()
    if normalized in authorized_cars:
        return f"✅ Access Granted: {normalized}"
    else:
        return f"❌ Access Denied: {normalized}"

# Real-Time Video Processing with OpenCV
def live_feed():
    cap = cv2.VideoCapture(0)  # Open webcam
    if not cap.isOpened():
        st.error("Unable to access the camera.")
        return

    stframe = st.empty()       # Placeholder updated with each processed video frame
    results_area = st.empty()  # Placeholder for the latest recognition results

    while True:
        ret, frame = cap.read()
        if not ret:
            st.error("Failed to capture frame from the camera. Exiting...")
            break

        # Detect plates
        boxes, pil_image = detect_license_plate(frame)
        draw = ImageDraw.Draw(pil_image)

        recognized_plates = []
        for box in boxes:
            # Convert tensor coordinates to plain ints for PIL
            x_min, y_min, x_max, y_max = [int(v) for v in box.tolist()]

            # Crop and recognize plate
            cropped_plate = pil_image.crop((x_min, y_min, x_max, y_max))
            plate_text = recognize_text_from_plate(cropped_plate)
            recognized_plates.append(plate_text)

            # Draw box and label
            draw.rectangle([x_min, y_min, x_max, y_max], outline="red", width=3)
            draw.text((x_min, y_min), plate_text, fill="red")

        # Convert back to OpenCV format
        processed_frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)

        # Stream video to Streamlit
        stframe.image(processed_frame, channels="BGR", use_column_width=True)

        # Show the latest results in a fixed placeholder so the page doesn't grow every frame
        if recognized_plates:
            results_area.write("\n\n".join(verify_plate(p) for p in recognized_plates))

    cap.release()
    cv2.destroyAllWindows()

# Streamlit UI
if st.button("Start Camera"):
    live_feed()
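# To launch the app (assuming this file is saved as, e.g., app.py):
#   streamlit run app.py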