File size: 3,637 Bytes
c16916d
8ae2ea0
 
 
 
7f0ac64
8ae2ea0
7f0ac64
 
 
 
c16916d
 
 
 
 
 
 
 
8ae2ea0
c16916d
8ae2ea0
c16916d
 
 
7f0ac64
 
c16916d
8ae2ea0
c16916d
 
 
8ae2ea0
c16916d
 
 
 
7f0ac64
 
8ae2ea0
c16916d
 
7f0ac64
 
c16916d
7f0ac64
c16916d
7f0ac64
 
8ae2ea0
c16916d
7f0ac64
8ae2ea0
7f0ac64
 
8b2b3e8
c16916d
 
8ae2ea0
c16916d
 
7f0ac64
c16916d
7f0ac64
 
 
c16916d
 
7f0ac64
 
c16916d
 
 
 
7f0ac64
 
c16916d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7f0ac64
 
c16916d
 
8ae2ea0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
# app.py
import gradio as gr
from ultralytics import YOLO
import cv2
import numpy as np
import torch
from PIL import Image
import pandas as pd
import os
import uuid
from datetime import datetime
import h3
import folium

# ========================
# Load Models
# ========================
# COCO-pretrained YOLOv8 nano detector (downloads weights on first run).
# NOTE(review): COCO has no "tree" class — confirm whether a custom
# tree-detection checkpoint is intended here.
yolo_model = YOLO("yolov8n.pt")
# MiDaS small monocular depth model, fetched from torch.hub; CPU inference only.
midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small", trust_repo=True)
midas.to("cpu").eval()
# Matching preprocessing transform for the small MiDaS variant.
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms", trust_repo=True).small_transform

# ========================
# CSV Initialization
# ========================
# Append-only measurement log; create it with a header row if absent so that
# later header=False appends line up with these columns.
csv_file = "tree_measurements.csv"
if not os.path.exists(csv_file):
    pd.DataFrame(columns=["Timestamp", "Estimated_Height", "Species", "Lat", "Lon", "H3_Index", "Image_File"]).to_csv(csv_file, index=False)

# Dummy Tree Classifier (replace with a real model or API later)
def classify_species(image):
    """Placeholder classifier: always reports an unknown species.

    The *image* argument is accepted for interface compatibility with a
    future real classifier but is currently ignored.
    """
    placeholder_label = "Unknown Species"
    return placeholder_label

# ========================
# Tree Processing Function
# ========================
def process_tree(image, lat, lon):
    """Detect a tree in *image*, estimate its height from monocular depth,
    classify its species, persist the observation, and return UI outputs.

    Args:
        image: PIL.Image from the Gradio input.
        lat, lon: Geographic coordinates; may be None if the user left the
            number fields blank.

    Returns:
        Tuple of (result_text, cropped_tree_image_or_None, map_html_or_None).
    """
    # Guard: gr.Number inputs default to None, which would crash h3 and the
    # CSV log downstream. Fail with a friendly message instead.
    if lat is None or lon is None:
        return "Please enter both latitude and longitude.", None, None

    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    img_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Run YOLOv8 detection. NOTE(review): the COCO-pretrained yolov8n model
    # has no "tree" class, so whatever object is detected first is treated as
    # the tree — confirm a tree-specific model is meant to replace this.
    results = yolo_model(img_cv)
    detections = results[0].boxes.data.cpu().numpy()
    if len(detections) == 0:
        return "No tree detected.", None, None

    # Crop the first detection (ultralytics sorts boxes by confidence).
    x1, y1, x2, y2, conf, cls = detections[0]
    tree_crop = img_cv[int(y1):int(y2), int(x1):int(x2)]
    # Guard: a degenerate (zero-area) box would crash the depth transform.
    if tree_crop.size == 0:
        return "No tree detected.", None, None

    # Estimate "height" as the depth range over the crop.
    # NOTE(review): MiDaS outputs *relative*, unitless inverse depth, so this
    # value is not calibrated meters — TODO calibrate before trusting it.
    input_tensor = midas_transforms(Image.fromarray(cv2.cvtColor(tree_crop, cv2.COLOR_BGR2RGB))).unsqueeze(0)
    with torch.no_grad():
        depth = midas(input_tensor).squeeze().cpu().numpy()
    approx_height = round(float(np.max(depth) - np.min(depth)), 2)

    # Species classification (placeholder model for now).
    species = classify_species(image)

    # Spatial index for the observation (h3 v3 API; renamed latlng_to_cell in v4).
    h3_index = h3.geo_to_h3(lat, lon, 9)

    # Persist the photo under a collision-resistant filename.
    image_id = f"tree_{uuid.uuid4().hex[:8]}.png"
    image.save(image_id)

    # Append the measurement to the CSV log (header written at startup).
    new_entry = pd.DataFrame([{
        "Timestamp": timestamp,
        "Estimated_Height": approx_height,
        "Species": species,
        "Lat": lat,
        "Lon": lon,
        "H3_Index": h3_index,
        "Image_File": image_id
    }])
    new_entry.to_csv(csv_file, mode='a', header=False, index=False)

    return f"Height: {approx_height} meters\nSpecies: {species}", Image.fromarray(tree_crop), generate_map()

# ========================
# Folium Map from CSV
# ========================
def generate_map():
    """Build a Folium map of all recorded trees and return embeddable HTML.

    Returns:
        Self-contained HTML markup (an iframe embed) suitable for a gr.HTML
        component. The map is also saved to "map.html" for standalone viewing.
    """
    df = pd.read_csv(csv_file)
    fmap = folium.Map(location=[20, 78], zoom_start=5)  # roughly centered on India
    for _, row in df.iterrows():
        folium.Marker(
            location=[row["Lat"], row["Lon"]],
            popup=f"{row['Timestamp']}\n{row['Species']}\n{row['Estimated_Height']} m"
        ).add_to(fmap)
    fmap.save("map.html")  # keep the standalone file for download/debugging
    # BUG FIX: the caller feeds this return value into a gr.HTML output, which
    # renders its input as HTML. Returning the filename "map.html" made the UI
    # display that literal text; return the rendered map markup instead.
    return fmap.get_root()._repr_html_()

# ========================
# Gradio UI
# ========================
# Declarative Gradio UI: photo + coordinates in, result text, detected crop,
# and an HTML map out.
with gr.Blocks() as demo:
    gr.Markdown("## 🌳 Tree Height & Species Estimator")
    with gr.Row():
        # Camera capture or file upload; delivered to process_tree as PIL.
        image_input = gr.Image(type="pil", label="Capture/Upload Tree Photo")
        lat_input = gr.Number(label="Latitude")
        lon_input = gr.Number(label="Longitude")
    submit_btn = gr.Button("Estimate Height & Species")
    output_text = gr.Textbox(label="Result")
    output_image = gr.Image(label="Detected Tree")
    # Renders whatever string process_tree's third return value contains as HTML.
    output_map = gr.HTML(label="Tree Map")

    # Wire the button: inputs map positionally onto process_tree(image, lat, lon).
    submit_btn.click(
        fn=process_tree,
        inputs=[image_input, lat_input, lon_input],
        outputs=[output_text, output_image, output_map]
    )

# ========================
demo.launch()