leadingbridge committed
Commit 21e3bc3 · verified · 1 Parent(s): a24f96a

Create app,py

Files changed (1)
  1. app,py +75 -0
app,py ADDED
@@ -0,0 +1,75 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+ from sklearn.model_selection import train_test_split, GridSearchCV
+ from sklearn.ensemble import RandomForestRegressor
+ from sklearn.metrics import mean_squared_error, r2_score
+
+ # URL to the Excel dataset on Hugging Face
+ data_url = "https://huggingface.co/datasets/leadingbridge/flat/resolve/main/NorthPoint30.xlsx"
+
+ # Cache the trained model for the lifetime of the app process so it is not retrained on every rerun
+ @st.cache_resource
+ def load_and_train_model():
+     df = pd.read_excel(data_url, engine="openpyxl")
+
+     # Drop columns that are not needed for prediction
+     cols_to_drop = ['Usage', 'Address', 'PricePerSquareFeet', 'InstrumentDate', 'Floor', 'Unit']
+     df.drop(columns=cols_to_drop, inplace=True, errors='ignore')
+
+     # Rename useful columns for consistency
+     df.rename(columns={"Floor.1": "Floor", "Unit.1": "Unit"}, inplace=True)
+
+     required_columns = [
+         'District', 'PriceInMillion', 'Longitude', 'Latitude',
+         'Floor', 'Unit', 'Area', 'Year', 'WeekNumber'
+     ]
+     if not all(col in df.columns for col in required_columns):
+         raise ValueError("Dataset is missing one or more required columns.")
+
+     feature_names = ['District', 'Longitude', 'Latitude', 'Floor', 'Unit', 'Area', 'Year', 'WeekNumber']
+     X = df[feature_names]
+     y = df['PriceInMillion']
+
+     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+
+     # 5-fold cross-validated grid search over a small random-forest hyperparameter grid
+     rf_param_grid = {
+         'n_estimators': [50, 100, 150],
+         'max_depth': [4, 6, 8],
+         'max_features': ['sqrt', 'log2', 3],
+         'random_state': [42]
+     }
+
+     rf_grid = GridSearchCV(RandomForestRegressor(), rf_param_grid, refit=True, verbose=1, cv=5, error_score='raise')
+     rf_grid.fit(X_train, y_train)
+
+     model = rf_grid.best_estimator_
+     return model, feature_names
+
+ @st.cache_data
+ def predict_price(_model, feature_names, new_data):
+     # Prefixing the model parameter with an underscore excludes it from st.cache_data's
+     # argument hashing; feature_names and new_data alone form the cache key.
+     new_data_df = pd.DataFrame([new_data], columns=feature_names)
+     prediction = _model.predict(new_data_df)
+     return prediction[0]
+
+ def main():
+     st.title("PROPERTY PRICE PREDICTION TOOL (Streamlit Version)")
+     st.markdown("Predict the price of a new property based on District, Longitude, Latitude, Floor, Unit, Area, Year, and Week Number.")
+
+     # Train the model once; subsequent reruns reuse the cached estimator
+     model, feature_names = load_and_train_model()
+
+     # Collect the eight model features from the user
+     district = st.selectbox("District (1 = Taikoo Shing, 2 = Mei Foo Sun Chuen, 3 = South Horizons, 4 = Whampoa Garden)", list(range(1, 9)))
+     longitude = st.number_input("Longitude", value=114.200)
+     latitude = st.number_input("Latitude", value=22.300)
+     floor = st.selectbox("Floor", list(range(1, 71)))
+     unit = st.selectbox("Unit (e.g., A=1, B=2, C=3, ...)", list(range(1, 31)))
+     area = st.slider("Area (in sq. feet)", min_value=137, max_value=5000, value=300)
+     year = st.selectbox("Year", [2024, 2025])
+     weeknumber = st.selectbox("Week Number", list(range(1, 53)))
+
+     if st.button("Predict"):
+         new_data = [district, longitude, latitude, floor, unit, area, year, weeknumber]
+         prediction = predict_price(model, feature_names, new_data)
+         st.success(f"🏠 Estimated Price: **${prediction:,.2f} Million**")
+
+ if __name__ == "__main__":
+     main()