Spaces: Runtime error
Commit b5726de · Update main.py
Parent(s): 6c8d8d6

main.py CHANGED
@@ -1,15 +1,10 @@
 from fastapi import FastAPI
 import uvicorn
 
+
 import pandas as pd
-import numpy as np
-import requests
-from urllib.parse import urlparse, quote
-import re
-from bs4 import BeautifulSoup
-import time
 from joblib import Parallel, delayed
-from
+from redfin import Redfin
 
 app = FastAPI()
 
@@ -21,120 +16,86 @@ def root():
     return {"API": "Google Address Scrap"}
 
 
-
-
-
-    normalized_string = re.sub(r'[^\w\s]', '', normalized_string)
+def red_fin_api(add):
+    client = Redfin()
+    response = client.search(add)
 
-    return normalized_string
-
-
-def jaccard_similarity(string1, string2,n = 2, normalize=True):
     try:
-
-
-
-        grams1 = set(ngrams(string1, n))
-        grams2 = set(ngrams(string2, n))
-        similarity = len(grams1.intersection(grams2)) / len(grams1.union(grams2))
+        url = response['payload']['exactMatch']['url']
+        initial_info = client.initial_info(url)
     except:
-
-
-        if string2=='did not extract address':
-            similarity=0
-
-    return similarity
-
-def jaccard_sim_split_word_number(string1,string2):
-    numbers1 = ' '.join(re.findall(r'\d+', string1))
-    words1 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string1))
+        initial_info = add
 
-    numbers2 = ' '.join(re.findall(r'\d+', string2))
-    words2 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string2))
+    try:
+        property_id = initial_info['payload']['propertyId']
+        mls_data = client.below_the_fold(property_id)
+    except:
+        mls_data = add
 
-    number_similarity=jaccard_similarity(numbers1,numbers2)
-    words_similarity=jaccard_similarity(words1,words2)
-    return (number_similarity+words_similarity)/2
-
-def extract_website_domain(url):
-    parsed_url = urlparse(url)
-    return parsed_url.netloc
-
-
-def google_address(address):
 
-    search_query = quote(address)
-    url=f'https://www.google.com/search?q={search_query}'
-    response = requests.get(url)
-    soup = BeautifulSoup(response.content, "html.parser")
-
-    texts_links = []
-    for link in soup.find_all("a"):
-        t,l=link.get_text(), link.get("href")
-        if (l[:11]=='/url?q=http') and (len(t)>20 ):
-            texts_links.append((t,l))
-
-    text = soup.get_text()
-
-    texts_links_des=[]
-    for i,t_l in enumerate(texts_links):
-        start=text.find(texts_links[i][0][:50])
-        try:
-            end=text.find(texts_links[i+1][0][:50])
-        except:
-            end=text.find('Related searches')
-
-        description=text[start:end]
-        texts_links_des.append((t_l[0],t_l[1],description))
-
-    df=pd.DataFrame(texts_links_des,columns=['Title','Link','Description'])
-    df['Description']=df['Description'].bfill()
-    df['Address Output']=df['Title'].str.extract(r'(.+? \d{5})').fillna("**DID NOT EXTRACT ADDRESS**")
-
-    df['Link']=[i[7:i.find('&sa=')] for i in df['Link']]
-    df['Website'] = df['Link'].apply(extract_website_domain)
-
-    df['Square Footage']=df['Description'].str.extract(r"((\d+) Square Feet|(\d+) sq. ft.|(\d+) sqft|(\d+) Sq. Ft.|(\d+) sq|(\d+(?:,\d+)?) Sq\. Ft\.|(\d+(?:,\d+)?) sq)")[0]
     try:
-
+
+        lat,lon=initial_info['payload']['latLong'].values()
+        img=initial_info['payload']['preloadImageUrls'][0]
+
+        # int_group=r[1]['payload']['amenitiesInfo']['superGroups'][0]['amenityGroups']
+
+        ext_prop=mls_data['payload']['amenitiesInfo']['superGroups'][1]['amenityGroups'][0]['amenityEntries']
+        ext_prop=pd.DataFrame(ext_prop)
+        ext_prop['amenityValues']=[i[0] for i in ext_prop['amenityValues'].values]
+        ext_prop2=ext_prop[['referenceName','amenityValues']].T
+        ext_prop2.columns=ext_prop2.values[0]
+        ext_prop3=ext_prop2.tail(1).reset_index(drop=1)
+
+        df=pd.DataFrame(mls_data['payload']['publicRecordsInfo']['basicInfo'],index=[0]).drop(columns=['apn','propertyLastUpdatedDate','displayTimeZone'])
+
+        df['Lat']=lat
+        df['Lon']=lon
+        # df['Image']=img
+        df2=df.join(ext_prop3)
+        df2.insert(0,'url',f'https://www.redfin.com{url}')
+
     except:
-
-        df['Beds']=df['Description'].replace({'-':' ','total':''},regex=True).str.extract(r"(\d+) bed")
-
-
-        df['Baths']=df['Description'].replace({'-':' ','total':''},regex=True).str.extract(r"((\d+) bath|(\d+(?:\.\d+)?) bath)")[0]
-        df['Baths']=df['Baths'].str.extract(r'([\d.]+)').astype(float)
+        df2=pd.DataFrame({'Missing':[1]})
 
-
+    df2.insert(0,'Address Input',add)
 
-
-
-
-    df.insert(0,'Address Input',address)
-
-    return df
+
+    return df2
+
 
 
 def catch_errors(addresses):
     try:
-        return
+        return red_fin_api(addresses)
     except:
         return pd.DataFrame({'Address Input':[addresses]})
 
 
 def process_multiple_address(addresses):
-    results=Parallel(n_jobs=
+    results=Parallel(n_jobs=64, prefer="threads")(delayed(catch_errors)(i) for i in addresses)
     return results
 
 
-
+
+@app.get('/Redfin_Address_Scrap')
 async def predict(address_input: str):
 
-
+    address_input_split = address_input.split(';')
     results = process_multiple_address(address_input_split)
     results = pd.concat(results).reset_index(drop=1)
-
-
+
+    cols_order=['Address Input', 'sqFtFinished', 'totalSqFt', 'yearBuilt', 'propertyTypeName', 'beds', 'baths', 'numStories',
+                'url',
+                'Lat', 'Lon']
+    cols_other=[i for i in results.columns if i not in cols_order ]
+    results=results[cols_order+cols_other].reset_index()
+
+    results['index']=results['index']+1
+    results.index=results.index+1
+    results=results.rename(columns={'index':'Input Position'})
+    prediction['yearBuilt']=results['yearBuilt'].fillna(0).astype(int).astype(str).replace('0','')
+
    return prediction.to_json()
 
 
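The switch to `Parallel(n_jobs=64, prefer="threads")` in `process_multiple_address` fits this workload: each `catch_errors` call is dominated by blocking network I/O against Redfin, so threads overlap the waiting without process-startup or pickling overhead, and `n_jobs` can safely exceed the CPU count. A self-contained sketch of the same pattern, with a hypothetical `fake_lookup` standing in for the Redfin call:

```python
import time
from joblib import Parallel, delayed

def fake_lookup(address):
    # Hypothetical stand-in for red_fin_api/catch_errors:
    # the sleep simulates a blocking HTTP round-trip.
    time.sleep(0.1)
    return {'Address Input': address}

addresses = [f'{n} Example St' for n in range(1, 9)]

# prefer="threads" keeps workers in-process, which is fine here because
# the work releases the GIL while waiting on the network.
rows = Parallel(n_jobs=64, prefer="threads")(
    delayed(fake_lookup)(a) for a in addresses
)
print(rows)
```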
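One bug stands out in the new code: `predict` builds `results`, but its last two lines write to and return `prediction`, a name that is never assigned anywhere in the hunks shown, so a request to `/Redfin_Address_Scrap` would raise `NameError: name 'prediction' is not defined`. A minimal sketch of one plausible repair, assuming `prediction` was simply meant to be the reordered, renamed `results` frame:

```python
@app.get('/Redfin_Address_Scrap')
async def predict(address_input: str):
    address_input_split = address_input.split(';')
    results = process_multiple_address(address_input_split)
    results = pd.concat(results).reset_index(drop=True)

    cols_order = ['Address Input', 'sqFtFinished', 'totalSqFt', 'yearBuilt',
                  'propertyTypeName', 'beds', 'baths', 'numStories',
                  'url', 'Lat', 'Lon']
    cols_other = [c for c in results.columns if c not in cols_order]
    results = results[cols_order + cols_other].reset_index()

    # 1-based position of each address within the original input string
    results['index'] = results['index'] + 1
    results.index = results.index + 1

    # Assumption: bind the name the return statement expects; as committed,
    # `prediction` is read without ever being assigned.
    prediction = results.rename(columns={'index': 'Input Position'})
    prediction['yearBuilt'] = (
        prediction['yearBuilt'].fillna(0).astype(int).astype(str).replace('0', '')
    )
    return prediction.to_json()
```

Note this only fixes the name binding: the column reorder still assumes every `cols_order` column exists in `results`, which fails when all lookups fall back to the bare `{'Address Input': ...}` frame.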
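For reference, a sketch of how the new endpoint might be called once the Space runs. The base URL is a placeholder and the addresses are made-up inputs; multiple addresses travel in a single `address_input` query parameter separated by `;`, matching the `split(';')` inside `predict`:

```python
import requests

BASE_URL = 'http://localhost:7860'  # placeholder; substitute the Space's URL

# predict() splits address_input on ';', so several addresses can be
# looked up in one GET request.
addresses = '123 Main St Springfield IL;456 Oak Ave Portland OR'
resp = requests.get(
    f'{BASE_URL}/Redfin_Address_Scrap',
    params={'address_input': addresses},
)
print(resp.text)  # the JSON string produced by prediction.to_json()
```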